public class TransactionAwareHTable extends AbstractTransactionAwareTable implements TransactionAware
Nested classes/interfaces inherited from class AbstractTransactionAwareTable: AbstractTransactionAwareTable.ActionChange

Fields inherited from class AbstractTransactionAwareTable: allowNonTransactional, changeSets, conflictLevel, pre014ChangeSetKey, SEPARATOR_BYTE_ARRAY, tx, txCodec

| Constructor and Description |
|---|
| TransactionAwareHTable(HTableInterface hTable) Create a transaction-aware instance of the passed HTable. |
| TransactionAwareHTable(HTableInterface hTable, boolean allowNonTransactional) Create a transaction-aware instance of the passed HTable, with the option of allowing non-transactional operations. |
| TransactionAwareHTable(HTableInterface hTable, TxConstants.ConflictDetection conflictLevel) Create a transaction-aware instance of the passed HTable. |
| TransactionAwareHTable(HTableInterface hTable, TxConstants.ConflictDetection conflictLevel, boolean allowNonTransactional) Create a transaction-aware instance of the passed HTable, with the option of allowing non-transactional operations. |
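For orientation, a minimal construction sketch. The class name TxTableSetup, the table name "mytable", and the configuration setup are illustrative only; the HBase calls assume the 0.98-era client API that this wrapper targets.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTableInterface;

public class TxTableSetup {
  // Wraps a plain HBase table handle; "mytable" is a placeholder table name.
  static TransactionAwareHTable openTransactionalTable() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTableInterface hTable = new HTable(conf, "mytable");
    // Mutations issued through the wrapper are tied to the current transaction
    // and are only flushed to HBase when that transaction commits.
    return new TransactionAwareHTable(hTable);
  }
}
```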
| Modifier and Type | Method and Description |
|---|---|
| void | addToOperation(org.apache.hadoop.hbase.client.OperationWithAttributes op, Transaction tx) |
| org.apache.hadoop.hbase.client.Result | append(org.apache.hadoop.hbase.client.Append append) |
| Object[] | batch(List<? extends org.apache.hadoop.hbase.client.Row> actions) |
| void | batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results) |
| <R> Object[] | batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
| <R> void | batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
| boolean | checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, org.apache.hadoop.hbase.client.Delete delete) |
| boolean | checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, org.apache.hadoop.hbase.client.Put put) |
| void | close() |
| org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel | coprocessorService(byte[] row) |
| <T extends com.google.protobuf.Service,R> Map<byte[],R> | coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) |
| <T extends com.google.protobuf.Service,R> void | coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) |
| void | delete(org.apache.hadoop.hbase.client.Delete delete) |
| void | delete(List<org.apache.hadoop.hbase.client.Delete> deletes) |
| protected boolean | doCommit() Commits any pending writes by flushing the wrapped HTable instance. |
| protected boolean | doRollback() Rolls back any persisted changes from the transaction by issuing offsetting deletes to the wrapped HTable instance. |
| boolean | exists(org.apache.hadoop.hbase.client.Get get) |
| Boolean[] | exists(List<org.apache.hadoop.hbase.client.Get> gets) |
| void | flushCommits() |
| org.apache.hadoop.hbase.client.Result | get(org.apache.hadoop.hbase.client.Get get) |
| org.apache.hadoop.hbase.client.Result[] | get(List<org.apache.hadoop.hbase.client.Get> gets) |
| org.apache.hadoop.conf.Configuration | getConfiguration() |
| org.apache.hadoop.hbase.TableName | getName() |
| org.apache.hadoop.hbase.client.Result | getRowOrBefore(byte[] row, byte[] family) |
| org.apache.hadoop.hbase.client.ResultScanner | getScanner(byte[] family) |
| org.apache.hadoop.hbase.client.ResultScanner | getScanner(byte[] family, byte[] qualifier) |
| org.apache.hadoop.hbase.client.ResultScanner | getScanner(org.apache.hadoop.hbase.client.Scan scan) |
| org.apache.hadoop.hbase.HTableDescriptor | getTableDescriptor() |
| protected byte[] | getTableKey() Returns the table name to use as a key prefix for the transaction change set. |
| byte[] | getTableName() |
| long | getWriteBufferSize() |
| org.apache.hadoop.hbase.client.Result | increment(org.apache.hadoop.hbase.client.Increment increment) |
| long | incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) |
| long | incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) |
| long | incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, org.apache.hadoop.hbase.client.Durability durability) |
| boolean | isAutoFlush() |
| protected void | makeRollbackOperation(org.apache.hadoop.hbase.client.Delete delete) |
| void | mutateRow(org.apache.hadoop.hbase.client.RowMutations rm) |
| void | put(List<org.apache.hadoop.hbase.client.Put> puts) |
| void | put(org.apache.hadoop.hbase.client.Put put) |
| void | setAutoFlush(boolean autoFlush) |
| void | setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) |
| void | setAutoFlushTo(boolean autoFlush) |
| void | setWriteBufferSize(long writeBufferSize) |
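The data operations above are meant to be invoked inside a transaction. A hedged usage sketch follows; it assumes Apache Tephra's TransactionContext and TransactionSystemClient (org.apache.tephra) and the HBase 0.98 client API, and the class name, row, family, and qualifier values are placeholders.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;

public class TxTableUsage {
  // txClient: obtained from the transaction service (setup not shown).
  static Result writeAndRead(TransactionSystemClient txClient, TransactionAwareHTable txTable)
      throws TransactionFailureException, IOException {
    TransactionContext context = new TransactionContext(txClient, txTable);
    context.start();                       // startTx() is invoked on the registered table
    try {
      txTable.put(new Put(Bytes.toBytes("row1"))
          .add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
      Result result = txTable.get(new Get(Bytes.toBytes("row1")));
      context.finish();                    // conflict check, then commitTx() flushes buffered writes
      return result;
    } catch (TransactionFailureException e) {
      // In production code an IOException from put/get should also trigger abort.
      context.abort();                     // rollbackTx() issues offsetting deletes
      throw e;
    }
  }
}
```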
Methods inherited from class AbstractTransactionAwareTable: addToChangeSet, commitTx, getAllowNonTransactional, getChangeKey, getTransactionAwareName, getTxChanges, getVIntBytes, postTxCommit, rollbackTx, setAllowNonTransactional, startTx, updateTx

Methods inherited from class java.lang.Object: clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

Methods inherited from interface TransactionAware: commitTx, getTransactionAwareName, getTxChanges, postTxCommit, rollbackTx, startTx, updateTx

Constructor Detail

public TransactionAwareHTable(HTableInterface hTable)
Create a transaction-aware instance of the passed HTable.
Parameters:
hTable - underlying HBase table to use

public TransactionAwareHTable(HTableInterface hTable, TxConstants.ConflictDetection conflictLevel)
Create a transaction-aware instance of the passed HTable.
Parameters:
hTable - underlying HBase table to use
conflictLevel - level of conflict detection to perform (defaults to COLUMN)

public TransactionAwareHTable(HTableInterface hTable, boolean allowNonTransactional)
Create a transaction-aware instance of the passed HTable, with the option of allowing non-transactional operations.
Parameters:
hTable - underlying HBase table to use
allowNonTransactional - if true, additional operations (checkAndPut, increment, checkAndDelete) will be available, though non-transactional

public TransactionAwareHTable(HTableInterface hTable, TxConstants.ConflictDetection conflictLevel, boolean allowNonTransactional)
Create a transaction-aware instance of the passed HTable, with the option of allowing non-transactional operations.
Parameters:
hTable - underlying HBase table to use
conflictLevel - level of conflict detection to perform (defaults to COLUMN)
allowNonTransactional - if true, additional operations (checkAndPut, increment, checkAndDelete) will be available, though non-transactional
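A hedged sketch of the fuller constructor. Package names assume Apache Tephra (org.apache.tephra); TxConstants.ConflictDetection.ROW is assumed to be an available conflict level (the documented default is COLUMN), and hTable stands for an existing HTableInterface handle such as the one from the setup sketch earlier.

```java
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.tephra.TxConstants;

// ROW-level conflict detection records changes at row rather than column
// granularity; allowNonTransactional = true additionally exposes checkAndPut,
// checkAndDelete and increment, which bypass transactional guarantees.
TransactionAwareHTable txTable = new TransactionAwareHTable(
    hTable,
    TxConstants.ConflictDetection.ROW,  // assumed enum constant; default is COLUMN
    true);                              // allow non-transactional operations
```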
Method Detail

protected byte[] getTableKey()
Returns the table name to use as a key prefix for the transaction change set.
Specified by: getTableKey in class AbstractTransactionAwareTable

protected boolean doCommit() throws IOException
Commits any pending writes by flushing the wrapped HTable instance.
Specified by: doCommit in class AbstractTransactionAwareTable
Throws: IOException

protected boolean doRollback() throws Exception
Rolls back any persisted changes from the transaction by issuing offsetting deletes to the wrapped HTable instance. How this is handled will depend on the delete API exposed by the specific version of HBase.
Specified by: doRollback in class AbstractTransactionAwareTable
Throws: Exception
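doCommit() and doRollback() are not called directly; they run when the TransactionAware callbacks fire. The following is a simplified, hedged sketch of that callback order, assuming Apache Tephra's Transaction and TransactionAware signatures (org.apache.tephra); normally TransactionContext drives these calls and also checks for conflicts with the transaction service before committing. The class and method names are illustrative.

```java
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.tephra.Transaction;

public class TxLifecycleSketch {
  // tx: a Transaction handed out by the transaction service (setup not shown).
  static void commitOrRollback(Transaction tx, TransactionAwareHTable txTable) throws Exception {
    txTable.startTx(tx);                  // subsequent mutations are tagged with this transaction
    txTable.put(new Put(Bytes.toBytes("row1"))
        .add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
    try {
      txTable.commitTx();                 // ends up in doCommit(): flushCommits() on the wrapped HTable
      txTable.postTxCommit();
    } catch (Exception e) {
      txTable.rollbackTx();               // ends up in doRollback(): offsetting deletes undo persisted writes
      throw e;
    }
  }
}
```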
public byte[] getTableName()
public org.apache.hadoop.hbase.TableName getName()
public org.apache.hadoop.conf.Configuration getConfiguration()
public org.apache.hadoop.hbase.HTableDescriptor getTableDescriptor() throws IOException
public boolean exists(org.apache.hadoop.hbase.client.Get get) throws IOException
public Boolean[] exists(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
public void batch(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results) throws IOException, InterruptedException
public Object[] batch(List<? extends org.apache.hadoop.hbase.client.Row> actions) throws IOException, InterruptedException
public <R> void batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, Object[] results, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException
public <R> Object[] batchCallback(List<? extends org.apache.hadoop.hbase.client.Row> actions, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws IOException, InterruptedException
public org.apache.hadoop.hbase.client.Result get(org.apache.hadoop.hbase.client.Get get) throws IOException
public org.apache.hadoop.hbase.client.Result[] get(List<org.apache.hadoop.hbase.client.Get> gets) throws IOException
public org.apache.hadoop.hbase.client.Result getRowOrBefore(byte[] row, byte[] family) throws IOException
public org.apache.hadoop.hbase.client.ResultScanner getScanner(org.apache.hadoop.hbase.client.Scan scan) throws IOException
public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family) throws IOException
public org.apache.hadoop.hbase.client.ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException
public void put(org.apache.hadoop.hbase.client.Put put) throws IOException
public void put(List<org.apache.hadoop.hbase.client.Put> puts) throws IOException
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, org.apache.hadoop.hbase.client.Put put) throws IOException
public void delete(org.apache.hadoop.hbase.client.Delete delete) throws IOException
public void delete(List<org.apache.hadoop.hbase.client.Delete> deletes) throws IOException
public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, org.apache.hadoop.hbase.client.Delete delete) throws IOException
public void mutateRow(org.apache.hadoop.hbase.client.RowMutations rm) throws IOException
public org.apache.hadoop.hbase.client.Result append(org.apache.hadoop.hbase.client.Append append) throws IOException
public org.apache.hadoop.hbase.client.Result increment(org.apache.hadoop.hbase.client.Increment increment) throws IOException
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, org.apache.hadoop.hbase.client.Durability durability) throws IOException
public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) throws IOException
public boolean isAutoFlush()
public void flushCommits() throws IOException
public void close() throws IOException
public org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel coprocessorService(byte[] row)
public <T extends com.google.protobuf.Service,R> Map<byte[],R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable) throws com.google.protobuf.ServiceException, Throwable
public <T extends com.google.protobuf.Service,R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T,R> callable, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable
public void setAutoFlush(boolean autoFlush)
public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail)
public void setAutoFlushTo(boolean autoFlush)
public long getWriteBufferSize()
public void setWriteBufferSize(long writeBufferSize) throws IOException
public void addToOperation(org.apache.hadoop.hbase.client.OperationWithAttributes op, Transaction tx) throws IOException
protected void makeRollbackOperation(org.apache.hadoop.hbase.client.Delete delete)
Copyright © 2018 The Apache Software Foundation. All rights reserved.