View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import java.io.IOException;
22  import java.io.InterruptedIOException;
23  import java.net.SocketTimeoutException;
24  import java.util.ArrayList;
25  import java.util.Arrays;
26  import java.util.HashMap;
27  import java.util.LinkedList;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Map.Entry;
31  import java.util.concurrent.atomic.AtomicInteger;
32  import java.util.concurrent.atomic.AtomicReference;
33  import java.util.concurrent.ExecutionException;
34  import java.util.concurrent.Future;
35  import java.util.concurrent.TimeUnit;
36  import java.util.concurrent.TimeoutException;
37  import java.util.regex.Pattern;
38  
39  import org.apache.commons.logging.Log;
40  import org.apache.commons.logging.LogFactory;
41  import org.apache.hadoop.conf.Configuration;
42  import org.apache.hadoop.hbase.Abortable;
43  import org.apache.hadoop.hbase.ClusterStatus;
44  import org.apache.hadoop.hbase.DoNotRetryIOException;
45  import org.apache.hadoop.hbase.HBaseConfiguration;
46  import org.apache.hadoop.hbase.HColumnDescriptor;
47  import org.apache.hadoop.hbase.HConstants;
48  import org.apache.hadoop.hbase.HRegionInfo;
49  import org.apache.hadoop.hbase.HRegionLocation;
50  import org.apache.hadoop.hbase.HTableDescriptor;
51  import org.apache.hadoop.hbase.MasterNotRunningException;
52  import org.apache.hadoop.hbase.MetaTableAccessor;
53  import org.apache.hadoop.hbase.NamespaceDescriptor;
54  import org.apache.hadoop.hbase.NotServingRegionException;
55  import org.apache.hadoop.hbase.ProcedureInfo;
56  import org.apache.hadoop.hbase.RegionException;
57  import org.apache.hadoop.hbase.RegionLocations;
58  import org.apache.hadoop.hbase.ServerName;
59  import org.apache.hadoop.hbase.TableExistsException;
60  import org.apache.hadoop.hbase.TableName;
61  import org.apache.hadoop.hbase.TableNotDisabledException;
62  import org.apache.hadoop.hbase.TableNotEnabledException;
63  import org.apache.hadoop.hbase.TableNotFoundException;
64  import org.apache.hadoop.hbase.UnknownRegionException;
65  import org.apache.hadoop.hbase.ZooKeeperConnectionException;
66  import org.apache.hadoop.hbase.classification.InterfaceAudience;
67  import org.apache.hadoop.hbase.classification.InterfaceStability;
68  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
69  import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
70  import org.apache.hadoop.hbase.client.security.SecurityCapability;
71  import org.apache.hadoop.hbase.exceptions.DeserializationException;
72  import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
73  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
74  import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
75  import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
76  import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
77  import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
78  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
79  import org.apache.hadoop.hbase.protobuf.RequestConverter;
80  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
81  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
82  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
83  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
84  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
85  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
86  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
87  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
88  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
89  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
90  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
91  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
92  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
93  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
94  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
95  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
96  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
97  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
98  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
99  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
131 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
132 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
133 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
134 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest;
135 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
136 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
137 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
138 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
139 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
140 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
141 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
142 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
143 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
144 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
145 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
146 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
147 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
148 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
149 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
150 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
151 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
152 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
153 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
154 import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
155 import org.apache.hadoop.hbase.quotas.QuotaFilter;
156 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
157 import org.apache.hadoop.hbase.quotas.QuotaSettings;
158 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
159 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
160 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
161 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
162 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
163 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
164 import org.apache.hadoop.hbase.util.Addressing;
165 import org.apache.hadoop.hbase.util.Bytes;
166 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
167 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
168 import org.apache.hadoop.hbase.util.Pair;
169 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
170 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
171 import org.apache.hadoop.ipc.RemoteException;
172 import org.apache.hadoop.util.StringUtils;
173 import org.apache.zookeeper.KeeperException;
174 
175 import com.google.common.annotations.VisibleForTesting;
176 import com.google.protobuf.ByteString;
177 import com.google.protobuf.ServiceException;
178 
179 /**
180  * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that
181  * this is an HBase-internal class as defined in
182  * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
183  * There are no guarantees for backwards source / binary compatibility and methods or class can
184  * change or go away without deprecation.
185  * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing
186  * an HBaseAdmin directly.
187  *
188  * <p>Connection should be an <i>unmanaged</i> connection obtained via
189  * {@link ConnectionFactory#createConnection(Configuration)}
190  *
191  * @see ConnectionFactory
192  * @see Connection
193  * @see Admin
194  */
195 @InterfaceAudience.Private
196 @InterfaceStability.Evolving
197 public class HBaseAdmin implements Admin {
198   private static final Log LOG = LogFactory.getLog(HBaseAdmin.class);
199 
200   private static final String ZK_IDENTIFIER_PREFIX =  "hbase-admin-on-";
201 
202   private ClusterConnection connection;
203 
204   private volatile Configuration conf;
205   private final long pause;
206   private final int numRetries;
207   // Some operations can take a long time such as disable of big table.
208   // numRetries is for 'normal' stuff... Multiply by this factor when
209   // want to wait a long time.
210   private final int retryLongerMultiplier;
211   private final int syncWaitTimeout;
212   private boolean aborted;
213   private boolean cleanupConnectionOnClose = false; // close the connection in close()
214   private boolean closed = false;
215   private int operationTimeout;
216 
217   private RpcRetryingCallerFactory rpcCallerFactory;
218   private RpcControllerFactory rpcControllerFactory;
219 
220   private NonceGenerator ng;
221 
222   /**
223    * Constructor.
224    * See {@link #HBaseAdmin(Connection connection)}
225    *
226    * @param c Configuration object. Copied internally.
227    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
228    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
229    */
230   @Deprecated
231   public HBaseAdmin(Configuration c)
232   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
233     // Will not leak connections, as the new implementation of the constructor
234     // does not throw exceptions anymore.
235     this(ConnectionManager.getConnectionInternal(new Configuration(c)));
236     this.cleanupConnectionOnClose = true;
237   }
238 
239   @Override
240   public int getOperationTimeout() {
241     return operationTimeout;
242   }
243 
244 
245   /**
246    * Constructor for externally managed Connections.
247    * The connection to master will be created when required by admin functions.
248    *
249    * @param connection The Connection instance to use
250    * @throws MasterNotRunningException
251    * @throws ZooKeeperConnectionException are not
252    *  thrown anymore but kept into the interface for backward api compatibility
253    * @deprecated Constructing HBaseAdmin objects manually has been deprecated.
254    * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
255    */
256   @Deprecated
257   public HBaseAdmin(Connection connection)
258       throws MasterNotRunningException, ZooKeeperConnectionException {
259     this((ClusterConnection)connection);
260   }
261 
262   HBaseAdmin(ClusterConnection connection) {
263     this.conf = connection.getConfiguration();
264     this.connection = connection;
265 
266     // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
267     this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
268         HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
269     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
270         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
271     this.retryLongerMultiplier = this.conf.getInt(
272         "hbase.client.retries.longer.multiplier", 10);
273     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
274         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
275     this.syncWaitTimeout = this.conf.getInt(
276       "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
277 
278     this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
279     this.rpcControllerFactory = connection.getRpcControllerFactory();
280 
281     this.ng = this.connection.getNonceGenerator();
282   }
283 
284   @Override
285   public void abort(String why, Throwable e) {
286     // Currently does nothing but throw the passed message and exception
287     this.aborted = true;
288     throw new RuntimeException(why, e);
289   }
290 
291   @Override
292   public boolean isAborted(){
293     return this.aborted;
294   }
295 
296   /**
297    * Abort a procedure
298    * @param procId ID of the procedure to abort
299    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
300    * @return true if aborted, false if procedure already completed or does not exist
301    * @throws IOException
302    */
303   @Override
304   public boolean abortProcedure(
305       final long procId,
306       final boolean mayInterruptIfRunning) throws IOException {
307     Future<Boolean> future = abortProcedureAsync(procId, mayInterruptIfRunning);
308     try {
309       return future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
310     } catch (InterruptedException e) {
311       throw new InterruptedIOException("Interrupted when waiting for procedure to be cancelled");
312     } catch (TimeoutException e) {
313       throw new TimeoutIOException(e);
314     } catch (ExecutionException e) {
315       if (e.getCause() instanceof IOException) {
316         throw (IOException)e.getCause();
317       } else {
318         throw new IOException(e.getCause());
319       }
320     }
321   }
322 
323   /**
324    * Abort a procedure but does not block and wait for it be completely removed.
325    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
326    * It may throw ExecutionException if there was an error while executing the operation
327    * or TimeoutException in case the wait timeout was not long enough to allow the
328    * operation to complete.
329    *
330    * @param procId ID of the procedure to abort
331    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
332    * @return true if aborted, false if procedure already completed or does not exist
333    * @throws IOException
334    */
335   @Override
336   public Future<Boolean> abortProcedureAsync(
337       final long procId,
338       final boolean mayInterruptIfRunning) throws IOException {
339     Boolean abortProcResponse = executeCallable(
340       new MasterCallable<AbortProcedureResponse>(getConnection()) {
341         @Override
342         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
343           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
344           controller.setCallTimeout(callTimeout);
345           AbortProcedureRequest abortProcRequest =
346               AbortProcedureRequest.newBuilder().setProcId(procId).build();
347           return master.abortProcedure(controller, abortProcRequest);
348         }
349       }).getIsProcedureAborted();
350 
351     AbortProcedureFuture abortProcFuture =
352         new AbortProcedureFuture(this, procId, abortProcResponse);
353     return abortProcFuture;
354   }
355 
356   private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
357     private boolean isAbortInProgress;
358 
359     public AbortProcedureFuture(
360         final HBaseAdmin admin,
361         final Long procId,
362         final Boolean abortProcResponse) {
363       super(admin, procId);
364       this.isAbortInProgress = abortProcResponse;
365     }
366 
367     @Override
368     public Boolean get(long timeout, TimeUnit unit)
369         throws InterruptedException, ExecutionException, TimeoutException {
370       if (!this.isAbortInProgress) {
371         return false;
372       }
373       super.get(timeout, unit);
374       return true;
375     }
376   }
377 
378   /** @return HConnection used by this object. */
379   @Override
380   public HConnection getConnection() {
381     return connection;
382   }
383 
384   /** @return - true if the master server is running. Throws an exception
385    *  otherwise.
386    * @throws ZooKeeperConnectionException
387    * @throws MasterNotRunningException
388    * @deprecated this has been deprecated without a replacement
389    */
390   @Deprecated
391   public boolean isMasterRunning()
392   throws MasterNotRunningException, ZooKeeperConnectionException {
393     return connection.isMasterRunning();
394   }
395 
396   /**
397    * @param tableName Table to check.
398    * @return True if table exists already.
399    * @throws IOException
400    */
401   @Override
402   public boolean tableExists(final TableName tableName) throws IOException {
403     return MetaTableAccessor.tableExists(connection, tableName);
404   }
405 
406   public boolean tableExists(final byte[] tableName)
407   throws IOException {
408     return tableExists(TableName.valueOf(tableName));
409   }
410 
411   public boolean tableExists(final String tableName)
412   throws IOException {
413     return tableExists(TableName.valueOf(tableName));
414   }
415 
416   @Override
417   public HTableDescriptor[] listTables() throws IOException {
418     return listTables((Pattern)null, false);
419   }
420 
421   @Override
422   public HTableDescriptor[] listTables(Pattern pattern) throws IOException {
423     return listTables(pattern, false);
424   }
425 
426   @Override
427   public HTableDescriptor[] listTables(String regex) throws IOException {
428     return listTables(Pattern.compile(regex), false);
429   }
430 
431   @Override
432   public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables)
433       throws IOException {
434     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
435       @Override
436       public HTableDescriptor[] call(int callTimeout) throws ServiceException {
437         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
438         controller.setCallTimeout(callTimeout);
439         GetTableDescriptorsRequest req =
440             RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
441         return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
442       }
443     });
444   }
445 
446   @Override
447   public HTableDescriptor[] listTables(String regex, boolean includeSysTables)
448       throws IOException {
449     return listTables(Pattern.compile(regex), includeSysTables);
450   }
451 
452   /**
453    * List all of the names of userspace tables.
454    * @return String[] table names
455    * @throws IOException if a remote or network exception occurs
456    * @deprecated Use {@link Admin#listTableNames()} instead
457    */
458   @Deprecated
459   public String[] getTableNames() throws IOException {
460     TableName[] tableNames = listTableNames();
461     String result[] = new String[tableNames.length];
462     for (int i = 0; i < tableNames.length; i++) {
463       result[i] = tableNames[i].getNameAsString();
464     }
465     return result;
466   }
467 
468   /**
469    * List all of the names of userspace tables matching the given regular expression.
470    * @param pattern The regular expression to match against
471    * @return String[] table names
472    * @throws IOException if a remote or network exception occurs
473    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
474    */
475   @Deprecated
476   public String[] getTableNames(Pattern pattern) throws IOException {
477     TableName[] tableNames = listTableNames(pattern);
478     String result[] = new String[tableNames.length];
479     for (int i = 0; i < tableNames.length; i++) {
480       result[i] = tableNames[i].getNameAsString();
481     }
482     return result;
483   }
484 
485   /**
486    * List all of the names of userspace tables matching the given regular expression.
487    * @param regex The regular expression to match against
488    * @return String[] table names
489    * @throws IOException if a remote or network exception occurs
490    * @deprecated Use {@link Admin#listTableNames(Pattern)} instead.
491    */
492   @Deprecated
493   public String[] getTableNames(String regex) throws IOException {
494     return getTableNames(Pattern.compile(regex));
495   }
496 
497   @Override
498   public TableName[] listTableNames() throws IOException {
499     return listTableNames((Pattern)null, false);
500   }
501 
502   @Override
503   public TableName[] listTableNames(Pattern pattern) throws IOException {
504     return listTableNames(pattern, false);
505   }
506 
507   @Override
508   public TableName[] listTableNames(String regex) throws IOException {
509     return listTableNames(Pattern.compile(regex), false);
510   }
511 
512   @Override
513   public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables)
514       throws IOException {
515     return executeCallable(new MasterCallable<TableName[]>(getConnection()) {
516       @Override
517       public TableName[] call(int callTimeout) throws ServiceException {
518         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
519         controller.setCallTimeout(callTimeout);
520         GetTableNamesRequest req =
521             RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables);
522         return ProtobufUtil.getTableNameArray(master.getTableNames(controller, req)
523             .getTableNamesList());
524       }
525     });
526   }
527 
528   @Override
529   public TableName[] listTableNames(final String regex, final boolean includeSysTables)
530       throws IOException {
531     return listTableNames(Pattern.compile(regex), includeSysTables);
532   }
533 
534   /**
535    * Method for getting the tableDescriptor
536    * @param tableName as a byte []
537    * @return the tableDescriptor
538    * @throws TableNotFoundException
539    * @throws IOException if a remote or network exception occurs
540    */
541   @Override
542   public HTableDescriptor getTableDescriptor(final TableName tableName)
543   throws TableNotFoundException, IOException {
544      return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
545        operationTimeout);
546   }
547 
548   static HTableDescriptor getTableDescriptor(final TableName tableName, HConnection connection,
549       RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
550          int operationTimeout) throws TableNotFoundException, IOException {
551 
552       if (tableName == null) return null;
553       HTableDescriptor htd = executeCallable(new MasterCallable<HTableDescriptor>(connection) {
554         @Override
555         public HTableDescriptor call(int callTimeout) throws ServiceException {
556           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
557           controller.setCallTimeout(callTimeout);
558           GetTableDescriptorsResponse htds;
559           GetTableDescriptorsRequest req =
560                   RequestConverter.buildGetTableDescriptorsRequest(tableName);
561           htds = master.getTableDescriptors(controller, req);
562 
563           if (!htds.getTableSchemaList().isEmpty()) {
564             return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
565           }
566           return null;
567         }
568       }, rpcCallerFactory, operationTimeout);
569       if (htd != null) {
570         return htd;
571       }
572       throw new TableNotFoundException(tableName.getNameAsString());
573   }
574 
575   public HTableDescriptor getTableDescriptor(final byte[] tableName)
576   throws TableNotFoundException, IOException {
577     return getTableDescriptor(TableName.valueOf(tableName));
578   }
579 
580   private long getPauseTime(int tries) {
581     int triesCount = tries;
582     if (triesCount >= HConstants.RETRY_BACKOFF.length) {
583       triesCount = HConstants.RETRY_BACKOFF.length - 1;
584     }
585     return this.pause * HConstants.RETRY_BACKOFF[triesCount];
586   }
587 
588   /**
589    * Creates a new table.
590    * Synchronous operation.
591    *
592    * @param desc table descriptor for table
593    *
594    * @throws IllegalArgumentException if the table name is reserved
595    * @throws MasterNotRunningException if master is not running
596    * @throws TableExistsException if table already exists (If concurrent
597    * threads, the table may have been created between test-for-existence
598    * and attempt-at-creation).
599    * @throws IOException if a remote or network exception occurs
600    */
601   @Override
602   public void createTable(HTableDescriptor desc)
603   throws IOException {
604     createTable(desc, null);
605   }
606 
607   /**
608    * Creates a new table with the specified number of regions.  The start key
609    * specified will become the end key of the first region of the table, and
610    * the end key specified will become the start key of the last region of the
611    * table (the first region has a null start key and the last region has a
612    * null end key).
613    *
614    * BigInteger math will be used to divide the key range specified into
615    * enough segments to make the required number of total regions.
616    *
617    * Synchronous operation.
618    *
619    * @param desc table descriptor for table
620    * @param startKey beginning of key range
621    * @param endKey end of key range
622    * @param numRegions the total number of regions to create
623    *
624    * @throws IllegalArgumentException if the table name is reserved
625    * @throws MasterNotRunningException if master is not running
626    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
627    * threads, the table may have been created between test-for-existence
628    * and attempt-at-creation).
629    * @throws IOException
630    */
631   @Override
632   public void createTable(HTableDescriptor desc, byte [] startKey,
633       byte [] endKey, int numRegions)
634   throws IOException {
635     if(numRegions < 3) {
636       throw new IllegalArgumentException("Must create at least three regions");
637     } else if(Bytes.compareTo(startKey, endKey) >= 0) {
638       throw new IllegalArgumentException("Start key must be smaller than end key");
639     }
640     if (numRegions == 3) {
641       createTable(desc, new byte[][]{startKey, endKey});
642       return;
643     }
644     byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
645     if(splitKeys == null || splitKeys.length != numRegions - 1) {
646       throw new IllegalArgumentException("Unable to split key range into enough regions");
647     }
648     createTable(desc, splitKeys);
649   }
650 
651   /**
652    * Creates a new table with an initial set of empty regions defined by the
653    * specified split keys.  The total number of regions created will be the
654    * number of split keys plus one. Synchronous operation.
655    * Note : Avoid passing empty split key.
656    *
657    * @param desc table descriptor for table
658    * @param splitKeys array of split keys for the initial regions of the table
659    *
660    * @throws IllegalArgumentException if the table name is reserved, if the split keys
661    * are repeated and if the split key has empty byte array.
662    * @throws MasterNotRunningException if master is not running
663    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
664    * threads, the table may have been created between test-for-existence
665    * and attempt-at-creation).
666    * @throws IOException
667    */
668   @Override
669   public void createTable(final HTableDescriptor desc, byte [][] splitKeys)
670       throws IOException {
671     Future<Void> future = createTableAsyncV2(desc, splitKeys);
672     try {
673       // TODO: how long should we wait? spin forever?
674       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
675     } catch (InterruptedException e) {
676       throw new InterruptedIOException("Interrupted when waiting" +
677           " for table to be enabled; meta scan was done");
678     } catch (TimeoutException e) {
679       throw new TimeoutIOException(e);
680     } catch (ExecutionException e) {
681       if (e.getCause() instanceof IOException) {
682         throw (IOException)e.getCause();
683       } else {
684         throw new IOException(e.getCause());
685       }
686     }
687   }
688 
689   /**
690    * Creates a new table but does not block and wait for it to come online.
691    * Asynchronous operation.  To check if the table exists, use
692    * {@link #isTableAvailable} -- it is not safe to create an HTable
693    * instance to this table before it is available.
694    * Note : Avoid passing empty split key.
695    * @param desc table descriptor for table
696    *
697    * @throws IllegalArgumentException Bad table name, if the split keys
698    * are repeated and if the split key has empty byte array.
699    * @throws MasterNotRunningException if master is not running
700    * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
701    * threads, the table may have been created between test-for-existence
702    * and attempt-at-creation).
703    * @throws IOException
704    */
705   @Override
706   public void createTableAsync(final HTableDescriptor desc, final byte [][] splitKeys)
707       throws IOException {
708     createTableAsyncV2(desc, splitKeys);
709   }
710 
711   /**
712    * Creates a new table but does not block and wait for it to come online.
713    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
714    * It may throw ExecutionException if there was an error while executing the operation
715    * or TimeoutException in case the wait timeout was not long enough to allow the
716    * operation to complete.
717    *
718    * @param desc table descriptor for table
719    * @param splitKeys keys to check if the table has been created with all split keys
720    * @throws IllegalArgumentException Bad table name, if the split keys
721    *    are repeated and if the split key has empty byte array.
722    * @throws IOException if a remote or network exception occurs
723    * @return the result of the async creation. You can use Future.get(long, TimeUnit)
724    *    to wait on the operation to complete.
725    */
726   // TODO: This should be called Async but it will break binary compatibility
727   private Future<Void> createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys)
728       throws IOException {
729     if (desc.getTableName() == null) {
730       throw new IllegalArgumentException("TableName cannot be null");
731     }
732     if (splitKeys != null && splitKeys.length > 0) {
733       Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
734       // Verify there are no duplicate split keys
735       byte[] lastKey = null;
736       for (byte[] splitKey : splitKeys) {
737         if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
738           throw new IllegalArgumentException(
739               "Empty split key must not be passed in the split keys.");
740         }
741         if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
742           throw new IllegalArgumentException("All split keys must be unique, " +
743             "found duplicate: " + Bytes.toStringBinary(splitKey) +
744             ", " + Bytes.toStringBinary(lastKey));
745         }
746         lastKey = splitKey;
747       }
748     }
749 
750     CreateTableResponse response = executeCallable(
751       new MasterCallable<CreateTableResponse>(getConnection()) {
752         @Override
753         public CreateTableResponse call(int callTimeout) throws ServiceException {
754           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
755           controller.setCallTimeout(callTimeout);
756           controller.setPriority(desc.getTableName());
757           CreateTableRequest request = RequestConverter.buildCreateTableRequest(
758             desc, splitKeys, ng.getNonceGroup(), ng.newNonce());
759           return master.createTable(controller, request);
760         }
761       });
762     return new CreateTableFuture(this, desc, splitKeys, response);
763   }
764 
765   private static class CreateTableFuture extends ProcedureFuture<Void> {
766     private final HTableDescriptor desc;
767     private final byte[][] splitKeys;
768 
769     public CreateTableFuture(final HBaseAdmin admin, final HTableDescriptor desc,
770         final byte[][] splitKeys, final CreateTableResponse response) {
771       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
772       this.splitKeys = splitKeys;
773       this.desc = desc;
774     }
775 
776     @Override
777     protected Void waitOperationResult(final long deadlineTs)
778         throws IOException, TimeoutException {
779       waitForTableEnabled(deadlineTs);
780       waitForAllRegionsOnline(deadlineTs);
781       return null;
782     }
783 
784     @Override
785     protected Void postOperationResult(final Void result, final long deadlineTs)
786         throws IOException, TimeoutException {
787       LOG.info("Created " + desc.getTableName());
788       return result;
789     }
790 
791     private void waitForTableEnabled(final long deadlineTs)
792         throws IOException, TimeoutException {
793       waitForState(deadlineTs, new WaitForStateCallable() {
794         @Override
795         public boolean checkState(int tries) throws IOException {
796           try {
797             if (getAdmin().isTableAvailable(desc.getTableName())) {
798               return true;
799             }
800           } catch (TableNotFoundException tnfe) {
801             LOG.debug("Table "+ desc.getTableName() +" was not enabled, sleeping. tries="+  tries);
802           }
803           return false;
804         }
805 
806         @Override
807         public void throwInterruptedException() throws InterruptedIOException {
808           throw new InterruptedIOException("Interrupted when waiting for table " +
809               desc.getTableName() + " to be enabled");
810         }
811 
812         @Override
813         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
814           throw new TimeoutException("Table " + desc.getTableName() +
815             " not enabled after " + elapsedTime + "msec");
816         }
817       });
818     }
819 
820     private void waitForAllRegionsOnline(final long deadlineTs)
821         throws IOException, TimeoutException {
822       final AtomicInteger actualRegCount = new AtomicInteger(0);
823       final MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
824         @Override
825         public boolean processRow(Result rowResult) throws IOException {
826           RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult);
827           if (list == null) {
828             LOG.warn("No serialized HRegionInfo in " + rowResult);
829             return true;
830           }
831           HRegionLocation l = list.getRegionLocation();
832           if (l == null) {
833             return true;
834           }
835           if (!l.getRegionInfo().getTable().equals(desc.getTableName())) {
836             return false;
837           }
838           if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true;
839           HRegionLocation[] locations = list.getRegionLocations();
840           for (HRegionLocation location : locations) {
841             if (location == null) continue;
842             ServerName serverName = location.getServerName();
843             // Make sure that regions are assigned to server
844             if (serverName != null && serverName.getHostAndPort() != null) {
845               actualRegCount.incrementAndGet();
846             }
847           }
848           return true;
849         }
850       };
851 
852       int tries = 0;
853       IOException serverEx = null;
854       int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication();
855       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
856         actualRegCount.set(0);
857         MetaScanner.metaScan(getAdmin().getConnection(), visitor, desc.getTableName());
858         if (actualRegCount.get() == numRegs) {
859           // all the regions are online
860           return;
861         }
862 
863         try {
864           Thread.sleep(getAdmin().getPauseTime(tries++));
865         } catch (InterruptedException e) {
866           throw new InterruptedIOException("Interrupted when opening" +
867             " regions; " + actualRegCount.get() + " of " + numRegs +
868             " regions processed so far");
869         }
870       }
871       throw new TimeoutException("Only " + actualRegCount.get() +
872               " of " + numRegs + " regions are online; retries exhausted.");
873     }
874   }
875 
876   public void deleteTable(final String tableName) throws IOException {
877     deleteTable(TableName.valueOf(tableName));
878   }
879 
880   public void deleteTable(final byte[] tableName) throws IOException {
881     deleteTable(TableName.valueOf(tableName));
882   }
883 
884   /**
885    * Deletes a table.
886    * Synchronous operation.
887    *
888    * @param tableName name of table to delete
889    * @throws IOException if a remote or network exception occurs
890    */
891   @Override
892   public void deleteTable(final TableName tableName) throws IOException {
893     Future<Void> future = deleteTableAsyncV2(tableName);
894     try {
895       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
896     } catch (InterruptedException e) {
897       throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
898     } catch (TimeoutException e) {
899       throw new TimeoutIOException(e);
900     } catch (ExecutionException e) {
901       if (e.getCause() instanceof IOException) {
902         throw (IOException)e.getCause();
903       } else {
904         throw new IOException(e.getCause());
905       }
906     }
907   }
908 
909   /**
910    * Deletes the table but does not block and wait for it be completely removed.
911    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
912    * It may throw ExecutionException if there was an error while executing the operation
913    * or TimeoutException in case the wait timeout was not long enough to allow the
914    * operation to complete.
915    *
916    * @param desc table descriptor for table
917    * @param tableName name of table to delete
918    * @throws IOException if a remote or network exception occurs
919    * @return the result of the async delete. You can use Future.get(long, TimeUnit)
920    *    to wait on the operation to complete.
921    */
922   // TODO: This should be called Async but it will break binary compatibility
923   private Future<Void> deleteTableAsyncV2(final TableName tableName) throws IOException {
924     DeleteTableResponse response = executeCallable(
925       new MasterCallable<DeleteTableResponse>(getConnection()) {
926         @Override
927         public DeleteTableResponse call(int callTimeout) throws ServiceException {
928           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
929           controller.setCallTimeout(callTimeout);
930           controller.setPriority(tableName);
931           DeleteTableRequest req =
932               RequestConverter.buildDeleteTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
933           return master.deleteTable(controller,req);
934         }
935       });
936     return new DeleteTableFuture(this, tableName, response);
937   }
938 
939   private static class DeleteTableFuture extends ProcedureFuture<Void> {
940     private final TableName tableName;
941 
942     public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName,
943         final DeleteTableResponse response) {
944       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
945       this.tableName = tableName;
946     }
947 
948     @Override
949     protected Void waitOperationResult(final long deadlineTs)
950         throws IOException, TimeoutException {
951       waitTableNotFound(deadlineTs);
952       return null;
953     }
954 
955     @Override
956     protected Void postOperationResult(final Void result, final long deadlineTs)
957         throws IOException, TimeoutException {
958       // Delete cached information to prevent clients from using old locations
959       getAdmin().getConnection().clearRegionCache(tableName);
960       LOG.info("Deleted " + tableName);
961       return result;
962     }
963 
964     private void waitTableNotFound(final long deadlineTs)
965         throws IOException, TimeoutException {
966       waitForState(deadlineTs, new WaitForStateCallable() {
967         @Override
968         public boolean checkState(int tries) throws IOException {
969           return !getAdmin().tableExists(tableName);
970         }
971 
972         @Override
973         public void throwInterruptedException() throws InterruptedIOException {
974           throw new InterruptedIOException("Interrupted when waiting for table to be deleted");
975         }
976 
977         @Override
978         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
979           throw new TimeoutException("Table " + tableName + " not yet deleted after " +
980               elapsedTime + "msec");
981         }
982       });
983     }
984   }
985 
986   /**
987    * Deletes tables matching the passed in pattern and wait on completion.
988    *
989    * Warning: Use this method carefully, there is no prompting and the effect is
990    * immediate. Consider using {@link #listTables(java.lang.String)} and
991    * {@link #deleteTable(byte[])}
992    *
993    * @param regex The regular expression to match table names against
994    * @return Table descriptors for tables that couldn't be deleted
995    * @throws IOException
996    * @see #deleteTables(java.util.regex.Pattern)
997    * @see #deleteTable(java.lang.String)
998    */
999   @Override
1000   public HTableDescriptor[] deleteTables(String regex) throws IOException {
1001     return deleteTables(Pattern.compile(regex));
1002   }
1003 
1004   /**
1005    * Delete tables matching the passed in pattern and wait on completion.
1006    *
1007    * Warning: Use this method carefully, there is no prompting and the effect is
1008    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1009    * {@link #deleteTable(byte[])}
1010    *
1011    * @param pattern The pattern to match table names against
1012    * @return Table descriptors for tables that couldn't be deleted
1013    * @throws IOException
1014    */
1015   @Override
1016   public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException {
1017     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1018     for (HTableDescriptor table : listTables(pattern)) {
1019       try {
1020         deleteTable(table.getTableName());
1021       } catch (IOException ex) {
1022         LOG.info("Failed to delete table " + table.getTableName(), ex);
1023         failed.add(table);
1024       }
1025     }
1026     return failed.toArray(new HTableDescriptor[failed.size()]);
1027   }
1028 
1029   /**
1030    * Truncate a table.
1031    * Synchronous operation.
1032    *
1033    * @param tableName name of table to truncate
1034    * @param preserveSplits True if the splits should be preserved
1035    * @throws IOException if a remote or network exception occurs
1036    */
1037   @Override
1038   public void truncateTable(final TableName tableName, final boolean preserveSplits)
1039       throws IOException {
1040     executeCallable(new MasterCallable<Void>(getConnection()) {
1041       @Override
1042       public Void call(int callTimeout) throws ServiceException {
1043         TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(
1044           tableName, preserveSplits, ng.getNonceGroup(), ng.newNonce());
1045         master.truncateTable(null, req);
1046         return null;
1047       }
1048     });
1049   }
1050 
1051   /**
1052    * Enable a table.  May timeout.  Use {@link #enableTableAsync(byte[])}
1053    * and {@link #isTableEnabled(byte[])} instead.
1054    * The table has to be in disabled state for it to be enabled.
1055    * @param tableName name of the table
1056    * @throws IOException if a remote or network exception occurs
1057    * There could be couple types of IOException
1058    * TableNotFoundException means the table doesn't exist.
1059    * TableNotDisabledException means the table isn't in disabled state.
1060    * @see #isTableEnabled(byte[])
1061    * @see #disableTable(byte[])
1062    * @see #enableTableAsync(byte[])
1063    */
1064   @Override
1065   public void enableTable(final TableName tableName)
1066   throws IOException {
1067     Future<Void> future = enableTableAsyncV2(tableName);
1068     try {
1069       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1070     } catch (InterruptedException e) {
1071       throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1072     } catch (TimeoutException e) {
1073       throw new TimeoutIOException(e);
1074     } catch (ExecutionException e) {
1075       if (e.getCause() instanceof IOException) {
1076         throw (IOException)e.getCause();
1077       } else {
1078         throw new IOException(e.getCause());
1079       }
1080     }
1081   }
1082 
1083   public void enableTable(final byte[] tableName)
1084   throws IOException {
1085     enableTable(TableName.valueOf(tableName));
1086   }
1087 
1088   public void enableTable(final String tableName)
1089   throws IOException {
1090     enableTable(TableName.valueOf(tableName));
1091   }
1092 
1093   /**
1094    * Wait for the table to be enabled and available
1095    * If enabling the table exceeds the retry period, an exception is thrown.
1096    * @param tableName name of the table
1097    * @throws IOException if a remote or network exception occurs or
1098    *    table is not enabled after the retries period.
1099    */
1100   private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
1101     boolean enabled = false;
1102     long start = EnvironmentEdgeManager.currentTime();
1103     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
1104       try {
1105         enabled = isTableEnabled(tableName);
1106       } catch (TableNotFoundException tnfe) {
1107         // wait for table to be created
1108         enabled = false;
1109       }
1110       enabled = enabled && isTableAvailable(tableName);
1111       if (enabled) {
1112         break;
1113       }
1114       long sleep = getPauseTime(tries);
1115       if (LOG.isDebugEnabled()) {
1116         LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
1117           "enabled in " + tableName);
1118       }
1119       try {
1120         Thread.sleep(sleep);
1121       } catch (InterruptedException e) {
1122         // Do this conversion rather than let it out because do not want to
1123         // change the method signature.
1124         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
1125       }
1126     }
1127     if (!enabled) {
1128       long msec = EnvironmentEdgeManager.currentTime() - start;
1129       throw new IOException("Table '" + tableName +
1130         "' not yet enabled, after " + msec + "ms.");
1131     }
1132   }
1133 
1134   /**
1135    * Brings a table on-line (enables it).  Method returns immediately though
1136    * enable of table may take some time to complete, especially if the table
1137    * is large (All regions are opened as part of enabling process).  Check
1138    * {@link #isTableEnabled(byte[])} to learn when table is fully online.  If
1139    * table is taking too long to online, check server logs.
1140    * @param tableName
1141    * @throws IOException
1142    * @since 0.90.0
1143    */
1144   @Override
1145   public void enableTableAsync(final TableName tableName)
1146   throws IOException {
1147     enableTableAsyncV2(tableName);
1148   }
1149 
1150   public void enableTableAsync(final byte[] tableName)
1151   throws IOException {
1152     enableTable(TableName.valueOf(tableName));
1153   }
1154 
1155   public void enableTableAsync(final String tableName)
1156   throws IOException {
1157     enableTableAsync(TableName.valueOf(tableName));
1158   }
1159 
1160   /**
1161    * Enable the table but does not block and wait for it be completely enabled.
1162    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1163    * It may throw ExecutionException if there was an error while executing the operation
1164    * or TimeoutException in case the wait timeout was not long enough to allow the
1165    * operation to complete.
1166    *
1167    * @param tableName name of table to delete
1168    * @throws IOException if a remote or network exception occurs
1169    * @return the result of the async enable. You can use Future.get(long, TimeUnit)
1170    *    to wait on the operation to complete.
1171    */
1172   // TODO: This should be called Async but it will break binary compatibility
1173   private Future<Void> enableTableAsyncV2(final TableName tableName) throws IOException {
1174     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1175     EnableTableResponse response = executeCallable(
1176       new MasterCallable<EnableTableResponse>(getConnection()) {
1177         @Override
1178         public EnableTableResponse call(int callTimeout) throws ServiceException {
1179           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1180           controller.setCallTimeout(callTimeout);
1181           controller.setPriority(tableName);
1182 
1183           LOG.info("Started enable of " + tableName);
1184           EnableTableRequest req =
1185               RequestConverter.buildEnableTableRequest(tableName, ng.getNonceGroup(),ng.newNonce());
1186           return master.enableTable(controller,req);
1187         }
1188       });
1189     return new EnableTableFuture(this, tableName, response);
1190   }
1191 
1192   private static class EnableTableFuture extends ProcedureFuture<Void> {
1193     private final TableName tableName;
1194 
1195     public EnableTableFuture(final HBaseAdmin admin, final TableName tableName,
1196         final EnableTableResponse response) {
1197       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
1198       this.tableName = tableName;
1199     }
1200 
1201     @Override
1202     protected Void waitOperationResult(final long deadlineTs)
1203         throws IOException, TimeoutException {
1204       waitTableEnabled(deadlineTs);
1205       return null;
1206     }
1207 
1208     @Override
1209     protected Void postOperationResult(final Void result, final long deadlineTs)
1210         throws IOException, TimeoutException {
1211       LOG.info("Enabled " + tableName);
1212       return result;
1213     }
1214 
1215     private void waitTableEnabled(final long deadlineTs)
1216         throws IOException, TimeoutException {
1217       waitForState(deadlineTs, new WaitForStateCallable() {
1218         @Override
1219         public boolean checkState(int tries) throws IOException {
1220           boolean enabled;
1221           try {
1222             enabled = getAdmin().isTableEnabled(tableName);
1223           } catch (TableNotFoundException tnfe) {
1224             return false;
1225           }
1226           return enabled && getAdmin().isTableAvailable(tableName);
1227         }
1228 
1229         @Override
1230         public void throwInterruptedException() throws InterruptedIOException {
1231           throw new InterruptedIOException("Interrupted when waiting for table to be enabled");
1232         }
1233 
1234         @Override
1235         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1236           throw new TimeoutException("Table " + tableName + " not yet enabled after " +
1237               elapsedTime + "msec");
1238         }
1239       });
1240     }
1241   }
1242 
1243   /**
1244    * Enable tables matching the passed in pattern and wait on completion.
1245    *
1246    * Warning: Use this method carefully, there is no prompting and the effect is
1247    * immediate. Consider using {@link #listTables(java.lang.String)} and
1248    * {@link #enableTable(byte[])}
1249    *
1250    * @param regex The regular expression to match table names against
1251    * @throws IOException
1252    * @see #enableTables(java.util.regex.Pattern)
1253    * @see #enableTable(java.lang.String)
1254    */
1255   @Override
1256   public HTableDescriptor[] enableTables(String regex) throws IOException {
1257     return enableTables(Pattern.compile(regex));
1258   }
1259 
1260   /**
1261    * Enable tables matching the passed in pattern and wait on completion.
1262    *
1263    * Warning: Use this method carefully, there is no prompting and the effect is
1264    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1265    * {@link #enableTable(byte[])}
1266    *
1267    * @param pattern The pattern to match table names against
1268    * @throws IOException
1269    */
1270   @Override
1271   public HTableDescriptor[] enableTables(Pattern pattern) throws IOException {
1272     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1273     for (HTableDescriptor table : listTables(pattern)) {
1274       if (isTableDisabled(table.getTableName())) {
1275         try {
1276           enableTable(table.getTableName());
1277         } catch (IOException ex) {
1278           LOG.info("Failed to enable table " + table.getTableName(), ex);
1279           failed.add(table);
1280         }
1281       }
1282     }
1283     return failed.toArray(new HTableDescriptor[failed.size()]);
1284   }
1285 
1286   /**
1287    * Starts the disable of a table.  If it is being served, the master
1288    * will tell the servers to stop serving it.  This method returns immediately.
1289    * The disable of a table can take some time if the table is large (all
1290    * regions are closed as part of table disable operation).
1291    * Call {@link #isTableDisabled(byte[])} to check for when disable completes.
1292    * If table is taking too long to online, check server logs.
1293    * @param tableName name of table
1294    * @throws IOException if a remote or network exception occurs
1295    * @see #isTableDisabled(byte[])
1296    * @see #isTableEnabled(byte[])
1297    * @since 0.90.0
1298    */
1299   @Override
1300   public void disableTableAsync(final TableName tableName) throws IOException {
1301     disableTableAsyncV2(tableName);
1302   }
1303 
1304   public void disableTableAsync(final byte[] tableName) throws IOException {
1305     disableTableAsync(TableName.valueOf(tableName));
1306   }
1307 
1308   public void disableTableAsync(final String tableName) throws IOException {
1309     disableTableAsync(TableName.valueOf(tableName));
1310   }
1311 
1312   /**
1313    * Disable table and wait on completion.  May timeout eventually.  Use
1314    * {@link #disableTableAsync(byte[])} and {@link #isTableDisabled(String)}
1315    * instead.
1316    * The table has to be in enabled state for it to be disabled.
1317    * @param tableName
1318    * @throws IOException
1319    * There could be couple types of IOException
1320    * TableNotFoundException means the table doesn't exist.
1321    * TableNotEnabledException means the table isn't in enabled state.
1322    */
1323   @Override
1324   public void disableTable(final TableName tableName)
1325   throws IOException {
1326     Future<Void> future = disableTableAsyncV2(tableName);
1327     try {
1328       future.get(syncWaitTimeout, TimeUnit.MILLISECONDS);
1329     } catch (InterruptedException e) {
1330       throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1331     } catch (TimeoutException e) {
1332       throw new TimeoutIOException(e);
1333     } catch (ExecutionException e) {
1334       if (e.getCause() instanceof IOException) {
1335         throw (IOException)e.getCause();
1336       } else {
1337         throw new IOException(e.getCause());
1338       }
1339     }
1340   }
1341 
1342   public void disableTable(final byte[] tableName)
1343   throws IOException {
1344     disableTable(TableName.valueOf(tableName));
1345   }
1346 
1347   public void disableTable(final String tableName)
1348   throws IOException {
1349     disableTable(TableName.valueOf(tableName));
1350   }
1351 
1352   /**
1353    * Disable the table but does not block and wait for it be completely disabled.
1354    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
1355    * It may throw ExecutionException if there was an error while executing the operation
1356    * or TimeoutException in case the wait timeout was not long enough to allow the
1357    * operation to complete.
1358    *
1359    * @param tableName name of table to delete
1360    * @throws IOException if a remote or network exception occurs
1361    * @return the result of the async disable. You can use Future.get(long, TimeUnit)
1362    *    to wait on the operation to complete.
1363    */
1364   // TODO: This should be called Async but it will break binary compatibility
1365   private Future<Void> disableTableAsyncV2(final TableName tableName) throws IOException {
1366     TableName.isLegalFullyQualifiedTableName(tableName.getName());
1367     DisableTableResponse response = executeCallable(
1368       new MasterCallable<DisableTableResponse>(getConnection()) {
1369         @Override
1370         public DisableTableResponse call(int callTimeout) throws ServiceException {
1371           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1372           controller.setCallTimeout(callTimeout);
1373           controller.setPriority(tableName);
1374 
1375           LOG.info("Started disable of " + tableName);
1376           DisableTableRequest req =
1377               RequestConverter.buildDisableTableRequest(
1378                 tableName, ng.getNonceGroup(), ng.newNonce());
1379           return master.disableTable(controller, req);
1380         }
1381       });
1382     return new DisableTableFuture(this, tableName, response);
1383   }
1384 
1385   private static class DisableTableFuture extends ProcedureFuture<Void> {
1386     private final TableName tableName;
1387 
1388     public DisableTableFuture(final HBaseAdmin admin, final TableName tableName,
1389         final DisableTableResponse response) {
1390       super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null);
1391       this.tableName = tableName;
1392     }
1393 
1394     @Override
1395     protected Void waitOperationResult(final long deadlineTs)
1396         throws IOException, TimeoutException {
1397       waitTableDisabled(deadlineTs);
1398       return null;
1399     }
1400 
1401     @Override
1402     protected Void postOperationResult(final Void result, final long deadlineTs)
1403         throws IOException, TimeoutException {
1404       LOG.info("Disabled " + tableName);
1405       return result;
1406     }
1407 
1408     private void waitTableDisabled(final long deadlineTs)
1409         throws IOException, TimeoutException {
1410       waitForState(deadlineTs, new WaitForStateCallable() {
1411         @Override
1412         public boolean checkState(int tries) throws IOException {
1413           return getAdmin().isTableDisabled(tableName);
1414         }
1415 
1416         @Override
1417         public void throwInterruptedException() throws InterruptedIOException {
1418           throw new InterruptedIOException("Interrupted when waiting for table to be disabled");
1419         }
1420 
1421         @Override
1422         public void throwTimeoutException(long elapsedTime) throws TimeoutException {
1423           throw new TimeoutException("Table " + tableName + " not yet disabled after " +
1424               elapsedTime + "msec");
1425         }
1426       });
1427     }
1428   }
1429 
1430   /**
1431    * Disable tables matching the passed in pattern and wait on completion.
1432    *
1433    * Warning: Use this method carefully, there is no prompting and the effect is
1434    * immediate. Consider using {@link #listTables(java.lang.String)} and
1435    * {@link #disableTable(byte[])}
1436    *
1437    * @param regex The regular expression to match table names against
1438    * @return Table descriptors for tables that couldn't be disabled
1439    * @throws IOException
1440    * @see #disableTables(java.util.regex.Pattern)
1441    * @see #disableTable(java.lang.String)
1442    */
1443   @Override
1444   public HTableDescriptor[] disableTables(String regex) throws IOException {
1445     return disableTables(Pattern.compile(regex));
1446   }
1447 
1448   /**
1449    * Disable tables matching the passed in pattern and wait on completion.
1450    *
1451    * Warning: Use this method carefully, there is no prompting and the effect is
1452    * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and
1453    * {@link #disableTable(byte[])}
1454    *
1455    * @param pattern The pattern to match table names against
1456    * @return Table descriptors for tables that couldn't be disabled
1457    * @throws IOException
1458    */
1459   @Override
1460   public HTableDescriptor[] disableTables(Pattern pattern) throws IOException {
1461     List<HTableDescriptor> failed = new LinkedList<HTableDescriptor>();
1462     for (HTableDescriptor table : listTables(pattern)) {
1463       if (isTableEnabled(table.getTableName())) {
1464         try {
1465           disableTable(table.getTableName());
1466         } catch (IOException ex) {
1467           LOG.info("Failed to disable table " + table.getTableName(), ex);
1468           failed.add(table);
1469         }
1470       }
1471     }
1472     return failed.toArray(new HTableDescriptor[failed.size()]);
1473   }
1474 
1475   /*
1476    * Checks whether table exists. If not, throws TableNotFoundException
1477    * @param tableName
1478    */
1479   private void checkTableExistence(TableName tableName) throws IOException {
1480     if (!tableExists(tableName)) {
1481       throw new TableNotFoundException(tableName);
1482     }
1483   }
1484 
1485   /**
1486    * @param tableName name of table to check
1487    * @return true if table is on-line
1488    * @throws IOException if a remote or network exception occurs
1489    */
1490   @Override
1491   public boolean isTableEnabled(TableName tableName) throws IOException {
1492     checkTableExistence(tableName);
1493     return connection.isTableEnabled(tableName);
1494   }
1495 
1496   public boolean isTableEnabled(byte[] tableName) throws IOException {
1497     return isTableEnabled(TableName.valueOf(tableName));
1498   }
1499 
1500   public boolean isTableEnabled(String tableName) throws IOException {
1501     return isTableEnabled(TableName.valueOf(tableName));
1502   }
1503 
1504 
1505 
1506   /**
1507    * @param tableName name of table to check
1508    * @return true if table is off-line
1509    * @throws IOException if a remote or network exception occurs
1510    */
1511   @Override
1512   public boolean isTableDisabled(TableName tableName) throws IOException {
1513     checkTableExistence(tableName);
1514     return connection.isTableDisabled(tableName);
1515   }
1516 
1517   public boolean isTableDisabled(byte[] tableName) throws IOException {
1518     return isTableDisabled(TableName.valueOf(tableName));
1519   }
1520 
1521   public boolean isTableDisabled(String tableName) throws IOException {
1522     return isTableDisabled(TableName.valueOf(tableName));
1523   }
1524 
1525   /**
1526    * @param tableName name of table to check
1527    * @return true if all regions of the table are available
1528    * @throws IOException if a remote or network exception occurs
1529    */
1530   @Override
1531   public boolean isTableAvailable(TableName tableName) throws IOException {
1532     return connection.isTableAvailable(tableName);
1533   }
1534 
1535   public boolean isTableAvailable(byte[] tableName) throws IOException {
1536     return isTableAvailable(TableName.valueOf(tableName));
1537   }
1538 
1539   public boolean isTableAvailable(String tableName) throws IOException {
1540     return isTableAvailable(TableName.valueOf(tableName));
1541   }
1542 
1543   /**
1544    * Use this api to check if the table has been created with the specified number of
1545    * splitkeys which was used while creating the given table.
1546    * Note : If this api is used after a table's region gets splitted, the api may return
1547    * false.
1548    * @param tableName
1549    *          name of table to check
1550    * @param splitKeys
1551    *          keys to check if the table has been created with all split keys
1552    * @throws IOException
1553    *           if a remote or network excpetion occurs
1554    */
1555   @Override
1556   public boolean isTableAvailable(TableName tableName,
1557                                   byte[][] splitKeys) throws IOException {
1558     return connection.isTableAvailable(tableName, splitKeys);
1559   }
1560 
1561   public boolean isTableAvailable(byte[] tableName,
1562                                   byte[][] splitKeys) throws IOException {
1563     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1564   }
1565 
1566   public boolean isTableAvailable(String tableName,
1567                                   byte[][] splitKeys) throws IOException {
1568     return isTableAvailable(TableName.valueOf(tableName), splitKeys);
1569   }
1570 
1571   /**
1572    * Get the status of alter command - indicates how many regions have received
1573    * the updated schema Asynchronous operation.
1574    *
1575    * @param tableName TableName instance
1576    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1577    *         regions that are yet to be updated Pair.getSecond() is the total number
1578    *         of regions of the table
1579    * @throws IOException
1580    *           if a remote or network exception occurs
1581    */
1582   @Override
1583   public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
1584   throws IOException {
1585     return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
1586       @Override
1587       public Pair<Integer, Integer> call(int callTimeout) throws ServiceException {
1588         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1589         controller.setCallTimeout(callTimeout);
1590         controller.setPriority(tableName);
1591 
1592         GetSchemaAlterStatusRequest req = RequestConverter
1593             .buildGetSchemaAlterStatusRequest(tableName);
1594         GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(controller, req);
1595         Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
1596             .getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
1597         return pair;
1598       }
1599     });
1600   }
1601 
1602   /**
1603    * Get the status of alter command - indicates how many regions have received
1604    * the updated schema Asynchronous operation.
1605    *
1606    * @param tableName
1607    *          name of the table to get the status of
1608    * @return Pair indicating the number of regions updated Pair.getFirst() is the
1609    *         regions that are yet to be updated Pair.getSecond() is the total number
1610    *         of regions of the table
1611    * @throws IOException
1612    *           if a remote or network exception occurs
1613    */
1614   @Override
1615   public Pair<Integer, Integer> getAlterStatus(final byte[] tableName)
1616    throws IOException {
1617     return getAlterStatus(TableName.valueOf(tableName));
1618   }
1619 
1620   /**
1621    * Add a column to an existing table.
1622    * Asynchronous operation.
1623    *
1624    * @param tableName name of the table to add column to
1625    * @param column column descriptor of column to be added
1626    * @throws IOException if a remote or network exception occurs
1627    */
1628   public void addColumn(final byte[] tableName, HColumnDescriptor column)
1629   throws IOException {
1630     addColumn(TableName.valueOf(tableName), column);
1631   }
1632 
1633   /**
1634    * Add a column to an existing table.
1635    * Asynchronous operation.
1636    *
1637    * @param tableName name of the table to add column to
1638    * @param column column descriptor of column to be added
1639    * @throws IOException if a remote or network exception occurs
1640    */
1641   public void addColumn(final String tableName, HColumnDescriptor column)
1642   throws IOException {
1643     addColumn(TableName.valueOf(tableName), column);
1644   }
1645 
1646   /**
1647    * Add a column to an existing table.
1648    * Asynchronous operation.
1649    *
1650    * @param tableName name of the table to add column to
1651    * @param column column descriptor of column to be added
1652    * @throws IOException if a remote or network exception occurs
1653    */
1654   @Override
1655   public void addColumn(final TableName tableName, final HColumnDescriptor column)
1656   throws IOException {
1657     executeCallable(new MasterCallable<Void>(getConnection()) {
1658       @Override
1659       public Void call(int callTimeout) throws ServiceException {
1660         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1661         controller.setCallTimeout(callTimeout);
1662         controller.setPriority(tableName);
1663         AddColumnRequest req = RequestConverter.buildAddColumnRequest(
1664           tableName, column, ng.getNonceGroup(), ng.newNonce());
1665         master.addColumn(controller,req);
1666         return null;
1667       }
1668     });
1669   }
1670 
1671   /**
1672    * Delete a column from a table.
1673    * Asynchronous operation.
1674    *
1675    * @param tableName name of table
1676    * @param columnName name of column to be deleted
1677    * @throws IOException if a remote or network exception occurs
1678    */
1679   public void deleteColumn(final byte[] tableName, final String columnName)
1680   throws IOException {
1681     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1682   }
1683 
1684   /**
1685    * Delete a column from a table.
1686    * Asynchronous operation.
1687    *
1688    * @param tableName name of table
1689    * @param columnName name of column to be deleted
1690    * @throws IOException if a remote or network exception occurs
1691    */
1692   public void deleteColumn(final String tableName, final String columnName)
1693   throws IOException {
1694     deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName));
1695   }
1696 
1697   /**
1698    * Delete a column from a table.
1699    * Asynchronous operation.
1700    *
1701    * @param tableName name of table
1702    * @param columnName name of column to be deleted
1703    * @throws IOException if a remote or network exception occurs
1704    */
1705   @Override
1706   public void deleteColumn(final TableName tableName, final byte [] columnName)
1707   throws IOException {
1708     executeCallable(new MasterCallable<Void>(getConnection()) {
1709       @Override
1710       public Void call(int callTimeout) throws ServiceException {
1711         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1712         controller.setCallTimeout(callTimeout);
1713         controller.setPriority(tableName);
1714         DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(
1715           tableName, columnName, ng.getNonceGroup(), ng.newNonce());
1716         master.deleteColumn(controller, req);
1717         return null;
1718       }
1719     });
1720   }
1721 
1722   /**
1723    * Modify an existing column family on a table.
1724    * Asynchronous operation.
1725    *
1726    * @param tableName name of table
1727    * @param descriptor new column descriptor to use
1728    * @throws IOException if a remote or network exception occurs
1729    */
1730   public void modifyColumn(final String tableName, HColumnDescriptor descriptor)
1731   throws IOException {
1732     modifyColumn(TableName.valueOf(tableName), descriptor);
1733   }
1734 
1735   /**
1736    * Modify an existing column family on a table.
1737    * Asynchronous operation.
1738    *
1739    * @param tableName name of table
1740    * @param descriptor new column descriptor to use
1741    * @throws IOException if a remote or network exception occurs
1742    */
1743   public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor)
1744   throws IOException {
1745     modifyColumn(TableName.valueOf(tableName), descriptor);
1746   }
1747 
1748   /**
1749    * Modify an existing column family on a table.
1750    * Asynchronous operation.
1751    *
1752    * @param tableName name of table
1753    * @param descriptor new column descriptor to use
1754    * @throws IOException if a remote or network exception occurs
1755    */
1756   @Override
1757   public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
1758   throws IOException {
1759     executeCallable(new MasterCallable<Void>(getConnection()) {
1760       @Override
1761       public Void call(int callTimeout) throws ServiceException {
1762         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1763         controller.setCallTimeout(callTimeout);
1764         controller.setPriority(tableName);
1765         ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(
1766           tableName, descriptor, ng.getNonceGroup(), ng.newNonce());
1767         master.modifyColumn(controller, req);
1768         return null;
1769       }
1770     });
1771   }
1772 
1773   /**
1774    * Close a region. For expert-admins.  Runs close on the regionserver.  The
1775    * master will not be informed of the close.
1776    * @param regionname region name to close
1777    * @param serverName If supplied, we'll use this location rather than
1778    * the one currently in <code>hbase:meta</code>
1779    * @throws IOException if a remote or network exception occurs
1780    */
1781   @Override
1782   public void closeRegion(final String regionname, final String serverName)
1783   throws IOException {
1784     closeRegion(Bytes.toBytes(regionname), serverName);
1785   }
1786 
1787   /**
1788    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1789    * master will not be informed of the close.
1790    * @param regionname region name to close
1791    * @param serverName The servername of the regionserver.  If passed null we
1792    * will use servername found in the hbase:meta table. A server name
1793    * is made of host, port and startcode.  Here is an example:
1794    * <code> host187.example.com,60020,1289493121758</code>
1795    * @throws IOException if a remote or network exception occurs
1796    */
1797   @Override
1798   public void closeRegion(final byte [] regionname, final String serverName)
1799       throws IOException {
1800     if (serverName != null) {
1801       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1802       if (pair == null || pair.getFirst() == null) {
1803         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1804       } else {
1805         closeRegion(ServerName.valueOf(serverName), pair.getFirst());
1806       }
1807     } else {
1808       Pair<HRegionInfo, ServerName> pair = MetaTableAccessor.getRegion(connection, regionname);
1809       if (pair == null) {
1810         throw new UnknownRegionException(Bytes.toStringBinary(regionname));
1811       } else if (pair.getSecond() == null) {
1812         throw new NoServerForRegionException(Bytes.toStringBinary(regionname));
1813       } else {
1814         closeRegion(pair.getSecond(), pair.getFirst());
1815       }
1816     }
1817   }
1818 
1819   /**
1820    * For expert-admins. Runs close on the regionserver. Closes a region based on
1821    * the encoded region name. The region server name is mandatory. If the
1822    * servername is provided then based on the online regions in the specified
1823    * regionserver the specified region will be closed. The master will not be
1824    * informed of the close. Note that the regionname is the encoded regionname.
1825    *
1826    * @param encodedRegionName
1827    *          The encoded region name; i.e. the hash that makes up the region
1828    *          name suffix: e.g. if regionname is
1829    *          <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>
1830    *          , then the encoded region name is:
1831    *          <code>527db22f95c8a9e0116f0cc13c680396</code>.
1832    * @param serverName
1833    *          The servername of the regionserver. A server name is made of host,
1834    *          port and startcode. This is mandatory. Here is an example:
1835    *          <code> host187.example.com,60020,1289493121758</code>
1836    * @return true if the region was closed, false if not.
1837    * @throws IOException
1838    *           if a remote or network exception occurs
1839    */
1840   @Override
1841   public boolean closeRegionWithEncodedRegionName(final String encodedRegionName,
1842       final String serverName) throws IOException {
1843     if (null == serverName || ("").equals(serverName.trim())) {
1844       throw new IllegalArgumentException(
1845           "The servername cannot be null or empty.");
1846     }
1847     ServerName sn = ServerName.valueOf(serverName);
1848     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1849     // Close the region without updating zk state.
1850     CloseRegionRequest request =
1851       RequestConverter.buildCloseRegionRequest(sn, encodedRegionName, false);
1852     try {
1853       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1854 
1855       // TODO: this does not do retries, it should. Set priority and timeout in controller
1856       CloseRegionResponse response = admin.closeRegion(controller, request);
1857       boolean isRegionClosed = response.getClosed();
1858       if (false == isRegionClosed) {
1859         LOG.error("Not able to close the region " + encodedRegionName + ".");
1860       }
1861       return isRegionClosed;
1862     } catch (ServiceException se) {
1863       throw ProtobufUtil.getRemoteException(se);
1864     }
1865   }
1866 
1867   /**
1868    * Close a region.  For expert-admins  Runs close on the regionserver.  The
1869    * master will not be informed of the close.
1870    * @param sn
1871    * @param hri
1872    * @throws IOException
1873    */
1874   @Override
1875   public void closeRegion(final ServerName sn, final HRegionInfo hri)
1876   throws IOException {
1877     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1878     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1879 
1880     // Close the region without updating zk state.
1881     ProtobufUtil.closeRegion(controller, admin, sn, hri.getRegionName(), false);
1882   }
1883 
1884   /**
1885    * Get all the online regions on a region server.
1886    */
1887   @Override
1888   public List<HRegionInfo> getOnlineRegions(final ServerName sn) throws IOException {
1889     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1890     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1891     return ProtobufUtil.getOnlineRegions(controller, admin);
1892   }
1893 
1894   /**
1895    * {@inheritDoc}
1896    */
1897   @Override
1898   public void flush(final TableName tableName) throws IOException {
1899     checkTableExists(tableName);
1900     if (isTableDisabled(tableName)) {
1901       LOG.info("Table is disabled: " + tableName.getNameAsString());
1902       return;
1903     }
1904     execProcedure("flush-table-proc", tableName.getNameAsString(),
1905       new HashMap<String, String>());
1906   }
1907 
1908   /**
1909    * {@inheritDoc}
1910    */
1911   @Override
1912   public void flushRegion(final byte[] regionName) throws IOException {
1913     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
1914     if (regionServerPair == null) {
1915       throw new IllegalArgumentException("Unknown regionname: " + Bytes.toStringBinary(regionName));
1916     }
1917     if (regionServerPair.getSecond() == null) {
1918       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
1919     }
1920     flush(regionServerPair.getSecond(), regionServerPair.getFirst());
1921   }
1922 
1923   /**
1924    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1925    * (byte[])} instead.
1926    */
1927   @Deprecated
1928   public void flush(final String tableNameOrRegionName)
1929   throws IOException, InterruptedException {
1930     flush(Bytes.toBytes(tableNameOrRegionName));
1931   }
1932 
1933   /**
1934    * @deprecated Use {@link #flush(org.apache.hadoop.hbase.TableName)} or {@link #flushRegion
1935    * (byte[])} instead.
1936    */
1937   @Deprecated
1938   public void flush(final byte[] tableNameOrRegionName)
1939   throws IOException, InterruptedException {
1940     try {
1941       flushRegion(tableNameOrRegionName);
1942     } catch (IllegalArgumentException e) {
1943       // Unknown region.  Try table.
1944       flush(TableName.valueOf(tableNameOrRegionName));
1945     }
1946   }
1947 
1948   private void flush(final ServerName sn, final HRegionInfo hri)
1949   throws IOException {
1950     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
1951     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
1952     FlushRegionRequest request =
1953       RequestConverter.buildFlushRegionRequest(hri.getRegionName());
1954     try {
1955       admin.flushRegion(controller, request);
1956     } catch (ServiceException se) {
1957       throw ProtobufUtil.getRemoteException(se);
1958     }
1959   }
1960 
1961   /**
1962    * {@inheritDoc}
1963    */
1964   @Override
1965   public void compact(final TableName tableName)
1966     throws IOException {
1967     compact(tableName, null, false);
1968   }
1969 
1970   /**
1971    * {@inheritDoc}
1972    */
1973   @Override
1974   public void compactRegion(final byte[] regionName)
1975     throws IOException {
1976     compactRegion(regionName, null, false);
1977   }
1978 
1979   /**
1980    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1981    * (byte[])} instead.
1982    */
1983   @Deprecated
1984   public void compact(final String tableNameOrRegionName)
1985   throws IOException {
1986     compact(Bytes.toBytes(tableNameOrRegionName));
1987   }
1988 
1989   /**
1990    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
1991    * (byte[])} instead.
1992    */
1993   @Deprecated
1994   public void compact(final byte[] tableNameOrRegionName)
1995   throws IOException {
1996     try {
1997       compactRegion(tableNameOrRegionName, null, false);
1998     } catch (IllegalArgumentException e) {
1999       compact(TableName.valueOf(tableNameOrRegionName), null, false);
2000     }
2001   }
2002 
2003   /**
2004    * {@inheritDoc}
2005    */
2006   @Override
2007   public void compact(final TableName tableName, final byte[] columnFamily)
2008     throws IOException {
2009     compact(tableName, columnFamily, false);
2010   }
2011 
2012   /**
2013    * {@inheritDoc}
2014    */
2015   @Override
2016   public void compactRegion(final byte[] regionName, final byte[] columnFamily)
2017     throws IOException {
2018     compactRegion(regionName, columnFamily, false);
2019   }
2020 
2021   /**
2022    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
2023    * (byte[], byte[])} instead.
2024    */
2025   @Deprecated
2026   public void compact(String tableOrRegionName, String columnFamily)
2027     throws IOException {
2028     compact(Bytes.toBytes(tableOrRegionName), Bytes.toBytes(columnFamily));
2029   }
2030 
2031   /**
2032    * @deprecated Use {@link #compact(org.apache.hadoop.hbase.TableName)} or {@link #compactRegion
2033    * (byte[], byte[])} instead.
2034    */
2035   @Deprecated
2036   public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2037   throws IOException {
2038     try {
2039       compactRegion(tableNameOrRegionName, columnFamily, false);
2040     } catch (IllegalArgumentException e) {
2041       // Bad region, try table
2042       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, false);
2043     }
2044   }
2045 
2046   /**
2047    * {@inheritDoc}
2048    */
2049   @Override
2050   public void compactRegionServer(final ServerName sn, boolean major)
2051   throws IOException, InterruptedException {
2052     for (HRegionInfo region : getOnlineRegions(sn)) {
2053       compact(sn, region, major, null);
2054     }
2055   }
2056 
2057   /**
2058    * {@inheritDoc}
2059    */
2060   @Override
2061   public void majorCompact(final TableName tableName)
2062   throws IOException {
2063     compact(tableName, null, true);
2064   }
2065 
2066   /**
2067    * {@inheritDoc}
2068    */
2069   @Override
2070   public void majorCompactRegion(final byte[] regionName)
2071   throws IOException {
2072     compactRegion(regionName, null, true);
2073   }
2074 
2075   /**
2076    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2077    * #majorCompactRegion(byte[])} instead.
2078    */
2079   @Deprecated
2080   public void majorCompact(final String tableNameOrRegionName)
2081   throws IOException {
2082     majorCompact(Bytes.toBytes(tableNameOrRegionName));
2083   }
2084 
2085   /**
2086    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName)} or {@link
2087    * #majorCompactRegion(byte[])} instead.
2088    */
2089   @Deprecated
2090   public void majorCompact(final byte[] tableNameOrRegionName)
2091   throws IOException {
2092     try {
2093       compactRegion(tableNameOrRegionName, null, true);
2094     } catch (IllegalArgumentException e) {
2095       // Invalid region, try table
2096       compact(TableName.valueOf(tableNameOrRegionName), null, true);
2097     }
2098   }
2099 
2100   /**
2101    * {@inheritDoc}
2102    */
2103   @Override
2104   public void majorCompact(final TableName tableName, final byte[] columnFamily)
2105   throws IOException {
2106     compact(tableName, columnFamily, true);
2107   }
2108 
2109   /**
2110    * {@inheritDoc}
2111    */
2112   @Override
2113   public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily)
2114   throws IOException {
2115     compactRegion(regionName, columnFamily, true);
2116   }
2117 
2118   /**
2119    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2120    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2121    */
2122   @Deprecated
2123   public void majorCompact(final String tableNameOrRegionName, final String columnFamily)
2124   throws IOException {
2125     majorCompact(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(columnFamily));
2126   }
2127 
2128   /**
2129    * @deprecated Use {@link #majorCompact(org.apache.hadoop.hbase.TableName,
2130    * byte[])} or {@link #majorCompactRegion(byte[], byte[])} instead.
2131    */
2132   @Deprecated
2133   public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily)
2134   throws IOException {
2135     try {
2136       compactRegion(tableNameOrRegionName, columnFamily, true);
2137     } catch (IllegalArgumentException e) {
2138       // Invalid region, try table
2139       compact(TableName.valueOf(tableNameOrRegionName), columnFamily, true);
2140     }
2141   }
2142 
2143   /**
2144    * Compact a table.
2145    * Asynchronous operation.
2146    *
2147    * @param tableName table or region to compact
2148    * @param columnFamily column family within a table or region
2149    * @param major True if we are to do a major compaction.
2150    * @throws IOException if a remote or network exception occurs
2151    * @throws InterruptedException
2152    */
2153   private void compact(final TableName tableName, final byte[] columnFamily,final boolean major)
2154   throws IOException {
2155     ZooKeeperWatcher zookeeper = null;
2156     try {
2157       checkTableExists(tableName);
2158       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2159           new ThrowableAbortable());
2160       List<Pair<HRegionInfo, ServerName>> pairs =
2161         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2162       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2163         if (pair.getFirst().isOffline()) continue;
2164         if (pair.getSecond() == null) continue;
2165         try {
2166           compact(pair.getSecond(), pair.getFirst(), major, columnFamily);
2167         } catch (NotServingRegionException e) {
2168           if (LOG.isDebugEnabled()) {
2169             LOG.debug("Trying to" + (major ? " major" : "") + " compact " +
2170               pair.getFirst() + ": " +
2171               StringUtils.stringifyException(e));
2172           }
2173         }
2174       }
2175     } finally {
2176       if (zookeeper != null) {
2177         zookeeper.close();
2178       }
2179     }
2180   }
2181 
2182   /**
2183    * Compact an individual region.
2184    * Asynchronous operation.
2185    *
2186    * @param regionName region to compact
2187    * @param columnFamily column family within a table or region
2188    * @param major True if we are to do a major compaction.
2189    * @throws IOException if a remote or network exception occurs
2190    * @throws InterruptedException
2191    */
2192   private void compactRegion(final byte[] regionName, final byte[] columnFamily,final boolean major)
2193   throws IOException {
2194     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2195     if (regionServerPair == null) {
2196       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2197     }
2198     if (regionServerPair.getSecond() == null) {
2199       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2200     }
2201     compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily);
2202   }
2203 
2204   private void compact(final ServerName sn, final HRegionInfo hri,
2205       final boolean major, final byte [] family)
2206   throws IOException {
2207     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2208     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2209     CompactRegionRequest request =
2210       RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, family);
2211     try {
2212       // TODO: this does not do retries, it should. Set priority and timeout in controller
2213       admin.compactRegion(controller, request);
2214     } catch (ServiceException se) {
2215       throw ProtobufUtil.getRemoteException(se);
2216     }
2217   }
2218 
2219   /**
2220    * Move the region <code>r</code> to <code>dest</code>.
2221    * @param encodedRegionName The encoded region name; i.e. the hash that makes
2222    * up the region name suffix: e.g. if regionname is
2223    * <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
2224    * then the encoded region name is: <code>527db22f95c8a9e0116f0cc13c680396</code>.
2225    * @param destServerName The servername of the destination regionserver.  If
2226    * passed the empty byte array we'll assign to a random server.  A server name
2227    * is made of host, port and startcode.  Here is an example:
2228    * <code> host187.example.com,60020,1289493121758</code>
2229    * @throws UnknownRegionException Thrown if we can't find a region named
2230    * <code>encodedRegionName</code>
2231    */
2232   @Override
2233   public void move(final byte [] encodedRegionName, final byte [] destServerName)
2234       throws IOException {
2235 
2236     executeCallable(new MasterCallable<Void>(getConnection()) {
2237       @Override
2238       public Void call(int callTimeout) throws ServiceException {
2239         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2240         controller.setCallTimeout(callTimeout);
2241         // Hard to know the table name, at least check if meta
2242         if (isMetaRegion(encodedRegionName)) {
2243           controller.setPriority(TableName.META_TABLE_NAME);
2244         }
2245 
2246         try {
2247           MoveRegionRequest request =
2248               RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
2249             master.moveRegion(controller, request);
2250         } catch (DeserializationException de) {
2251           LOG.error("Could not parse destination server name: " + de);
2252           throw new ServiceException(new DoNotRetryIOException(de));
2253         }
2254         return null;
2255       }
2256     });
2257   }
2258 
2259   private boolean isMetaRegion(final byte[] regionName) {
2260     return Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2261         || Bytes.equals(regionName, HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
2262   }
2263 
2264   /**
2265    * @param regionName
2266    *          Region name to assign.
2267    * @throws MasterNotRunningException
2268    * @throws ZooKeeperConnectionException
2269    * @throws IOException
2270    */
2271   @Override
2272   public void assign(final byte[] regionName) throws MasterNotRunningException,
2273       ZooKeeperConnectionException, IOException {
2274     final byte[] toBeAssigned = getRegionName(regionName);
2275     executeCallable(new MasterCallable<Void>(getConnection()) {
2276       @Override
2277       public Void call(int callTimeout) throws ServiceException {
2278         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2279         controller.setCallTimeout(callTimeout);
2280         // Hard to know the table name, at least check if meta
2281         if (isMetaRegion(regionName)) {
2282           controller.setPriority(TableName.META_TABLE_NAME);
2283         }
2284 
2285         AssignRegionRequest request =
2286           RequestConverter.buildAssignRegionRequest(toBeAssigned);
2287         master.assignRegion(controller,request);
2288         return null;
2289       }
2290     });
2291   }
2292 
2293   /**
2294    * Unassign a region from current hosting regionserver.  Region will then be
2295    * assigned to a regionserver chosen at random.  Region could be reassigned
2296    * back to the same server.  Use {@link #move(byte[], byte[])} if you want
2297    * to control the region movement.
2298    * @param regionName Region to unassign. Will clear any existing RegionPlan
2299    * if one found.
2300    * @param force If true, force unassign (Will remove region from
2301    * regions-in-transition too if present. If results in double assignment
2302    * use hbck -fix to resolve. To be used by experts).
2303    * @throws MasterNotRunningException
2304    * @throws ZooKeeperConnectionException
2305    * @throws IOException
2306    */
2307   @Override
2308   public void unassign(final byte [] regionName, final boolean force)
2309   throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
2310     final byte[] toBeUnassigned = getRegionName(regionName);
2311     executeCallable(new MasterCallable<Void>(getConnection()) {
2312       @Override
2313       public Void call(int callTimeout) throws ServiceException {
2314         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2315         controller.setCallTimeout(callTimeout);
2316         // Hard to know the table name, at least check if meta
2317         if (isMetaRegion(regionName)) {
2318           controller.setPriority(TableName.META_TABLE_NAME);
2319         }
2320         UnassignRegionRequest request =
2321           RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
2322         master.unassignRegion(controller, request);
2323         return null;
2324       }
2325     });
2326   }
2327 
2328   /**
2329    * Offline specified region from master's in-memory state. It will not attempt to reassign the
2330    * region as in unassign. This API can be used when a region not served by any region server and
2331    * still online as per Master's in memory state. If this API is incorrectly used on active region
2332    * then master will loose track of that region.
2333    *
2334    * This is a special method that should be used by experts or hbck.
2335    *
2336    * @param regionName
2337    *          Region to offline.
2338    * @throws IOException
2339    */
2340   @Override
2341   public void offline(final byte [] regionName)
2342   throws IOException {
2343     executeCallable(new MasterCallable<Void>(getConnection()) {
2344       @Override
2345       public Void call(int callTimeout) throws ServiceException {
2346         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2347         controller.setCallTimeout(callTimeout);
2348         // Hard to know the table name, at least check if meta
2349         if (isMetaRegion(regionName)) {
2350           controller.setPriority(TableName.META_TABLE_NAME);
2351         }
2352         master.offlineRegion(controller, RequestConverter.buildOfflineRegionRequest(regionName));
2353         return null;
2354       }
2355     });
2356   }
2357 
2358   /**
2359    * Turn the load balancer on or off.
2360    * @param on If true, enable balancer. If false, disable balancer.
2361    * @param synchronous If true, it waits until current balance() call, if outstanding, to return.
2362    * @return Previous balancer value
2363    */
2364   @Override
2365   public boolean setBalancerRunning(final boolean on, final boolean synchronous)
2366   throws IOException {
2367     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2368       @Override
2369       public Boolean call(int callTimeout) throws ServiceException {
2370         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2371         controller.setCallTimeout(callTimeout);
2372 
2373         SetBalancerRunningRequest req =
2374             RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
2375         return master.setBalancerRunning(controller, req).getPrevBalanceValue();
2376       }
2377     });
2378   }
2379 
2380   /**
2381    * Invoke the balancer.  Will run the balancer and if regions to move, it will
2382    * go ahead and do the reassignments.  Can NOT run for various reasons.  Check
2383    * logs.
2384    * @return True if balancer ran, false otherwise.
2385    */
2386   @Override
2387   public boolean balancer() throws IOException {
2388     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2389       @Override
2390       public Boolean call(int callTimeout) throws ServiceException {
2391         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2392         controller.setCallTimeout(callTimeout);
2393         return master.balance(controller, RequestConverter.buildBalanceRequest()).getBalancerRan();
2394       }
2395     });
2396   }
2397 
2398   /**
2399    * Query the state of the balancer from the Master. It's not a guarantee that the balancer is
2400    * actually running this very moment, but that it will run.
2401    *
2402    * @return True if the balancer is enabled, false otherwise.
2403    */
2404   @Override
2405   public boolean isBalancerEnabled() throws IOException {
2406     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2407       @Override
2408       public Boolean call(int callTimeout) throws ServiceException {
2409         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2410         controller.setCallTimeout(callTimeout);
2411 
2412         return master.isBalancerEnabled(controller,
2413           RequestConverter.buildIsBalancerEnabledRequest()).getEnabled();
2414       }
2415     });
2416   }
2417 
2418   /**
2419    * Invoke region normalizer. Can NOT run for various reasons.  Check logs.
2420    *
2421    * @return True if region normalizer ran, false otherwise.
2422    */
2423   @Override
2424   public boolean normalize() throws IOException {
2425     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2426       @Override
2427       public Boolean call(int callTimeout) throws ServiceException {
2428         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2429         controller.setCallTimeout(callTimeout);
2430 
2431         return master.normalize(controller,
2432           RequestConverter.buildNormalizeRequest()).getNormalizerRan();
2433       }
2434     });
2435   }
2436 
2437   /**
2438    * Query the current state of the region normalizer
2439    *
2440    * @return true if region normalizer is enabled, false otherwise.
2441    */
2442   public boolean isNormalizerEnabled() throws IOException {
2443     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2444       @Override
2445       public Boolean call(int callTimeout) throws ServiceException {
2446         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2447         controller.setCallTimeout(callTimeout);
2448 
2449         return master.isNormalizerEnabled(controller,
2450           RequestConverter.buildIsNormalizerEnabledRequest()).getEnabled();
2451       }
2452     });
2453   }
2454 
2455   /**
2456    * Turn region normalizer on or off.
2457    *
2458    * @return Previous normalizer value
2459    */
2460   public boolean setNormalizerRunning(final boolean on) throws IOException {
2461     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2462       @Override
2463       public Boolean call(int callTimeout) throws ServiceException {
2464         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2465         controller.setCallTimeout(callTimeout);
2466 
2467         SetNormalizerRunningRequest req =
2468           RequestConverter.buildSetNormalizerRunningRequest(on);
2469         return master.setNormalizerRunning(controller, req).getPrevNormalizerValue();
2470       }
2471     });
2472   }
2473 
2474   /**
2475    * Enable/Disable the catalog janitor
2476    * @param enable if true enables the catalog janitor
2477    * @return the previous state
2478    * @throws MasterNotRunningException
2479    */
2480   @Override
2481   public boolean enableCatalogJanitor(final boolean enable)
2482       throws IOException {
2483     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2484       @Override
2485       public Boolean call(int callTimeout) throws ServiceException {
2486         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2487         controller.setCallTimeout(callTimeout);
2488 
2489         return master.enableCatalogJanitor(controller,
2490           RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
2491       }
2492     });
2493   }
2494 
2495   /**
2496    * Ask for a scan of the catalog table
2497    * @return the number of entries cleaned
2498    * @throws MasterNotRunningException
2499    */
2500   @Override
2501   public int runCatalogScan() throws IOException {
2502     return executeCallable(new MasterCallable<Integer>(getConnection()) {
2503       @Override
2504       public Integer call(int callTimeout) throws ServiceException {
2505         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2506         controller.setCallTimeout(callTimeout);
2507 
2508         return master.runCatalogScan(controller,
2509           RequestConverter.buildCatalogScanRequest()).getScanResult();
2510       }
2511     });
2512   }
2513 
2514   /**
2515    * Query on the catalog janitor state (Enabled/Disabled?)
2516    * @throws org.apache.hadoop.hbase.MasterNotRunningException
2517    */
2518   @Override
2519   public boolean isCatalogJanitorEnabled() throws IOException {
2520     return executeCallable(new MasterCallable<Boolean>(getConnection()) {
2521       @Override
2522       public Boolean call(int callTimeout) throws ServiceException {
2523         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2524         controller.setCallTimeout(callTimeout);
2525 
2526         return master.isCatalogJanitorEnabled(controller,
2527           RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
2528       }
2529     });
2530   }
2531 
2532   private boolean isEncodedRegionName(byte[] regionName) throws IOException {
2533     try {
2534       HRegionInfo.parseRegionName(regionName);
2535       return false;
2536     } catch (IOException e) {
2537       if (StringUtils.stringifyException(e)
2538         .contains(HRegionInfo.INVALID_REGION_NAME_FORMAT_MESSAGE)) {
2539         return true;
2540       }
2541       throw e;
2542     }
2543   }
2544 
2545   /**
2546    * Merge two regions. Asynchronous operation.
2547    * @param nameOfRegionA encoded or full name of region a
2548    * @param nameOfRegionB encoded or full name of region b
2549    * @param forcible true if do a compulsory merge, otherwise we will only merge
2550    *          two adjacent regions
2551    * @throws IOException
2552    */
2553   @Override
2554   public void mergeRegions(final byte[] nameOfRegionA,
2555       final byte[] nameOfRegionB, final boolean forcible)
2556       throws IOException {
2557     final byte[] encodedNameOfRegionA = isEncodedRegionName(nameOfRegionA) ?
2558       nameOfRegionA : HRegionInfo.encodeRegionName(nameOfRegionA).getBytes();
2559     final byte[] encodedNameOfRegionB = isEncodedRegionName(nameOfRegionB) ?
2560       nameOfRegionB : HRegionInfo.encodeRegionName(nameOfRegionB).getBytes();
2561 
2562     Pair<HRegionInfo, ServerName> pair = getRegion(nameOfRegionA);
2563     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2564       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2565     pair = getRegion(nameOfRegionB);
2566     if (pair != null && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID)
2567       throw new IllegalArgumentException("Can't invoke merge on non-default regions directly");
2568     executeCallable(new MasterCallable<Void>(getConnection()) {
2569       @Override
2570       public Void call(int callTimeout) throws ServiceException {
2571         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2572         controller.setCallTimeout(callTimeout);
2573 
2574         try {
2575           DispatchMergingRegionsRequest request = RequestConverter
2576               .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
2577                 encodedNameOfRegionB, forcible);
2578           master.dispatchMergingRegions(controller, request);
2579         } catch (DeserializationException de) {
2580           LOG.error("Could not parse destination server name: " + de);
2581         }
2582         return null;
2583       }
2584     });
2585   }
2586 
2587   /**
2588    * {@inheritDoc}
2589    */
2590   @Override
2591   public void split(final TableName tableName)
2592     throws IOException {
2593     split(tableName, null);
2594   }
2595 
2596   /**
2597    * {@inheritDoc}
2598    */
2599   @Override
2600   public void splitRegion(final byte[] regionName)
2601     throws IOException {
2602     splitRegion(regionName, null);
2603   }
2604 
2605   /**
2606    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2607    * (byte[])} instead.
2608    */
2609   @Deprecated
2610   public void split(final String tableNameOrRegionName)
2611   throws IOException, InterruptedException {
2612     split(Bytes.toBytes(tableNameOrRegionName));
2613   }
2614 
2615   /**
2616    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName)} or {@link #splitRegion
2617    * (byte[])} instead.
2618    */
2619   @Deprecated
2620   public void split(final byte[] tableNameOrRegionName)
2621   throws IOException, InterruptedException {
2622     split(tableNameOrRegionName, null);
2623   }
2624 
2625   /**
2626    * {@inheritDoc}
2627    */
2628   @Override
2629   public void split(final TableName tableName, final byte [] splitPoint)
2630   throws IOException {
2631     ZooKeeperWatcher zookeeper = null;
2632     try {
2633       checkTableExists(tableName);
2634       zookeeper = new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
2635         new ThrowableAbortable());
2636       List<Pair<HRegionInfo, ServerName>> pairs =
2637         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
2638       for (Pair<HRegionInfo, ServerName> pair: pairs) {
2639         // May not be a server for a particular row
2640         if (pair.getSecond() == null) continue;
2641         HRegionInfo r = pair.getFirst();
2642         // check for parents
2643         if (r.isSplitParent()) continue;
2644         // if a split point given, only split that particular region
2645         if (r.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
2646            (splitPoint != null && !r.containsRow(splitPoint))) continue;
2647         // call out to region server to do split now
2648         split(pair.getSecond(), pair.getFirst(), splitPoint);
2649       }
2650     } finally {
2651       if (zookeeper != null) {
2652         zookeeper.close();
2653       }
2654     }
2655   }
2656 
2657   /**
2658    * {@inheritDoc}
2659    */
2660   @Override
2661   public void splitRegion(final byte[] regionName, final byte [] splitPoint)
2662   throws IOException {
2663     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
2664     if (regionServerPair == null) {
2665       throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
2666     }
2667     if (regionServerPair.getFirst() != null &&
2668         regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
2669       throw new IllegalArgumentException("Can't split replicas directly. "
2670           + "Replicas are auto-split when their primary is split.");
2671     }
2672     if (regionServerPair.getSecond() == null) {
2673       throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
2674     }
2675     split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint);
2676   }
2677 
2678   /**
2679    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2680    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2681    */
2682   @Deprecated
2683   public void split(final String tableNameOrRegionName,
2684     final String splitPoint) throws IOException {
2685     split(Bytes.toBytes(tableNameOrRegionName), Bytes.toBytes(splitPoint));
2686   }
2687 
2688   /**
2689    * @deprecated Use {@link #split(org.apache.hadoop.hbase.TableName,
2690    * byte[])} or {@link #splitRegion(byte[], byte[])} instead.
2691    */
2692   @Deprecated
2693   public void split(final byte[] tableNameOrRegionName,
2694       final byte [] splitPoint) throws IOException {
2695     try {
2696       splitRegion(tableNameOrRegionName, splitPoint);
2697     } catch (IllegalArgumentException e) {
2698       // Bad region, try table
2699       split(TableName.valueOf(tableNameOrRegionName), splitPoint);
2700     }
2701   }
2702 
2703   @VisibleForTesting
2704   public void split(final ServerName sn, final HRegionInfo hri,
2705       byte[] splitPoint) throws IOException {
2706     if (hri.getStartKey() != null && splitPoint != null &&
2707          Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) {
2708        throw new IOException("should not give a splitkey which equals to startkey!");
2709     }
2710     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2711     controller.setPriority(hri.getTable());
2712 
2713     // TODO: this does not do retries, it should. Set priority and timeout in controller
2714     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
2715     ProtobufUtil.split(controller, admin, hri, splitPoint);
2716   }
2717 
2718   /**
2719    * Modify an existing table, more IRB friendly version.
2720    * Asynchronous operation.  This means that it may be a while before your
2721    * schema change is updated across all of the table.
2722    *
2723    * @param tableName name of table.
2724    * @param htd modified description of the table
2725    * @throws IOException if a remote or network exception occurs
2726    */
2727   @Override
2728   public void modifyTable(final TableName tableName, final HTableDescriptor htd)
2729   throws IOException {
2730     if (!tableName.equals(htd.getTableName())) {
2731       throw new IllegalArgumentException("the specified table name '" + tableName +
2732         "' doesn't match with the HTD one: " + htd.getTableName());
2733     }
2734 
2735     executeCallable(new MasterCallable<Void>(getConnection()) {
2736       @Override
2737       public Void call(int callTimeout) throws ServiceException {
2738         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2739         controller.setCallTimeout(callTimeout);
2740         controller.setPriority(tableName);
2741         ModifyTableRequest request = RequestConverter.buildModifyTableRequest(
2742           tableName, htd, ng.getNonceGroup(), ng.newNonce());
2743         master.modifyTable(controller, request);
2744         return null;
2745       }
2746     });
2747   }
2748 
2749   public void modifyTable(final byte[] tableName, final HTableDescriptor htd)
2750   throws IOException {
2751     modifyTable(TableName.valueOf(tableName), htd);
2752   }
2753 
2754   public void modifyTable(final String tableName, final HTableDescriptor htd)
2755   throws IOException {
2756     modifyTable(TableName.valueOf(tableName), htd);
2757   }
2758 
2759   /**
2760    * @param regionName Name of a region.
2761    * @return a pair of HRegionInfo and ServerName if <code>regionName</code> is
2762    *  a verified region name (we call {@link
2763    *  MetaTableAccessor#getRegion(HConnection, byte[])}
2764    *  else null.
2765    * Throw IllegalArgumentException if <code>regionName</code> is null.
2766    * @throws IOException
2767    */
2768   Pair<HRegionInfo, ServerName> getRegion(final byte[] regionName) throws IOException {
2769     if (regionName == null) {
2770       throw new IllegalArgumentException("Pass a table name or region name");
2771     }
2772     Pair<HRegionInfo, ServerName> pair =
2773       MetaTableAccessor.getRegion(connection, regionName);
2774     if (pair == null) {
2775       final AtomicReference<Pair<HRegionInfo, ServerName>> result =
2776         new AtomicReference<Pair<HRegionInfo, ServerName>>(null);
2777       final String encodedName = Bytes.toString(regionName);
2778       MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
2779         @Override
2780         public boolean processRow(Result data) throws IOException {
2781           HRegionInfo info = HRegionInfo.getHRegionInfo(data);
2782           if (info == null) {
2783             LOG.warn("No serialized HRegionInfo in " + data);
2784             return true;
2785           }
2786           RegionLocations rl = MetaTableAccessor.getRegionLocations(data);
2787           boolean matched = false;
2788           ServerName sn = null;
2789           for (HRegionLocation h : rl.getRegionLocations()) {
2790             if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) {
2791               sn = h.getServerName();
2792               info = h.getRegionInfo();
2793               matched = true;
2794             }
2795           }
2796           if (!matched) return true;
2797           result.set(new Pair<HRegionInfo, ServerName>(info, sn));
2798           return false; // found the region, stop
2799         }
2800       };
2801 
2802       MetaScanner.metaScan(connection, visitor, null);
2803       pair = result.get();
2804     }
2805     return pair;
2806   }
2807 
2808   /**
2809    * If the input is a region name, it is returned as is. If it's an
2810    * encoded region name, the corresponding region is found from meta
2811    * and its region name is returned. If we can't find any region in
2812    * meta matching the input as either region name or encoded region
2813    * name, the input is returned as is. We don't throw unknown
2814    * region exception.
2815    */
2816   private byte[] getRegionName(
2817       final byte[] regionNameOrEncodedRegionName) throws IOException {
2818     if (Bytes.equals(regionNameOrEncodedRegionName,
2819         HRegionInfo.FIRST_META_REGIONINFO.getRegionName())
2820           || Bytes.equals(regionNameOrEncodedRegionName,
2821             HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) {
2822       return HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
2823     }
2824     byte[] tmp = regionNameOrEncodedRegionName;
2825     Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionNameOrEncodedRegionName);
2826     if (regionServerPair != null && regionServerPair.getFirst() != null) {
2827       tmp = regionServerPair.getFirst().getRegionName();
2828     }
2829     return tmp;
2830   }
2831 
2832   /**
2833    * Check if table exists or not
2834    * @param tableName Name of a table.
2835    * @return tableName instance
2836    * @throws IOException if a remote or network exception occurs.
2837    * @throws TableNotFoundException if table does not exist.
2838    */
2839   private TableName checkTableExists(final TableName tableName)
2840       throws IOException {
2841     if (!MetaTableAccessor.tableExists(connection, tableName)) {
2842       throw new TableNotFoundException(tableName);
2843     }
2844     return tableName;
2845   }
2846 
2847   /**
2848    * Shuts down the HBase cluster
2849    * @throws IOException if a remote or network exception occurs
2850    */
2851   @Override
2852   public synchronized void shutdown() throws IOException {
2853     executeCallable(new MasterCallable<Void>(getConnection()) {
2854       @Override
2855       public Void call(int callTimeout) throws ServiceException {
2856         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2857         controller.setCallTimeout(callTimeout);
2858         controller.setPriority(HConstants.HIGH_QOS);
2859         master.shutdown(controller, ShutdownRequest.newBuilder().build());
2860         return null;
2861       }
2862     });
2863   }
2864 
2865   /**
2866    * Shuts down the current HBase master only.
2867    * Does not shutdown the cluster.
2868    * @see #shutdown()
2869    * @throws IOException if a remote or network exception occurs
2870    */
2871   @Override
2872   public synchronized void stopMaster() throws IOException {
2873     executeCallable(new MasterCallable<Void>(getConnection()) {
2874       @Override
2875       public Void call(int callTimeout) throws ServiceException {
2876         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2877         controller.setCallTimeout(callTimeout);
2878         controller.setPriority(HConstants.HIGH_QOS);
2879         master.stopMaster(controller, StopMasterRequest.newBuilder().build());
2880         return null;
2881       }
2882     });
2883   }
2884 
2885   /**
2886    * Stop the designated regionserver
2887    * @param hostnamePort Hostname and port delimited by a <code>:</code> as in
2888    * <code>example.org:1234</code>
2889    * @throws IOException if a remote or network exception occurs
2890    */
2891   @Override
2892   public synchronized void stopRegionServer(final String hostnamePort)
2893   throws IOException {
2894     String hostname = Addressing.parseHostname(hostnamePort);
2895     int port = Addressing.parsePort(hostnamePort);
2896     AdminService.BlockingInterface admin =
2897       this.connection.getAdmin(ServerName.valueOf(hostname, port, 0));
2898     StopServerRequest request = RequestConverter.buildStopServerRequest(
2899       "Called by admin client " + this.connection.toString());
2900     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2901 
2902     controller.setPriority(HConstants.HIGH_QOS);
2903     try {
2904       // TODO: this does not do retries, it should. Set priority and timeout in controller
2905       admin.stopServer(controller, request);
2906     } catch (ServiceException se) {
2907       throw ProtobufUtil.getRemoteException(se);
2908     }
2909   }
2910 
2911 
2912   /**
2913    * @return cluster status
2914    * @throws IOException if a remote or network exception occurs
2915    */
2916   @Override
2917   public ClusterStatus getClusterStatus() throws IOException {
2918     return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
2919       @Override
2920       public ClusterStatus call(int callTimeout) throws ServiceException {
2921         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2922         controller.setCallTimeout(callTimeout);
2923         GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
2924         return ClusterStatus.convert(master.getClusterStatus(controller, req).getClusterStatus());
2925       }
2926     });
2927   }
2928 
2929   /**
2930    * @return Configuration used by the instance.
2931    */
2932   @Override
2933   public Configuration getConfiguration() {
2934     return this.conf;
2935   }
2936 
2937   /**
2938    * Create a new namespace
2939    * @param descriptor descriptor which describes the new namespace
2940    * @throws IOException
2941    */
2942   @Override
2943   public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
2944     executeCallable(new MasterCallable<Void>(getConnection()) {
2945       @Override
2946       public Void call(int callTimeout) throws Exception {
2947         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2948         controller.setCallTimeout(callTimeout);
2949         // TODO: set priority based on NS?
2950         master.createNamespace(controller,
2951           CreateNamespaceRequest.newBuilder()
2952             .setNamespaceDescriptor(ProtobufUtil
2953               .toProtoNamespaceDescriptor(descriptor)).build()
2954         );
2955         return null;
2956       }
2957     });
2958   }
2959 
2960   /**
2961    * Modify an existing namespace
2962    * @param descriptor descriptor which describes the new namespace
2963    * @throws IOException
2964    */
2965   @Override
2966   public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
2967     executeCallable(new MasterCallable<Void>(getConnection()) {
2968       @Override
2969       public Void call(int callTimeout) throws Exception {
2970         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2971         controller.setCallTimeout(callTimeout);
2972         master.modifyNamespace(controller, ModifyNamespaceRequest.newBuilder().
2973           setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
2974         return null;
2975       }
2976     });
2977   }
2978 
2979   /**
2980    * Delete an existing namespace. Only empty namespaces (no tables) can be removed.
2981    * @param name namespace name
2982    * @throws IOException
2983    */
2984   @Override
2985   public void deleteNamespace(final String name) throws IOException {
2986     executeCallable(new MasterCallable<Void>(getConnection()) {
2987       @Override
2988       public Void call(int callTimeout) throws Exception {
2989         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
2990         controller.setCallTimeout(callTimeout);
2991         master.deleteNamespace(controller, DeleteNamespaceRequest.newBuilder().
2992           setNamespaceName(name).build());
2993         return null;
2994       }
2995     });
2996   }
2997 
2998   /**
2999    * Get a namespace descriptor by name
3000    * @param name name of namespace descriptor
3001    * @return A descriptor
3002    * @throws IOException
3003    */
3004   @Override
3005   public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
3006     return
3007         executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
3008           @Override
3009           public NamespaceDescriptor call(int callTimeout) throws Exception {
3010             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3011             controller.setCallTimeout(callTimeout);
3012             return ProtobufUtil.toNamespaceDescriptor(
3013               master.getNamespaceDescriptor(controller, GetNamespaceDescriptorRequest.newBuilder().
3014                 setNamespaceName(name).build()).getNamespaceDescriptor());
3015           }
3016         });
3017   }
3018 
3019   /**
3020    * List available namespace descriptors
3021    * @return List of descriptors
3022    * @throws IOException
3023    */
3024   @Override
3025   public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
3026     return
3027         executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
3028           @Override
3029           public NamespaceDescriptor[] call(int callTimeout) throws Exception {
3030             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3031             controller.setCallTimeout(callTimeout);
3032             List<HBaseProtos.NamespaceDescriptor> list =
3033                 master.listNamespaceDescriptors(controller,
3034                   ListNamespaceDescriptorsRequest.newBuilder().build())
3035                 .getNamespaceDescriptorList();
3036             NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
3037             for(int i = 0; i < list.size(); i++) {
3038               res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
3039             }
3040             return res;
3041           }
3042         });
3043   }
3044 
3045   /**
3046    * List procedures
3047    * @return procedure list
3048    * @throws IOException
3049    */
3050   @Override
3051   public ProcedureInfo[] listProcedures() throws IOException {
3052     return
3053         executeCallable(new MasterCallable<ProcedureInfo[]>(getConnection()) {
3054           @Override
3055           public ProcedureInfo[] call(int callTimeout) throws Exception {
3056             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3057             controller.setCallTimeout(callTimeout);
3058             List<ProcedureProtos.Procedure> procList = master.listProcedures(
3059               controller, ListProceduresRequest.newBuilder().build()).getProcedureList();
3060             ProcedureInfo[] procInfoList = new ProcedureInfo[procList.size()];
3061             for (int i = 0; i < procList.size(); i++) {
3062               procInfoList[i] = ProcedureInfo.convert(procList.get(i));
3063             }
3064             return procInfoList;
3065           }
3066         });
3067   }
3068 
3069   /**
3070    * Get list of table descriptors by namespace
3071    * @param name namespace name
3072    * @return A descriptor
3073    * @throws IOException
3074    */
3075   @Override
3076   public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
3077     return
3078         executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3079           @Override
3080           public HTableDescriptor[] call(int callTimeout) throws Exception {
3081             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3082             controller.setCallTimeout(callTimeout);
3083             List<TableSchema> list =
3084                 master.listTableDescriptorsByNamespace(controller,
3085                   ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name)
3086                   .build()).getTableSchemaList();
3087             HTableDescriptor[] res = new HTableDescriptor[list.size()];
3088             for(int i=0; i < list.size(); i++) {
3089 
3090               res[i] = HTableDescriptor.convert(list.get(i));
3091             }
3092             return res;
3093           }
3094         });
3095   }
3096 
3097   /**
3098    * Get list of table names by namespace
3099    * @param name namespace name
3100    * @return The list of table names in the namespace
3101    * @throws IOException
3102    */
3103   @Override
3104   public TableName[] listTableNamesByNamespace(final String name) throws IOException {
3105     return
3106         executeCallable(new MasterCallable<TableName[]>(getConnection()) {
3107           @Override
3108           public TableName[] call(int callTimeout) throws Exception {
3109             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3110             controller.setCallTimeout(callTimeout);
3111             List<HBaseProtos.TableName> tableNames =
3112               master.listTableNamesByNamespace(controller, ListTableNamesByNamespaceRequest.
3113                 newBuilder().setNamespaceName(name).build())
3114                 .getTableNameList();
3115             TableName[] result = new TableName[tableNames.size()];
3116             for (int i = 0; i < tableNames.size(); i++) {
3117               result[i] = ProtobufUtil.toTableName(tableNames.get(i));
3118             }
3119             return result;
3120           }
3121         });
3122   }
3123 
3124   /**
3125    * Check to see if HBase is running. Throw an exception if not.
3126    * @param conf system configuration
3127    * @throws MasterNotRunningException if the master is not running
3128    * @throws ZooKeeperConnectionException if unable to connect to zookeeper
3129    */
3130   // Used by tests and by the Merge tool. Merge tool uses it to figure if HBase is up or not.
3131   public static void checkHBaseAvailable(Configuration conf)
3132   throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
3133     Configuration copyOfConf = HBaseConfiguration.create(conf);
3134     // We set it to make it fail as soon as possible if HBase is not available
3135     copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
3136     copyOfConf.setInt("zookeeper.recovery.retry", 0);
3137     try (ClusterConnection connection =
3138         (ClusterConnection)ConnectionFactory.createConnection(copyOfConf)) {
3139         // Check ZK first.
3140         // If the connection exists, we may have a connection to ZK that does not work anymore
3141         ZooKeeperKeepAliveConnection zkw = null;
3142         try {
3143           // This is NASTY. FIX!!!! Dependent on internal implementation! TODO
3144           zkw = ((ConnectionManager.HConnectionImplementation)connection).
3145             getKeepAliveZooKeeperWatcher();
3146           zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);
3147         } catch (IOException e) {
3148           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3149         } catch (InterruptedException e) {
3150           throw (InterruptedIOException)
3151             new InterruptedIOException("Can't connect to ZooKeeper").initCause(e);
3152         } catch (KeeperException e) {
3153           throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
3154         } finally {
3155           if (zkw != null) {
3156             zkw.close();
3157           }
3158         }
3159       connection.isMasterRunning();
3160     }
3161   }
3162 
3163   /**
3164    * get the regions of a given table.
3165    *
3166    * @param tableName the name of the table
3167    * @return Ordered list of {@link HRegionInfo}.
3168    * @throws IOException
3169    */
3170   @Override
3171   public List<HRegionInfo> getTableRegions(final TableName tableName)
3172   throws IOException {
3173     ZooKeeperWatcher zookeeper =
3174       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3175         new ThrowableAbortable());
3176     List<HRegionInfo> Regions = null;
3177     try {
3178       Regions = MetaTableAccessor.getTableRegions(zookeeper, connection, tableName, true);
3179     } finally {
3180       zookeeper.close();
3181     }
3182     return Regions;
3183   }
3184 
3185   public List<HRegionInfo> getTableRegions(final byte[] tableName)
3186   throws IOException {
3187     return getTableRegions(TableName.valueOf(tableName));
3188   }
3189 
3190   @Override
3191   public synchronized void close() throws IOException {
3192     if (cleanupConnectionOnClose && this.connection != null && !this.closed) {
3193       this.connection.close();
3194       this.closed = true;
3195     }
3196   }
3197 
3198   /**
3199    * Get tableDescriptors
3200    * @param tableNames List of table names
3201    * @return HTD[] the tableDescriptor
3202    * @throws IOException if a remote or network exception occurs
3203    */
3204   @Override
3205   public HTableDescriptor[] getTableDescriptorsByTableName(final List<TableName> tableNames)
3206   throws IOException {
3207     return executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
3208       @Override
3209       public HTableDescriptor[] call(int callTimeout) throws Exception {
3210         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3211         controller.setCallTimeout(callTimeout);
3212         GetTableDescriptorsRequest req =
3213             RequestConverter.buildGetTableDescriptorsRequest(tableNames);
3214           return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(controller, req));
3215       }
3216     });
3217   }
3218 
3219   /**
3220    * Get tableDescriptor
3221    * @param tableName one table name
3222    * @return HTD the HTableDescriptor or null if the table not exists
3223    * @throws IOException if a remote or network exception occurs
3224    */
3225   private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
3226       throws IOException {
3227     List<TableName> tableNames = new ArrayList<TableName>(1);
3228     tableNames.add(tableName);
3229 
3230     HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
3231 
3232     if (htdl == null || htdl.length == 0) {
3233       return null;
3234     }
3235     else {
3236       return htdl[0];
3237     }
3238   }
3239 
3240   /**
3241    * Get tableDescriptors
3242    * @param names List of table names
3243    * @return HTD[] the tableDescriptor
3244    * @throws IOException if a remote or network exception occurs
3245    */
3246   @Override
3247   public HTableDescriptor[] getTableDescriptors(List<String> names)
3248   throws IOException {
3249     List<TableName> tableNames = new ArrayList<TableName>(names.size());
3250     for(String name : names) {
3251       tableNames.add(TableName.valueOf(name));
3252     }
3253     return getTableDescriptorsByTableName(tableNames);
3254   }
3255 
3256   private RollWALWriterResponse rollWALWriterImpl(final ServerName sn) throws IOException,
3257       FailedLogCloseException {
3258     AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3259     RollWALWriterRequest request = RequestConverter.buildRollWALWriterRequest();
3260     PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3261 
3262     try {
3263       // TODO: this does not do retries, it should. Set priority and timeout in controller
3264       return admin.rollWALWriter(controller, request);
3265     } catch (ServiceException se) {
3266       throw ProtobufUtil.getRemoteException(se);
3267     }
3268   }
3269 
3270   /**
3271    * Roll the log writer. I.e. when using a file system based write ahead log,
3272    * start writing log messages to a new file.
3273    *
3274    * Note that when talking to a version 1.0+ HBase deployment, the rolling is asynchronous.
3275    * This method will return as soon as the roll is requested and the return value will
3276    * always be null. Additionally, the named region server may schedule store flushes at the
3277    * request of the wal handling the roll request.
3278    *
3279    * When talking to a 0.98 or older HBase deployment, the rolling is synchronous and the
3280    * return value may be either null or a list of encoded region names.
3281    *
3282    * @param serverName
3283    *          The servername of the regionserver. A server name is made of host,
3284    *          port and startcode. This is mandatory. Here is an example:
3285    *          <code> host187.example.com,60020,1289493121758</code>
3286    * @return a set of {@link HRegionInfo#getEncodedName()} that would allow the wal to
3287    *         clean up some underlying files. null if there's nothing to flush.
3288    * @throws IOException if a remote or network exception occurs
3289    * @throws FailedLogCloseException
3290    * @deprecated use {@link #rollWALWriter(ServerName)}
3291    */
3292   @Deprecated
3293   public synchronized byte[][] rollHLogWriter(String serverName)
3294       throws IOException, FailedLogCloseException {
3295     ServerName sn = ServerName.valueOf(serverName);
3296     final RollWALWriterResponse response = rollWALWriterImpl(sn);
3297     int regionCount = response.getRegionToFlushCount();
3298     if (0 == regionCount) {
3299       return null;
3300     }
3301     byte[][] regionsToFlush = new byte[regionCount][];
3302     for (int i = 0; i < regionCount; i++) {
3303       ByteString region = response.getRegionToFlush(i);
3304       regionsToFlush[i] = region.toByteArray();
3305     }
3306     return regionsToFlush;
3307   }
3308 
3309   @Override
3310   public synchronized void rollWALWriter(ServerName serverName)
3311       throws IOException, FailedLogCloseException {
3312     rollWALWriterImpl(serverName);
3313   }
3314 
3315   @Override
3316   public String[] getMasterCoprocessors() {
3317     try {
3318       return getClusterStatus().getMasterCoprocessors();
3319     } catch (IOException e) {
3320       LOG.error("Could not getClusterStatus()",e);
3321       return null;
3322     }
3323   }
3324 
3325   /**
3326    * {@inheritDoc}
3327    */
3328   @Override
3329   public CompactionState getCompactionState(final TableName tableName)
3330   throws IOException {
3331     CompactionState state = CompactionState.NONE;
3332     ZooKeeperWatcher zookeeper =
3333       new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
3334         new ThrowableAbortable());
3335     try {
3336       checkTableExists(tableName);
3337       List<Pair<HRegionInfo, ServerName>> pairs =
3338         MetaTableAccessor.getTableRegionsAndLocations(zookeeper, connection, tableName);
3339       for (Pair<HRegionInfo, ServerName> pair: pairs) {
3340         if (pair.getFirst().isOffline()) continue;
3341         if (pair.getSecond() == null) continue;
3342         try {
3343           ServerName sn = pair.getSecond();
3344           AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3345           GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3346             pair.getFirst().getRegionName(), true);
3347           GetRegionInfoResponse response = admin.getRegionInfo(null, request);
3348           switch (response.getCompactionState()) {
3349           case MAJOR_AND_MINOR:
3350             return CompactionState.MAJOR_AND_MINOR;
3351           case MAJOR:
3352             if (state == CompactionState.MINOR) {
3353               return CompactionState.MAJOR_AND_MINOR;
3354             }
3355             state = CompactionState.MAJOR;
3356             break;
3357           case MINOR:
3358             if (state == CompactionState.MAJOR) {
3359               return CompactionState.MAJOR_AND_MINOR;
3360             }
3361             state = CompactionState.MINOR;
3362             break;
3363           case NONE:
3364           default: // nothing, continue
3365           }
3366         } catch (NotServingRegionException e) {
3367           if (LOG.isDebugEnabled()) {
3368             LOG.debug("Trying to get compaction state of " +
3369               pair.getFirst() + ": " +
3370               StringUtils.stringifyException(e));
3371           }
3372         } catch (RemoteException e) {
3373           if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
3374             if (LOG.isDebugEnabled()) {
3375               LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
3376                 + StringUtils.stringifyException(e));
3377             }
3378           } else {
3379             throw e;
3380           }
3381         }
3382       }
3383     } catch (ServiceException se) {
3384       throw ProtobufUtil.getRemoteException(se);
3385     } finally {
3386       zookeeper.close();
3387     }
3388     return state;
3389   }
3390 
3391   /**
3392    * {@inheritDoc}
3393    */
3394   @Override
3395   public CompactionState getCompactionStateForRegion(final byte[] regionName)
3396   throws IOException {
3397     try {
3398       Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
3399       if (regionServerPair == null) {
3400         throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
3401       }
3402       if (regionServerPair.getSecond() == null) {
3403         throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
3404       }
3405       ServerName sn = regionServerPair.getSecond();
3406       AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
3407       GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
3408         regionServerPair.getFirst().getRegionName(), true);
3409       PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3410       // TODO: this does not do retries, it should. Set priority and timeout in controller
3411       GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
3412       return response.getCompactionState();
3413     } catch (ServiceException se) {
3414       throw ProtobufUtil.getRemoteException(se);
3415     }
3416   }
3417 
3418   /**
3419    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3420    * #getCompactionStateForRegion(byte[])} instead.
3421    */
3422   @Deprecated
3423   public CompactionState getCompactionState(final String tableNameOrRegionName)
3424   throws IOException, InterruptedException {
3425     return getCompactionState(Bytes.toBytes(tableNameOrRegionName));
3426   }
3427 
3428   /**
3429    * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
3430    * #getCompactionStateForRegion(byte[])} instead.
3431    */
3432   @Deprecated
3433   public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
3434   throws IOException, InterruptedException {
3435     try {
3436       return getCompactionStateForRegion(tableNameOrRegionName);
3437     } catch (IllegalArgumentException e) {
3438       // Invalid region, try table
3439       return getCompactionState(TableName.valueOf(tableNameOrRegionName));
3440     }
3441   }
3442 
3443   /**
3444    * Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
3445    * taken. If the table is disabled, an offline snapshot is taken.
3446    * <p>
3447    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3448    * snapshot with the same name (even a different type or with different parameters) will fail with
3449    * a {@link SnapshotCreationException} indicating the duplicate naming.
3450    * <p>
3451    * Snapshot names follow the same naming constraints as tables in HBase. See
3452    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3453    * @param snapshotName name of the snapshot to be created
3454    * @param tableName name of the table for which snapshot is created
3455    * @throws IOException if a remote or network exception occurs
3456    * @throws SnapshotCreationException if snapshot creation failed
3457    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3458    */
3459   @Override
3460   public void snapshot(final String snapshotName,
3461                        final TableName tableName) throws IOException,
3462       SnapshotCreationException, IllegalArgumentException {
3463     snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH);
3464   }
3465 
3466   public void snapshot(final String snapshotName,
3467                        final String tableName) throws IOException,
3468       SnapshotCreationException, IllegalArgumentException {
3469     snapshot(snapshotName, TableName.valueOf(tableName),
3470         SnapshotDescription.Type.FLUSH);
3471   }
3472 
3473   /**
3474    * Create snapshot for the given table of given flush type.
3475    * <p>
3476    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3477    * snapshot with the same name (even a different type or with different parameters) will fail with
3478    * a {@link SnapshotCreationException} indicating the duplicate naming.
3479    * <p>
3480    * Snapshot names follow the same naming constraints as tables in HBase.
3481    * @param snapshotName name of the snapshot to be created
3482    * @param tableName name of the table for which snapshot is created
3483    * @param flushType if the snapshot should be taken without flush memstore first
3484    * @throws IOException if a remote or network exception occurs
3485    * @throws SnapshotCreationException if snapshot creation failed
3486    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3487    */
3488    public void snapshot(final byte[] snapshotName, final byte[] tableName,
3489                        final SnapshotDescription.Type flushType) throws
3490       IOException, SnapshotCreationException, IllegalArgumentException {
3491       snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName), flushType);
3492   }
3493   /**
3494    public void snapshot(final String snapshotName,
3495     * Create a timestamp consistent snapshot for the given table.
3496                         final byte[] tableName) throws IOException,
3497     * <p>
3498     * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3499     * snapshot with the same name (even a different type or with different parameters) will fail with
3500     * a {@link SnapshotCreationException} indicating the duplicate naming.
3501     * <p>
3502     * Snapshot names follow the same naming constraints as tables in HBase.
3503     * @param snapshotName name of the snapshot to be created
3504     * @param tableName name of the table for which snapshot is created
3505     * @throws IOException if a remote or network exception occurs
3506     * @throws SnapshotCreationException if snapshot creation failed
3507     * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3508     */
3509   @Override
3510   public void snapshot(final byte[] snapshotName,
3511                        final TableName tableName) throws IOException,
3512       SnapshotCreationException, IllegalArgumentException {
3513     snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH);
3514   }
3515 
3516   public void snapshot(final byte[] snapshotName,
3517                        final byte[] tableName) throws IOException,
3518       SnapshotCreationException, IllegalArgumentException {
3519     snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName),
3520       SnapshotDescription.Type.FLUSH);
3521   }
3522 
3523   /**
3524    * Create typed snapshot of the table.
3525    * <p>
3526    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3527    * snapshot with the same name (even a different type or with different parameters) will fail with
3528    * a {@link SnapshotCreationException} indicating the duplicate naming.
3529    * <p>
3530    * Snapshot names follow the same naming constraints as tables in HBase. See
3531    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3532    * <p>
3533    * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other
3534    *          snapshots stored on the cluster
3535    * @param tableName name of the table to snapshot
3536    * @param type type of snapshot to take
3537    * @throws IOException we fail to reach the master
3538    * @throws SnapshotCreationException if snapshot creation failed
3539    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3540    */
3541   @Override
3542   public void snapshot(final String snapshotName,
3543                        final TableName tableName,
3544                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3545       IllegalArgumentException {
3546     SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
3547     builder.setTable(tableName.getNameAsString());
3548     builder.setName(snapshotName);
3549     builder.setType(type);
3550     snapshot(builder.build());
3551   }
3552 
3553   public void snapshot(final String snapshotName,
3554                        final String tableName,
3555                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3556       IllegalArgumentException {
3557     snapshot(snapshotName, TableName.valueOf(tableName), type);
3558   }
3559 
3560   public void snapshot(final String snapshotName,
3561                        final byte[] tableName,
3562                       SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
3563       IllegalArgumentException {
3564     snapshot(snapshotName, TableName.valueOf(tableName), type);
3565   }
3566 
3567   /**
3568    * Take a snapshot and wait for the server to complete that snapshot (blocking).
3569    * <p>
3570    * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
3571    * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at a
3572    * time for a single cluster).
3573    * <p>
3574    * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
3575    * snapshot with the same name (even a different type or with different parameters) will fail with
3576    * a {@link SnapshotCreationException} indicating the duplicate naming.
3577    * <p>
3578    * Snapshot names follow the same naming constraints as tables in HBase. See
3579    * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
3580    * <p>
3581    * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
3582    * unless you are sure about the type of snapshot that you want to take.
3583    * @param snapshot snapshot to take
3584    * @throws IOException or we lose contact with the master.
3585    * @throws SnapshotCreationException if snapshot failed to be taken
3586    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3587    */
3588   @Override
3589   public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException,
3590       IllegalArgumentException {
3591     // actually take the snapshot
3592     SnapshotResponse response = takeSnapshotAsync(snapshot);
3593     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot)
3594         .build();
3595     IsSnapshotDoneResponse done = null;
3596     long start = EnvironmentEdgeManager.currentTime();
3597     long max = response.getExpectedTimeout();
3598     long maxPauseTime = max / this.numRetries;
3599     int tries = 0;
3600     LOG.debug("Waiting a max of " + max + " ms for snapshot '" +
3601         ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " +
3602         maxPauseTime + " ms per retry)");
3603     while (tries == 0
3604         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) {
3605       try {
3606         // sleep a backoff <= pauseTime amount
3607         long sleep = getPauseTime(tries++);
3608         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3609         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3610           "ms while waiting for snapshot completion.");
3611         Thread.sleep(sleep);
3612       } catch (InterruptedException e) {
3613         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
3614       }
3615       LOG.debug("Getting current status of snapshot from master...");
3616       done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3617         @Override
3618         public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3619           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3620           controller.setCallTimeout(callTimeout);
3621           return master.isSnapshotDone(controller, request);
3622         }
3623       });
3624     }
3625     if (!done.getDone()) {
3626       throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
3627           + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
3628     }
3629   }
3630 
3631   /**
3632    * Take a snapshot without waiting for the server to complete that snapshot (asynchronous)
3633    * <p>
3634    * Only a single snapshot should be taken at a time, or results may be undefined.
3635    * @param snapshot snapshot to take
3636    * @return response from the server indicating the max time to wait for the snapshot
3637    * @throws IOException if the snapshot did not succeed or we lose contact with the master.
3638    * @throws SnapshotCreationException if snapshot creation failed
3639    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
3640    */
3641   @Override
3642   public SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot) throws IOException,
3643       SnapshotCreationException {
3644     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
3645     final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
3646         .build();
3647     // run the snapshot on the master
3648     return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
3649       @Override
3650       public SnapshotResponse call(int callTimeout) throws ServiceException {
3651         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3652         controller.setCallTimeout(callTimeout);
3653         return master.snapshot(controller, request);
3654       }
3655     });
3656   }
3657 
3658   /**
3659    * Check the current state of the passed snapshot.
3660    * <p>
3661    * There are three possible states:
3662    * <ol>
3663    * <li>running - returns <tt>false</tt></li>
3664    * <li>finished - returns <tt>true</tt></li>
3665    * <li>finished with error - throws the exception that caused the snapshot to fail</li>
3666    * </ol>
3667    * <p>
3668    * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been
3669    * run/started since the snapshot your are checking, you will recieve an
3670    * {@link UnknownSnapshotException}.
3671    * @param snapshot description of the snapshot to check
3672    * @return <tt>true</tt> if the snapshot is completed, <tt>false</tt> if the snapshot is still
3673    *         running
3674    * @throws IOException if we have a network issue
3675    * @throws HBaseSnapshotException if the snapshot failed
3676    * @throws UnknownSnapshotException if the requested snapshot is unknown
3677    */
3678   @Override
3679   public boolean isSnapshotFinished(final SnapshotDescription snapshot)
3680       throws IOException, HBaseSnapshotException, UnknownSnapshotException {
3681 
3682     return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
3683       @Override
3684       public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
3685         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3686         controller.setCallTimeout(callTimeout);
3687         return master.isSnapshotDone(controller,
3688           IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
3689       }
3690     }).getDone();
3691   }
3692 
3693   /**
3694    * Restore the specified snapshot on the original table. (The table must be disabled)
3695    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3696    * is set to true, a snapshot of the current table is taken
3697    * before executing the restore operation.
3698    * In case of restore failure, the failsafe snapshot will be restored.
3699    * If the restore completes without problem the failsafe snapshot is deleted.
3700    *
3701    * @param snapshotName name of the snapshot to restore
3702    * @throws IOException if a remote or network exception occurs
3703    * @throws RestoreSnapshotException if snapshot failed to be restored
3704    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3705    */
3706   @Override
3707   public void restoreSnapshot(final byte[] snapshotName)
3708       throws IOException, RestoreSnapshotException {
3709     restoreSnapshot(Bytes.toString(snapshotName));
3710   }
3711 
3712   /**
3713    * Restore the specified snapshot on the original table. (The table must be disabled)
3714    * If the "hbase.snapshot.restore.take.failsafe.snapshot" configuration property
3715    * is set to true, a snapshot of the current table is taken
3716    * before executing the restore operation.
3717    * In case of restore failure, the failsafe snapshot will be restored.
3718    * If the restore completes without problem the failsafe snapshot is deleted.
3719    *
3720    * @param snapshotName name of the snapshot to restore
3721    * @throws IOException if a remote or network exception occurs
3722    * @throws RestoreSnapshotException if snapshot failed to be restored
3723    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3724    */
3725   @Override
3726   public void restoreSnapshot(final String snapshotName)
3727       throws IOException, RestoreSnapshotException {
3728     boolean takeFailSafeSnapshot =
3729       conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false);
3730     restoreSnapshot(snapshotName, takeFailSafeSnapshot);
3731   }
3732 
3733   /**
3734    * Restore the specified snapshot on the original table. (The table must be disabled)
3735    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3736    * before executing the restore operation.
3737    * In case of restore failure, the failsafe snapshot will be restored.
3738    * If the restore completes without problem the failsafe snapshot is deleted.
3739    *
3740    * The failsafe snapshot name is configurable by using the property
3741    * "hbase.snapshot.restore.failsafe.name".
3742    *
3743    * @param snapshotName name of the snapshot to restore
3744    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3745    * @throws IOException if a remote or network exception occurs
3746    * @throws RestoreSnapshotException if snapshot failed to be restored
3747    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3748    */
3749   @Override
3750   public void restoreSnapshot(final byte[] snapshotName, final boolean takeFailSafeSnapshot)
3751       throws IOException, RestoreSnapshotException {
3752     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
3753   }
3754 
3755   /**
3756    * Restore the specified snapshot on the original table. (The table must be disabled)
3757    * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
3758    * before executing the restore operation.
3759    * In case of restore failure, the failsafe snapshot will be restored.
3760    * If the restore completes without problem the failsafe snapshot is deleted.
3761    *
3762    * The failsafe snapshot name is configurable by using the property
3763    * "hbase.snapshot.restore.failsafe.name".
3764    *
3765    * @param snapshotName name of the snapshot to restore
3766    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
3767    * @throws IOException if a remote or network exception occurs
3768    * @throws RestoreSnapshotException if snapshot failed to be restored
3769    * @throws IllegalArgumentException if the restore request is formatted incorrectly
3770    */
3771   @Override
3772   public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
3773       throws IOException, RestoreSnapshotException {
3774     TableName tableName = null;
3775     for (SnapshotDescription snapshotInfo: listSnapshots()) {
3776       if (snapshotInfo.getName().equals(snapshotName)) {
3777         tableName = TableName.valueOf(snapshotInfo.getTable());
3778         break;
3779       }
3780     }
3781 
3782     if (tableName == null) {
3783       throw new RestoreSnapshotException(
3784         "Unable to find the table name for snapshot=" + snapshotName);
3785     }
3786 
3787     // The table does not exists, switch to clone.
3788     if (!tableExists(tableName)) {
3789       cloneSnapshot(snapshotName, tableName);
3790       return;
3791     }
3792 
3793     // Check if the table is disabled
3794     if (!isTableDisabled(tableName)) {
3795       throw new TableNotDisabledException(tableName);
3796     }
3797 
3798     // Take a snapshot of the current state
3799     String failSafeSnapshotSnapshotName = null;
3800     if (takeFailSafeSnapshot) {
3801       failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
3802         "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
3803       failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
3804         .replace("{snapshot.name}", snapshotName)
3805         .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
3806         .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
3807       LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3808       snapshot(failSafeSnapshotSnapshotName, tableName);
3809     }
3810 
3811     try {
3812       // Restore snapshot
3813       internalRestoreSnapshot(snapshotName, tableName);
3814     } catch (IOException e) {
3815       // Somthing went wrong during the restore...
3816       // if the pre-restore snapshot is available try to rollback
3817       if (takeFailSafeSnapshot) {
3818         try {
3819           internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
3820           String msg = "Restore snapshot=" + snapshotName +
3821             " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
3822           LOG.error(msg, e);
3823           throw new RestoreSnapshotException(msg, e);
3824         } catch (IOException ex) {
3825           String msg = "Failed to restore and rollback to snapshot=" + failSafeSnapshotSnapshotName;
3826           LOG.error(msg, ex);
3827           throw new RestoreSnapshotException(msg, e);
3828         }
3829       } else {
3830         throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
3831       }
3832     }
3833 
3834     // If the restore is succeeded, delete the pre-restore snapshot
3835     if (takeFailSafeSnapshot) {
3836       try {
3837         LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
3838         deleteSnapshot(failSafeSnapshotSnapshotName);
3839       } catch (IOException e) {
3840         LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
3841       }
3842     }
3843   }
3844 
3845   /**
3846    * Create a new table by cloning the snapshot content.
3847    *
3848    * @param snapshotName name of the snapshot to be cloned
3849    * @param tableName name of the table where the snapshot will be restored
3850    * @throws IOException if a remote or network exception occurs
3851    * @throws TableExistsException if table to be created already exists
3852    * @throws RestoreSnapshotException if snapshot failed to be cloned
3853    * @throws IllegalArgumentException if the specified table has not a valid name
3854    */
3855   public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName)
3856       throws IOException, TableExistsException, RestoreSnapshotException {
3857     cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName));
3858   }
3859 
3860   /**
3861    * Create a new table by cloning the snapshot content.
3862    *
3863    * @param snapshotName name of the snapshot to be cloned
3864    * @param tableName name of the table where the snapshot will be restored
3865    * @throws IOException if a remote or network exception occurs
3866    * @throws TableExistsException if table to be created already exists
3867    * @throws RestoreSnapshotException if snapshot failed to be cloned
3868    * @throws IllegalArgumentException if the specified table has not a valid name
3869    */
3870   @Override
3871   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
3872       throws IOException, TableExistsException, RestoreSnapshotException {
3873     cloneSnapshot(Bytes.toString(snapshotName), tableName);
3874   }
3875 
3876 
3877 
3878   /**
3879    * Create a new table by cloning the snapshot content.
3880    *
3881    * @param snapshotName name of the snapshot to be cloned
3882    * @param tableName name of the table where the snapshot will be restored
3883    * @throws IOException if a remote or network exception occurs
3884    * @throws TableExistsException if table to be created already exists
3885    * @throws RestoreSnapshotException if snapshot failed to be cloned
3886    * @throws IllegalArgumentException if the specified table has not a valid name
3887    */
3888   public void cloneSnapshot(final String snapshotName, final String tableName)
3889       throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException {
3890     cloneSnapshot(snapshotName, TableName.valueOf(tableName));
3891   }
3892 
3893   /**
3894    * Create a new table by cloning the snapshot content.
3895    *
3896    * @param snapshotName name of the snapshot to be cloned
3897    * @param tableName name of the table where the snapshot will be restored
3898    * @throws IOException if a remote or network exception occurs
3899    * @throws TableExistsException if table to be created already exists
3900    * @throws RestoreSnapshotException if snapshot failed to be cloned
3901    * @throws IllegalArgumentException if the specified table has not a valid name
3902    */
3903   @Override
3904   public void cloneSnapshot(final String snapshotName, final TableName tableName)
3905       throws IOException, TableExistsException, RestoreSnapshotException {
3906     if (tableExists(tableName)) {
3907       throw new TableExistsException(tableName);
3908     }
3909     internalRestoreSnapshot(snapshotName, tableName);
3910     waitUntilTableIsEnabled(tableName);
3911   }
3912 
3913   /**
3914    * Execute a distributed procedure on a cluster synchronously with return data
3915    *
3916    * @param signature A distributed procedure is uniquely identified
3917    * by its signature (default the root ZK node name of the procedure).
3918    * @param instance The instance name of the procedure. For some procedures, this parameter is
3919    * optional.
3920    * @param props Property/Value pairs of properties passing to the procedure
3921    * @return data returned after procedure execution. null if no return data.
3922    * @throws IOException
3923    */
3924   @Override
3925   public byte[] execProcedureWithRet(String signature, String instance,
3926       Map<String, String> props) throws IOException {
3927     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3928     builder.setSignature(signature).setInstance(instance);
3929     for (Entry<String, String> entry : props.entrySet()) {
3930       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3931           .setValue(entry.getValue()).build();
3932       builder.addConfiguration(pair);
3933     }
3934 
3935     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3936         .setProcedure(builder.build()).build();
3937     // run the procedure on the master
3938     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3939         getConnection()) {
3940       @Override
3941       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3942         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3943         controller.setCallTimeout(callTimeout);
3944         return master.execProcedureWithRet(controller, request);
3945       }
3946     });
3947 
3948     return response.hasReturnData() ? response.getReturnData().toByteArray() : null;
3949   }
3950   /**
3951    * Execute a distributed procedure on a cluster.
3952    *
3953    * @param signature A distributed procedure is uniquely identified
3954    * by its signature (default the root ZK node name of the procedure).
3955    * @param instance The instance name of the procedure. For some procedures, this parameter is
3956    * optional.
3957    * @param props Property/Value pairs of properties passing to the procedure
3958    * @throws IOException
3959    */
3960   @Override
3961   public void execProcedure(String signature, String instance,
3962       Map<String, String> props) throws IOException {
3963     ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
3964     builder.setSignature(signature).setInstance(instance);
3965     for (Entry<String, String> entry : props.entrySet()) {
3966       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
3967           .setValue(entry.getValue()).build();
3968       builder.addConfiguration(pair);
3969     }
3970 
3971     final ExecProcedureRequest request = ExecProcedureRequest.newBuilder()
3972         .setProcedure(builder.build()).build();
3973     // run the procedure on the master
3974     ExecProcedureResponse response = executeCallable(new MasterCallable<ExecProcedureResponse>(
3975         getConnection()) {
3976       @Override
3977       public ExecProcedureResponse call(int callTimeout) throws ServiceException {
3978         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
3979         controller.setCallTimeout(callTimeout);
3980         return master.execProcedure(controller, request);
3981       }
3982     });
3983 
3984     long start = EnvironmentEdgeManager.currentTime();
3985     long max = response.getExpectedTimeout();
3986     long maxPauseTime = max / this.numRetries;
3987     int tries = 0;
3988     LOG.debug("Waiting a max of " + max + " ms for procedure '" +
3989         signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)");
3990     boolean done = false;
3991     while (tries == 0
3992         || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) {
3993       try {
3994         // sleep a backoff <= pauseTime amount
3995         long sleep = getPauseTime(tries++);
3996         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
3997         LOG.debug("(#" + tries + ") Sleeping: " + sleep +
3998           "ms while waiting for procedure completion.");
3999         Thread.sleep(sleep);
4000       } catch (InterruptedException e) {
4001         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
4002       }
4003       LOG.debug("Getting current status of procedure from master...");
4004       done = isProcedureFinished(signature, instance, props);
4005     }
4006     if (!done) {
4007       throw new IOException("Procedure '" + signature + " : " + instance
4008           + "' wasn't completed in expectedTime:" + max + " ms");
4009     }
4010   }
4011 
4012   /**
4013    * Check the current state of the specified procedure.
4014    * <p>
4015    * There are three possible states:
4016    * <ol>
4017    * <li>running - returns <tt>false</tt></li>
4018    * <li>finished - returns <tt>true</tt></li>
4019    * <li>finished with error - throws the exception that caused the procedure to fail</li>
4020    * </ol>
4021    * <p>
4022    *
4023    * @param signature The signature that uniquely identifies a procedure
4024    * @param instance The instance name of the procedure
4025    * @param props Property/Value pairs of properties passing to the procedure
4026    * @return true if the specified procedure is finished successfully, false if it is still running
4027    * @throws IOException if the specified procedure finished with error
4028    */
4029   @Override
4030   public boolean isProcedureFinished(String signature, String instance, Map<String, String> props)
4031       throws IOException {
4032     final ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
4033     builder.setSignature(signature).setInstance(instance);
4034     for (Entry<String, String> entry : props.entrySet()) {
4035       NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey())
4036           .setValue(entry.getValue()).build();
4037       builder.addConfiguration(pair);
4038     }
4039     final ProcedureDescription desc = builder.build();
4040     return executeCallable(
4041         new MasterCallable<IsProcedureDoneResponse>(getConnection()) {
4042           @Override
4043           public IsProcedureDoneResponse call(int callTimeout) throws ServiceException {
4044             PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4045             controller.setCallTimeout(callTimeout);
4046             return master.isProcedureDone(controller, IsProcedureDoneRequest
4047                 .newBuilder().setProcedure(desc).build());
4048           }
4049         }).getDone();
4050   }
4051 
4052   /**
4053    * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
4054    * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
4055    * create an HTable instance to this table before it is available.
4056    * @param snapshotName snapshot to restore
4057    * @param tableName table name to restore the snapshot on
4058    * @throws IOException if a remote or network exception occurs
4059    * @throws RestoreSnapshotException if snapshot failed to be restored
4060    * @throws IllegalArgumentException if the restore request is formatted incorrectly
4061    */
4062   private void internalRestoreSnapshot(final String snapshotName, final TableName
4063       tableName)
4064       throws IOException, RestoreSnapshotException {
4065     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
4066         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
4067 
4068     // actually restore the snapshot
4069     internalRestoreSnapshotAsync(snapshot);
4070 
4071     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
4072         .setSnapshot(snapshot).build();
4073     IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
4074         .setDone(false).buildPartial();
4075     final long maxPauseTime = 5000;
4076     int tries = 0;
4077     while (!done.getDone()) {
4078       try {
4079         // sleep a backoff <= pauseTime amount
4080         long sleep = getPauseTime(tries++);
4081         sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
4082         LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
4083         Thread.sleep(sleep);
4084       } catch (InterruptedException e) {
4085         throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
4086       }
4087       LOG.debug("Getting current status of snapshot restore from master...");
4088       done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
4089           getConnection()) {
4090         @Override
4091         public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
4092           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4093           controller.setCallTimeout(callTimeout);
4094           return master.isRestoreSnapshotDone(controller, request);
4095         }
4096       });
4097     }
4098     if (!done.getDone()) {
4099       throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
4100     }
4101   }
4102 
4103   /**
4104    * Execute Restore/Clone snapshot and wait for the server to complete (asynchronous)
4105    * <p>
4106    * Only a single snapshot should be restored at a time, or results may be undefined.
4107    * @param snapshot snapshot to restore
4108    * @return response from the server indicating the max time to wait for the snapshot
4109    * @throws IOException if a remote or network exception occurs
4110    * @throws RestoreSnapshotException if snapshot failed to be restored
4111    * @throws IllegalArgumentException if the restore request is formatted incorrectly
4112    */
4113   private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
4114       throws IOException, RestoreSnapshotException {
4115     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
4116 
4117     final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
4118         .build();
4119 
4120     // run the snapshot restore on the master
4121     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
4122       @Override
4123       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
4124         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4125         controller.setCallTimeout(callTimeout);
4126         return master.restoreSnapshot(controller, request);
4127       }
4128     });
4129   }
4130 
4131   /**
4132    * List completed snapshots.
4133    * @return a list of snapshot descriptors for completed snapshots
4134    * @throws IOException if a network error occurs
4135    */
4136   @Override
4137   public List<SnapshotDescription> listSnapshots() throws IOException {
4138     return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
4139       @Override
4140       public List<SnapshotDescription> call(int callTimeout) throws ServiceException {
4141         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4142         controller.setCallTimeout(callTimeout);
4143         return master.getCompletedSnapshots(controller,
4144           GetCompletedSnapshotsRequest.newBuilder().build()).getSnapshotsList();
4145       }
4146     });
4147   }
4148 
4149   /**
4150    * List all the completed snapshots matching the given regular expression.
4151    *
4152    * @param regex The regular expression to match against
4153    * @return - returns a List of SnapshotDescription
4154    * @throws IOException if a remote or network exception occurs
4155    */
4156   @Override
4157   public List<SnapshotDescription> listSnapshots(String regex) throws IOException {
4158     return listSnapshots(Pattern.compile(regex));
4159   }
4160 
4161   /**
4162    * List all the completed snapshots matching the given pattern.
4163    *
4164    * @param pattern The compiled regular expression to match against
4165    * @return - returns a List of SnapshotDescription
4166    * @throws IOException if a remote or network exception occurs
4167    */
4168   @Override
4169   public List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException {
4170     List<SnapshotDescription> matched = new LinkedList<SnapshotDescription>();
4171     List<SnapshotDescription> snapshots = listSnapshots();
4172     for (SnapshotDescription snapshot : snapshots) {
4173       if (pattern.matcher(snapshot.getName()).matches()) {
4174         matched.add(snapshot);
4175       }
4176     }
4177     return matched;
4178   }
4179 
4180   /**
4181    * Delete an existing snapshot.
4182    * @param snapshotName name of the snapshot
4183    * @throws IOException if a remote or network exception occurs
4184    */
4185   @Override
4186   public void deleteSnapshot(final byte[] snapshotName) throws IOException {
4187     deleteSnapshot(Bytes.toString(snapshotName));
4188   }
4189 
4190   /**
4191    * Delete an existing snapshot.
4192    * @param snapshotName name of the snapshot
4193    * @throws IOException if a remote or network exception occurs
4194    */
4195   @Override
4196   public void deleteSnapshot(final String snapshotName) throws IOException {
4197     // make sure the snapshot is possibly valid
4198     TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
4199     // do the delete
4200     executeCallable(new MasterCallable<Void>(getConnection()) {
4201       @Override
4202       public Void call(int callTimeout) throws ServiceException {
4203         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4204         controller.setCallTimeout(callTimeout);
4205         master.deleteSnapshot(controller,
4206           DeleteSnapshotRequest.newBuilder().
4207             setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build()
4208         );
4209         return null;
4210       }
4211     });
4212   }
4213 
4214   /**
4215    * Delete existing snapshots whose names match the pattern passed.
4216    * @param regex The regular expression to match against
4217    * @throws IOException if a remote or network exception occurs
4218    */
4219   @Override
4220   public void deleteSnapshots(final String regex) throws IOException {
4221     deleteSnapshots(Pattern.compile(regex));
4222   }
4223 
4224   /**
4225    * Delete existing snapshots whose names match the pattern passed.
4226    * @param pattern pattern for names of the snapshot to match
4227    * @throws IOException if a remote or network exception occurs
4228    */
4229   @Override
4230   public void deleteSnapshots(final Pattern pattern) throws IOException {
4231     List<SnapshotDescription> snapshots = listSnapshots(pattern);
4232     for (final SnapshotDescription snapshot : snapshots) {
4233       try {
4234         internalDeleteSnapshot(snapshot);
4235       } catch (IOException ex) {
4236         LOG.info(
4237           "Failed to delete snapshot " + snapshot.getName() + " for table " + snapshot.getTable(),
4238           ex);
4239       }
4240     }
4241   }
4242 
4243   private void internalDeleteSnapshot(final SnapshotDescription snapshot) throws IOException {
4244     executeCallable(new MasterCallable<Void>(getConnection()) {
4245       @Override
4246       public Void call(int callTimeout) throws ServiceException {
4247         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4248         controller.setCallTimeout(callTimeout);
4249         this.master.deleteSnapshot(controller, DeleteSnapshotRequest.newBuilder()
4250           .setSnapshot(snapshot).build());
4251         return null;
4252       }
4253     });
4254   }
4255 
4256   /**
4257    * Apply the new quota settings.
4258    * @param quota the quota settings
4259    * @throws IOException if a remote or network exception occurs
4260    */
4261   @Override
4262   public void setQuota(final QuotaSettings quota) throws IOException {
4263     executeCallable(new MasterCallable<Void>(getConnection()) {
4264       @Override
4265       public Void call(int callTimeout) throws ServiceException {
4266         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4267         controller.setCallTimeout(callTimeout);
4268         this.master.setQuota(controller, QuotaSettings.buildSetQuotaRequestProto(quota));
4269         return null;
4270       }
4271     });
4272   }
4273 
4274   /**
4275    * Return a Quota Scanner to list the quotas based on the filter.
4276    * @param filter the quota settings filter
4277    * @return the quota scanner
4278    * @throws IOException if a remote or network exception occurs
4279    */
4280   @Override
4281   public QuotaRetriever getQuotaRetriever(final QuotaFilter filter) throws IOException {
4282     return QuotaRetriever.open(conf, filter);
4283   }
4284 
4285   private <V> V executeCallable(MasterCallable<V> callable) throws IOException {
4286     return executeCallable(callable, rpcCallerFactory, operationTimeout);
4287   }
4288 
4289   private static <V> V executeCallable(MasterCallable<V> callable,
4290              RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout) throws IOException {
4291     RpcRetryingCaller<V> caller = rpcCallerFactory.newCaller();
4292     try {
4293       return caller.callWithRetries(callable, operationTimeout);
4294     } finally {
4295       callable.close();
4296     }
4297   }
4298 
4299   /**
4300    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4301    * connected to the active master.
4302    *
4303    * <p>
4304    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4305    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4306    * </p>
4307    *
4308    * <div style="background-color: #cccccc; padding: 2px">
4309    * <blockquote><pre>
4310    * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
4311    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4312    * MyCallRequest request = MyCallRequest.newBuilder()
4313    *     ...
4314    *     .build();
4315    * MyCallResponse response = service.myCall(null, request);
4316    * </pre></blockquote></div>
4317    *
4318    * @return A MasterCoprocessorRpcChannel instance
4319    */
4320   @Override
4321   public CoprocessorRpcChannel coprocessorService() {
4322     return new MasterCoprocessorRpcChannel(connection);
4323   }
4324 
4325   /**
4326    * Simple {@link Abortable}, throwing RuntimeException on abort.
4327    */
4328   private static class ThrowableAbortable implements Abortable {
4329 
4330     @Override
4331     public void abort(String why, Throwable e) {
4332       throw new RuntimeException(why, e);
4333     }
4334 
4335     @Override
4336     public boolean isAborted() {
4337       return true;
4338     }
4339   }
4340 
4341   /**
4342    * Creates and returns a {@link com.google.protobuf.RpcChannel} instance
4343    * connected to the passed region server.
4344    *
4345    * <p>
4346    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
4347    * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
4348    * </p>
4349    *
4350    * <div style="background-color: #cccccc; padding: 2px">
4351    * <blockquote><pre>
4352    * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
4353    * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
4354    * MyCallRequest request = MyCallRequest.newBuilder()
4355    *     ...
4356    *     .build();
4357    * MyCallResponse response = service.myCall(null, request);
4358    * </pre></blockquote></div>
4359    *
4360    * @param sn the server name to which the endpoint call is made
4361    * @return A RegionServerCoprocessorRpcChannel instance
4362    */
4363   @Override
4364   public CoprocessorRpcChannel coprocessorService(ServerName sn) {
4365     return new RegionServerCoprocessorRpcChannel(connection, sn);
4366   }
4367 
4368   @Override
4369   public void updateConfiguration(ServerName server) throws IOException {
4370     try {
4371       this.connection.getAdmin(server).updateConfiguration(null,
4372         UpdateConfigurationRequest.getDefaultInstance());
4373     } catch (ServiceException e) {
4374       throw ProtobufUtil.getRemoteException(e);
4375     }
4376   }
4377 
4378   @Override
4379   public void updateConfiguration() throws IOException {
4380     for (ServerName server : this.getClusterStatus().getServers()) {
4381       updateConfiguration(server);
4382     }
4383   }
4384 
4385   @Override
4386   public int getMasterInfoPort() throws IOException {
4387     // TODO: Fix!  Reaching into internal implementation!!!!
4388     ConnectionManager.HConnectionImplementation connection =
4389         (ConnectionManager.HConnectionImplementation)this.connection;
4390     ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher();
4391     try {
4392       return MasterAddressTracker.getMasterInfoPort(zkw);
4393     } catch (KeeperException e) {
4394       throw new IOException("Failed to get master info port from MasterAddressTracker", e);
4395     }
4396   }
4397 
4398   @Override
4399   public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
4400     return executeCallable(new MasterCallable<Long>(getConnection()) {
4401       @Override
4402       public Long call(int callTimeout) throws ServiceException {
4403         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4404         controller.setCallTimeout(callTimeout);
4405         MajorCompactionTimestampRequest req =
4406             MajorCompactionTimestampRequest.newBuilder()
4407                 .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
4408         return master.getLastMajorCompactionTimestamp(controller, req).getCompactionTimestamp();
4409       }
4410     });
4411   }
4412 
4413   @Override
4414   public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
4415     return executeCallable(new MasterCallable<Long>(getConnection()) {
4416       @Override
4417       public Long call(int callTimeout) throws ServiceException {
4418         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4419         controller.setCallTimeout(callTimeout);
4420         MajorCompactionTimestampForRegionRequest req =
4421             MajorCompactionTimestampForRegionRequest
4422                 .newBuilder()
4423                 .setRegion(
4424                   RequestConverter
4425                       .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
4426         return master.getLastMajorCompactionTimestampForRegion(controller, req)
4427             .getCompactionTimestamp();
4428       }
4429     });
4430   }
4431 
4432   /**
4433    * Future that waits on a procedure result.
4434    * Returned by the async version of the Admin calls,
4435    * and used internally by the sync calls to wait on the result of the procedure.
4436    */
4437   @InterfaceAudience.Private
4438   @InterfaceStability.Evolving
4439   protected static class ProcedureFuture<V> implements Future<V> {
4440     private ExecutionException exception = null;
4441     private boolean procResultFound = false;
4442     private boolean done = false;
4443     private boolean cancelled = false;
4444     private V result = null;
4445 
4446     private final HBaseAdmin admin;
4447     private final Long procId;
4448 
4449     public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
4450       this.admin = admin;
4451       this.procId = procId;
4452     }
4453 
4454     @Override
4455     public boolean cancel(boolean mayInterruptIfRunning) {
4456       AbortProcedureRequest abortProcRequest = AbortProcedureRequest.newBuilder()
4457           .setProcId(procId).setMayInterruptIfRunning(mayInterruptIfRunning).build();
4458       try {
4459         cancelled = abortProcedureResult(abortProcRequest).getIsProcedureAborted();
4460         if (cancelled) {
4461           done = true;
4462         }
4463       } catch (IOException e) {
4464         // Cancell thrown exception for some reason. At this time, we are not sure whether
4465         // the cancell succeeds or fails. We assume that it is failed, but print out a warning
4466         // for debugging purpose.
4467         LOG.warn(
4468           "Cancelling the procedure with procId=" + procId + " throws exception " + e.getMessage(),
4469           e);
4470         cancelled = false;
4471       }
4472       return cancelled;
4473     }
4474 
4475     @Override
4476     public boolean isCancelled() {
4477       return cancelled;
4478     }
4479 
4480     protected AbortProcedureResponse abortProcedureResult(
4481         final AbortProcedureRequest request) throws IOException {
4482       return admin.executeCallable(new MasterCallable<AbortProcedureResponse>(
4483           admin.getConnection()) {
4484         @Override
4485         public AbortProcedureResponse call(int callTimeout) throws ServiceException {
4486           PayloadCarryingRpcController controller = admin.getRpcControllerFactory().newController();
4487           controller.setCallTimeout(callTimeout);
4488           return master.abortProcedure(controller, request);
4489         }
4490       });
4491     }
4492 
4493     @Override
4494     public V get() throws InterruptedException, ExecutionException {
4495       // TODO: should we ever spin forever?
4496       throw new UnsupportedOperationException();
4497     }
4498 
4499     @Override
4500     public V get(long timeout, TimeUnit unit)
4501         throws InterruptedException, ExecutionException, TimeoutException {
4502       if (!done) {
4503         long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout);
4504         try {
4505           try {
4506             // if the master support procedures, try to wait the result
4507             if (procId != null) {
4508               result = waitProcedureResult(procId, deadlineTs);
4509             }
4510             // if we don't have a proc result, try the compatibility wait
4511             if (!procResultFound) {
4512               result = waitOperationResult(deadlineTs);
4513             }
4514             result = postOperationResult(result, deadlineTs);
4515             done = true;
4516           } catch (IOException e) {
4517             result = postOpeartionFailure(e, deadlineTs);
4518             done = true;
4519           }
4520         } catch (IOException e) {
4521           exception = new ExecutionException(e);
4522           done = true;
4523         }
4524       }
4525       if (exception != null) {
4526         throw exception;
4527       }
4528       return result;
4529     }
4530 
4531     @Override
4532     public boolean isDone() {
4533       return done;
4534     }
4535 
4536     protected HBaseAdmin getAdmin() {
4537       return admin;
4538     }
4539 
4540     private V waitProcedureResult(long procId, long deadlineTs)
4541         throws IOException, TimeoutException, InterruptedException {
4542       GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder()
4543           .setProcId(procId)
4544           .build();
4545 
4546       int tries = 0;
4547       IOException serviceEx = null;
4548       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4549         GetProcedureResultResponse response = null;
4550         try {
4551           // Try to fetch the result
4552           response = getProcedureResult(request);
4553         } catch (IOException e) {
4554           serviceEx = unwrapException(e);
4555 
4556           // the master may be down
4557           LOG.warn("failed to get the procedure result procId=" + procId, serviceEx);
4558 
4559           // Not much to do, if we have a DoNotRetryIOException
4560           if (serviceEx instanceof DoNotRetryIOException ||
4561               serviceEx instanceof NeedUnmanagedConnectionException) {
4562             // TODO: looks like there is no way to unwrap this exception and get the proper
4563             // UnsupportedOperationException aside from looking at the message.
4564             // anyway, if we fail here we just failover to the compatibility side
4565             // and that is always a valid solution.
4566             LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx);
4567             procResultFound = false;
4568             return null;
4569           }
4570         }
4571 
4572         // If the procedure is no longer running, we should have a result
4573         if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) {
4574           procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND;
4575           return convertResult(response);
4576         }
4577 
4578         try {
4579           Thread.sleep(getAdmin().getPauseTime(tries++));
4580         } catch (InterruptedException e) {
4581           throw new InterruptedException(
4582             "Interrupted while waiting for the result of proc " + procId);
4583         }
4584       }
4585       if (serviceEx != null) {
4586         throw serviceEx;
4587       } else {
4588         throw new TimeoutException("The procedure " + procId + " is still running");
4589       }
4590     }
4591 
4592     private static IOException unwrapException(IOException e) {
4593       if (e instanceof RemoteException) {
4594         return ((RemoteException)e).unwrapRemoteException();
4595       }
4596       return e;
4597     }
4598 
4599     protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request)
4600         throws IOException {
4601       return admin.executeCallable(new MasterCallable<GetProcedureResultResponse>(
4602           admin.getConnection()) {
4603         @Override
4604         public GetProcedureResultResponse call(int callTimeout) throws ServiceException {
4605           return master.getProcedureResult(null, request);
4606         }
4607       });
4608     }
4609 
4610     /**
4611      * Convert the procedure result response to a specified type.
4612      * @param response the procedure result object to parse
4613      * @return the result data of the procedure.
4614      */
4615     protected V convertResult(final GetProcedureResultResponse response) throws IOException {
4616       if (response.hasException()) {
4617         throw ForeignExceptionUtil.toIOException(response.getException());
4618       }
4619       return null;
4620     }
4621 
4622     /**
4623      * Fallback implementation in case the procedure is not supported by the server.
4624      * It should try to wait until the operation is completed.
4625      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4626      * @return the result data of the operation
4627      */
4628     protected V waitOperationResult(final long deadlineTs)
4629         throws IOException, TimeoutException {
4630       return null;
4631     }
4632 
4633     /**
4634      * Called after the operation is completed and the result fetched.
4635      * this allows to perform extra steps after the procedure is completed.
4636      * it allows to apply transformations to the result that will be returned by get().
4637      * @param result the result of the procedure
4638      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4639      * @return the result of the procedure, which may be the same as the passed one
4640      */
4641     protected V postOperationResult(final V result, final long deadlineTs)
4642         throws IOException, TimeoutException {
4643       return result;
4644     }
4645 
4646     /**
4647      * Called after the operation is terminated with a failure.
4648      * this allows to perform extra steps after the procedure is terminated.
4649      * it allows to apply transformations to the result that will be returned by get().
4650      * The default implementation will rethrow the exception
4651      * @param exception the exception got from fetching the result
4652      * @param deadlineTs the timestamp after which this method should throw a TimeoutException
4653      * @return the result of the procedure, which may be the same as the passed one
4654      */
4655     protected V postOpeartionFailure(final IOException exception, final long deadlineTs)
4656         throws IOException, TimeoutException {
4657       throw exception;
4658     }
4659 
4660     protected interface WaitForStateCallable {
4661       boolean checkState(int tries) throws IOException;
4662       void throwInterruptedException() throws InterruptedIOException;
4663       void throwTimeoutException(long elapsed) throws TimeoutException;
4664     }
4665 
4666     protected void waitForState(final long deadlineTs, final WaitForStateCallable callable)
4667         throws IOException, TimeoutException {
4668       int tries = 0;
4669       IOException serverEx = null;
4670       long startTime = EnvironmentEdgeManager.currentTime();
4671       while (EnvironmentEdgeManager.currentTime() < deadlineTs) {
4672         serverEx = null;
4673         try {
4674           if (callable.checkState(tries)) {
4675             return;
4676           }
4677         } catch (IOException e) {
4678           serverEx = e;
4679         }
4680         try {
4681           Thread.sleep(getAdmin().getPauseTime(tries++));
4682         } catch (InterruptedException e) {
4683           callable.throwInterruptedException();
4684         }
4685       }
4686       if (serverEx != null) {
4687         throw unwrapException(serverEx);
4688       } else {
4689         callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime);
4690       }
4691     }
4692   }
4693 
4694   @Override
4695   public List<SecurityCapability> getSecurityCapabilities() throws IOException {
4696     try {
4697       return executeCallable(new MasterCallable<List<SecurityCapability>>(getConnection()) {
4698         @Override
4699         public List<SecurityCapability> call(int callTimeout) throws ServiceException {
4700           PayloadCarryingRpcController controller = rpcControllerFactory.newController();
4701           controller.setCallTimeout(callTimeout);
4702           SecurityCapabilitiesRequest req = SecurityCapabilitiesRequest.newBuilder().build();
4703           return ProtobufUtil.toSecurityCapabilityList(
4704             master.getSecurityCapabilities(controller, req).getCapabilitiesList());
4705         }
4706       });
4707     } catch (IOException e) {
4708       if (e instanceof RemoteException) {
4709         e = ((RemoteException)e).unwrapRemoteException();
4710       }
4711       throw e;
4712     }
4713   }
4714 
4715   private RpcControllerFactory getRpcControllerFactory() {
4716     return rpcControllerFactory;
4717   }
4718 }