/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.migration.NamespaceUpgrade;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test the migration that changes HRI serialization to PB. Tests by bringing up a cluster on
 * actual data from a 0.92 cluster, as well as by manually downgrading and then upgrading the
 * hbase:meta info.
 * @deprecated Remove after 0.96
 */
@Category(MediumTests.class)
@Deprecated
public class TestMetaMigrationConvertingToPB {
  static final Log LOG = LogFactory.getLog(TestMetaMigrationConvertingToPB.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private final static String TESTTABLE = "TestTable";

  private final static int ROW_COUNT = 100;
  private final static int REGION_COUNT = 9; // Initial number of regions in TestTable.

  private static final int META_VERSION_092 = 0;

  /*
   * This test uses a tgz file named "TestMetaMigrationConvertToPB.tgz" under
   * hbase-server/src/test/data which contains file data from a 0.92 cluster.
   * The cluster has a table named "TestTable" with 100 rows. 0.94 has the same
   * hbase:meta structure, so the same fixture covers it as well.
   *
   * hbase(main):001:0> create 'TestTable', 'f1'
   * hbase(main):002:0> for i in 1..100
   * hbase(main):003:1> put 'TestTable', "row#{i}", "f1:c1", i
   * hbase(main):004:1> end
   *
   * There are 9 regions in the table.
   */
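
  /*
   * For reference, a minimal sketch of the equivalent load using the Java client of
   * that era (the fixture itself was produced with the shell commands above; the
   * string encoding of the value is an assumption about how the shell stores it):
   *
   *   HTable table = new HTable(conf, "TestTable");
   *   for (int i = 1; i <= 100; i++) {
   *     Put put = new Put(Bytes.toBytes("row" + i));
   *     put.add(Bytes.toBytes("f1"), Bytes.toBytes("c1"), Bytes.toBytes(String.valueOf(i)));
   *     table.put(put);
   *   }
   *   table.close();
   */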

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Start up our mini cluster on top of a 0.92 root.dir that has data from
    // a 0.92 hbase run -- it has a table with 100 rows in it -- and see if
    // we can migrate it from 0.92.
    TEST_UTIL.startMiniZKCluster();
    TEST_UTIL.startMiniDFSCluster(1);
    Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationConvertToPB");
    // Untar our test dir.
    File untar = untar(new File(testdir.toString()));
    // Now copy the untarred data up into hdfs so when we start hbase, we'll run from it.
    Configuration conf = TEST_UTIL.getConfiguration();
    FsShell shell = new FsShell(conf);
    FileSystem fs = FileSystem.get(conf);
    // Find where hbase will root itself, so we can copy the filesystem there.
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    if (!fs.isDirectory(hbaseRootDir.getParent())) {
      // mkdir at first
      fs.mkdirs(hbaseRootDir.getParent());
    }
    doFsCommand(shell,
      new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()});

    // Windows fix: in the tgz the .META. directory is renamed to -META- since the
    // original is an illegal name under Windows. So we rename it back.
    // See src/test/data/TestMetaMigrationConvertingToPB.README and
    // https://issues.apache.org/jira/browse/HBASE-6821
    doFsCommand(shell, new String [] {"-mv", new Path(hbaseRootDir, "-META-").toString(),
      new Path(hbaseRootDir, ".META.").toString()});
    // See what's in the mini HDFS.
    doFsCommand(shell, new String [] {"-lsr", "/"});

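    // The 0.92 layout keeps table directories (including .META. and -ROOT-) directly
    // under the hbase root dir; 0.96 moves user tables under data/<namespace>/<table>.
    // NamespaceUpgrade performs that relocation (a summary of the tool's effect that
    // this setup relies on, not something asserted here).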
    // Upgrade to namespaces as well.
    Configuration toolConf = TEST_UTIL.getConfiguration();
    toolConf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
    ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});

    TEST_UTIL.startMiniHBaseCluster(1, 1);
    // Assert we are running against the copied-up filesystem. The copied-up
    // rootdir should have had a table named 'TestTable' in it. Assert it
    // is present.
    HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE);
    ResultScanner scanner = t.getScanner(new Scan());
    int count = 0;
    while (scanner.next() != null) {
      count++;
    }
    // Assert that we find all 100 rows that are in the data we loaded. If
    // so then we must have successfully migrated the 0.92 data.
    assertEquals(ROW_COUNT, count);
    scanner.close();
    t.close();
  }

  private static File untar(final File testdir) throws IOException {
    // Find the src data under src/test/data.
    final String datafile = "TestMetaMigrationConvertToPB";
    String srcTarFile =
      System.getProperty("project.build.testSourceDirectory", "src/test") +
      File.separator + "data" + File.separator + datafile + ".tgz";
    File homedir = new File(testdir.toString());
    File tgtUntarDir = new File(homedir, datafile);
    if (tgtUntarDir.exists()) {
      if (!FileUtil.fullyDelete(tgtUntarDir)) {
        throw new IOException("Failed delete of " + tgtUntarDir.toString());
      }
    }
    LOG.info("Untarring " + srcTarFile + " into " + homedir.toString());
    FileUtil.unTar(new File(srcTarFile), homedir);
    assertTrue(tgtUntarDir.exists());
    return tgtUntarDir;
  }

  private static void doFsCommand(final FsShell shell, final String [] args)
  throws Exception {
    // Run the given fs shell command.
    int errcode = shell.run(args);
    if (errcode != 0) {
      throw new IOException("Failed fs command " + Arrays.toString(args) + "; errcode=" + errcode);
    }
  }

  /**
   * Shuts down the mini cluster.
   * @throws java.lang.Exception
   */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testMetaUpdatedFlagInROOT() throws Exception {
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    boolean metaUpdated =
        MetaMigrationConvertingToPB.isMetaTableUpdated(master.getConnection());
    assertTrue(metaUpdated);
    verifyMetaRowsAreUpdated(master.getConnection());
  }

  @Test
  public void testMetaMigration() throws Exception {
    LOG.info("Starting testMetaMigration");
    final byte[] FAMILY = Bytes.toBytes("family");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testMetaMigration"));
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    Configuration conf = TEST_UTIL.getConfiguration();
    byte[][] regionNames = new byte[][]{
        HConstants.EMPTY_START_ROW,
        Bytes.toBytes("region_a"),
        Bytes.toBytes("region_b")};
    createMultiRegionsWithWritableSerialization(conf,
        htd.getTableName().getName(),
        regionNames);
    HConnection masterHConnection =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getConnection();
    // Erase the current meta version for this test.
    undoVersionInRoot();
    MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
    LOG.info("Meta print completed: testMetaMigration");

    long numMigratedRows = MetaMigrationConvertingToPB.updateMeta(
        TEST_UTIL.getHBaseCluster().getMaster());
    MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);

    // One migrated row per region of the table we just added.
    assertEquals(regionNames.length, numMigratedRows);

    // Assert that the meta version flag is updated to reflect the migrated state.
    boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
    assertTrue(metaUpdated);
    verifyMetaRowsAreUpdated(masterHConnection);
  }

  /**
   * This test simulates a master crash/failure during the meta migration and
   * verifies that the migration can continue when a new master takes over.
   * When a master dies during the meta migration we are left with some rows of
   * the hbase:meta catalog family updated with PB serialization and some still
   * carrying the legacy Writable serialization. When the backup master, or a
   * fresh master on restart, attempts the migration it will encounter some rows
   * already updated with the new HRI and some still legacy. This test simulates
   * that scenario and validates that the migration process can safely skip the
   * updated rows and migrate any pending rows at startup.
   * @throws Exception
   */
  @Test
  public void testMasterCrashDuringMetaMigration() throws Exception {
    final byte[] FAMILY = Bytes.toBytes("family");
    HTableDescriptor htd = new HTableDescriptor(
        TableName.valueOf("testMasterCrashDuringMetaMigration"));
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    Configuration conf = TEST_UTIL.getConfiguration();
    // Create 10 new regions.
    createMultiRegionsWithPBSerialization(conf, htd.getTableName().getName(), 10);
    // Create 10 legacy regions.
    createMultiRegionsWithWritableSerialization(conf,
        htd.getTableName().getName(), 10);
    HConnection masterHConnection =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getConnection();
    // Erase the current meta version for this test.
    undoVersionInRoot();

    MetaTableAccessor.fullScanMetaAndPrint(masterHConnection);
    LOG.info("Meta print completed: testMasterCrashDuringMetaMigration");

    // Only the 10 Writable-serialized regions need migrating; the PB ones are skipped.
    long numMigratedRows =
        MetaMigrationConvertingToPB.updateMetaIfNecessary(
            TEST_UTIL.getHBaseCluster().getMaster());
    assertEquals(10, numMigratedRows);

    // Assert that the meta version flag is updated to reflect the migrated state.
    boolean metaUpdated = MetaMigrationConvertingToPB.isMetaTableUpdated(masterHConnection);
    assertTrue(metaUpdated);

    verifyMetaRowsAreUpdated(masterHConnection);

    LOG.info("END testMasterCrashDuringMetaMigration");
  }

  /**
   * Verify that every hbase:meta row has been updated to PB serialization.
   */
  void verifyMetaRowsAreUpdated(HConnection hConnection)
      throws IOException {
    List<Result> results = MetaTableAccessor.fullScan(hConnection);
    assertTrue(results.size() >= REGION_COUNT);

    for (Result result : results) {
      byte[] hriBytes = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER);
      assertTrue(hriBytes != null && hriBytes.length > 0);
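      // isMigrated is expected to recognize PB-serialized values by the short
      // "PBUF" magic prefix HBase puts in front of protobuf-encoded cells (an
      // assumption about its implementation; the test only relies on the boolean).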
      assertTrue(MetaMigrationConvertingToPB.isMigrated(hriBytes));

      byte[] splitA = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.SPLITA_QUALIFIER);
      if (splitA != null && splitA.length > 0) {
        assertTrue(MetaMigrationConvertingToPB.isMigrated(splitA));
      }

      byte[] splitB = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.SPLITB_QUALIFIER);
      if (splitB != null && splitB.length > 0) {
        assertTrue(MetaMigrationConvertingToPB.isMigrated(splitB));
      }
    }
  }

  /** Changes the version of hbase:meta to 0 to simulate 0.92 and 0.94 clusters. */
  private void undoVersionInRoot() throws IOException {
    Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());

    p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
        Bytes.toBytes(META_VERSION_092));

    // TODO wire this MetaEditor.putToRootTable(ct, p);
    LOG.info("Downgraded -ROOT- meta version=" + META_VERSION_092);
  }

  /**
   * Inserts multiple regions into hbase:meta using Writable serialization instead of PB.
   */
  public int createMultiRegionsWithWritableSerialization(final Configuration c,
      final byte[] tableName, int numRegions) throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
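    // As used here, Bytes.split returns the two endpoints plus (numRegions - 3)
    // intermediate keys, i.e. numRegions - 1 keys in total; shifting them right by
    // one slot and prepending the empty start key below yields exactly numRegions
    // region start keys.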
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegionsWithWritableSerialization(c, tableName, regionStartKeys);
  }

  public int createMultiRegionsWithWritableSerialization(final Configuration c,
      final byte[] tableName, byte[][] startKeys)
  throws IOException {
    return createMultiRegionsWithWritableSerialization(c,
        TableName.valueOf(tableName), startKeys);
  }

  /**
   * Inserts multiple regions into hbase:meta using Writable serialization instead of PB.
   */
  public int createMultiRegionsWithWritableSerialization(final Configuration c,
      final TableName tableName, byte[][] startKeys)
  throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);

    List<HRegionInfo> newRegions
        = new ArrayList<HRegionInfo>(startKeys.length);
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
      Put put = new Put(hri.getRegionName());
      put.setDurability(Durability.SKIP_WAL);
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
        getBytes(hri)); // This is the old Writable serialization.

      // Also add the region as its own daughters, so the migration of the
      // split qualifiers gets exercised too.
      put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER,
          getBytes(hri)); // This is the old Writable serialization.

      put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
          getBytes(hri)); // This is the old Writable serialization.

      meta.put(put);
      LOG.info("createMultiRegionsWithWritableSerialization: PUT inserted " + hri.toString());

      newRegions.add(hri);
      count++;
    }
    meta.close();
    return count;
  }

  @Deprecated
  private byte[] getBytes(HRegionInfo hri) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    try {
      hri.write(out); // The old Writable serialization.
      return out.getData();
    } finally {
      out.close();
    }
  }
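
  /**
   * The inverse of {@link #getBytes(HRegionInfo)} -- a minimal sketch, not part of
   * the original test, showing how legacy Writable bytes are read back into an
   * HRegionInfo. This is essentially what the migration has to do for each legacy
   * row before it can rewrite the value as PB. It relies on the deprecated Writable
   * methods still present on HRegionInfo in this era (the same assumption
   * {@link #getBytes(HRegionInfo)} makes for the write side).
   */
  @Deprecated
  private HRegionInfo getHRegionInfo(byte[] bytes) throws IOException {
    org.apache.hadoop.io.DataInputBuffer in = new org.apache.hadoop.io.DataInputBuffer();
    in.reset(bytes, bytes.length);
    HRegionInfo hri = new HRegionInfo();
    hri.readFields(in); // The old Writable deserialization.
    return hri;
  }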

  /**
   * Inserts multiple regions into hbase:meta using PB serialization.
   */
  int createMultiRegionsWithPBSerialization(final Configuration c,
      final byte[] tableName, int numRegions)
  throws IOException {
    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
    byte[] startKey = Bytes.toBytes("aaaaa");
    byte[] endKey = Bytes.toBytes("zzzzz");
    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
    byte[][] regionStartKeys = new byte[splitKeys.length + 1][];
    for (int i = 0; i < splitKeys.length; i++) {
      regionStartKeys[i + 1] = splitKeys[i];
    }
    regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
    return createMultiRegionsWithPBSerialization(c, tableName, regionStartKeys);
  }

  /**
   * Inserts multiple regions into hbase:meta using PB serialization.
   */
  int createMultiRegionsWithPBSerialization(final Configuration c, final byte[] tableName,
      byte[][] startKeys) throws IOException {
    return createMultiRegionsWithPBSerialization(c,
        TableName.valueOf(tableName), startKeys);
  }

  int createMultiRegionsWithPBSerialization(final Configuration c,
      final TableName tableName,
      byte[][] startKeys) throws IOException {
    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
    HTable meta = new HTable(c, TableName.META_TABLE_NAME);

    List<HRegionInfo> newRegions
        = new ArrayList<HRegionInfo>(startKeys.length);
    int count = 0;
    for (int i = 0; i < startKeys.length; i++) {
      int j = (i + 1) % startKeys.length;
      HRegionInfo hri = new HRegionInfo(tableName, startKeys[i], startKeys[j]);
      Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
      put.setDurability(Durability.SKIP_WAL);
      meta.put(put);
      LOG.info("createMultiRegionsWithPBSerialization: PUT inserted " + hri.toString());

      newRegions.add(hri);
      count++;
    }
    meta.close();
    return count;
  }

}
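
// To run only this test from the hbase-server module (assuming HBase's standard
// Maven Surefire setup; the exact profile wiring for MediumTests may vary):
//
//   mvn test -Dtest=TestMetaMigrationConvertingToPB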