/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests for WAL write durability: verifies that table-level and per-mutation
 * {@link Durability} settings control whether and when edits are appended and
 * synced to the WAL.
 */
@Category(MediumTests.class)
public class TestDurability {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static FileSystem FS;
  private static MiniDFSCluster CLUSTER;
  private static Configuration CONF;
  private static Path DIR;

  private static byte[] FAMILY = Bytes.toBytes("family");
  private static byte[] ROW = Bytes.toBytes("row");
  private static byte[] COL = Bytes.toBytes("col");

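  // Spin up a single-node mini DFS cluster once for all tests and point the
  // HBase root dir at a test directory on that filesystem; the WALs written by
  // the tests below end up under this directory.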
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    CONF = TEST_UTIL.getConfiguration();
    TEST_UTIL.startMiniDFSCluster(1);

    CLUSTER = TEST_UTIL.getDFSCluster();
    FS = CLUSTER.getFileSystem();
    DIR = TEST_UTIL.getDataTestDirOnTestFS("TestDurability");
    FSUtils.setRootDir(CONF, DIR);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

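  // Two regions share one WAL: "region" uses the default (synchronous) table
  // durability, while "deferredRegion" defaults to ASYNC_WAL. Each step below
  // issues puts with different Durability settings and then checks how many
  // entries are visible in the WAL file.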
  @Test
  public void testDurability() throws Exception {
    final WALFactory wals = new WALFactory(CONF, null, "TestDurability");
    byte[] tableName = Bytes.toBytes("TestDurability");
    final WAL wal = wals.getWAL(tableName);
    HRegion region = createHRegion(tableName, "region", wal, Durability.USE_DEFAULT);
    HRegion deferredRegion = createHRegion(tableName, "deferredRegion", wal, Durability.ASYNC_WAL);

    region.put(newPut(null));
    verifyWALCount(wals, wal, 1);

    // a put through the deferred table does not write to the wal immediately,
    // though it may already have been sync'ed by the underlying AsyncWriter +
    // AsyncFlusher thread
    deferredRegion.put(newPut(null));
    // but it will be there once we sync the wal
    wal.sync();
    verifyWALCount(wals, wal, 2);

    // a put through a deferred table is sync'ed along with the next wal sync
    // or sync'ed put
    deferredRegion.put(newPut(null));
    wal.sync();
    verifyWALCount(wals, wal, 3);
    region.put(newPut(null));
    verifyWALCount(wals, wal, 4);

    // same as above, but with an explicit USE_DEFAULT durability on the mutation
    deferredRegion.put(newPut(Durability.USE_DEFAULT));
    wal.sync();
    verifyWALCount(wals, wal, 5);
    region.put(newPut(Durability.USE_DEFAULT));
    verifyWALCount(wals, wal, 6);

    // SKIP_WAL never writes to the wal
    region.put(newPut(Durability.SKIP_WAL));
    deferredRegion.put(newPut(Durability.SKIP_WAL));
    verifyWALCount(wals, wal, 6);
    wal.sync();
    verifyWALCount(wals, wal, 6);

    // ASYNC_WAL on the mutation overrides the sync table default
    region.put(newPut(Durability.ASYNC_WAL));
    deferredRegion.put(newPut(Durability.ASYNC_WAL));
    wal.sync();
    verifyWALCount(wals, wal, 8);

    // SYNC_WAL on the mutation overrides the async table default
    region.put(newPut(Durability.SYNC_WAL));
    deferredRegion.put(newPut(Durability.SYNC_WAL));
    verifyWALCount(wals, wal, 10);

    // FSYNC_WAL behaves like SYNC_WAL
    region.put(newPut(Durability.FSYNC_WAL));
    deferredRegion.put(newPut(Durability.FSYNC_WAL));
    verifyWALCount(wals, wal, 12);
  }

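  // Increments only append to the WAL when they actually change a cell: a
  // delta of 0 leaves the stored value untouched and produces no new WAL
  // entry, while an Increment with non-zero deltas appends one entry covering
  // all of its columns.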
  @Test
  public void testIncrement() throws Exception {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");
    byte[] col2 = Bytes.toBytes("col2");
    byte[] col3 = Bytes.toBytes("col3");

    // Setting up region
    final WALFactory wals = new WALFactory(CONF, null, "TestIncrement");
    byte[] tableName = Bytes.toBytes("TestIncrement");
    final WAL wal = wals.getWAL(tableName);
    HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);

    // col1: amount = 1, 1 write to the WAL
    Increment inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 1);
    Result res = region.increment(inc1);
    assertEquals(1, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    verifyWALCount(wals, wal, 1);

    // col1: amount = 0, no write to the WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 0);
    res = region.increment(inc1);
    assertEquals(1, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    verifyWALCount(wals, wal, 1);

    // col1: amount = 0, col2: amount = 0, col3: amount = 0
    // no write to the WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 0);
    inc1.addColumn(FAMILY, col2, 0);
    inc1.addColumn(FAMILY, col3, 0);
    res = region.increment(inc1);
    assertEquals(3, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col2)));
    assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col3)));
    verifyWALCount(wals, wal, 1);

    // col1: amount = 5, col2: amount = 4, col3: amount = 3
    // 1 write to the WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 5);
    inc1.addColumn(FAMILY, col2, 4);
    inc1.addColumn(FAMILY, col3, 3);
    res = region.increment(inc1);
    assertEquals(3, res.size());
    assertEquals(6, Bytes.toLong(res.getValue(FAMILY, col1)));
    assertEquals(4, Bytes.toLong(res.getValue(FAMILY, col2)));
    assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3)));
    verifyWALCount(wals, wal, 2);
  }

  /*
   * Test that when returnResults is set to false on an Increment, the region
   * does not return the result and instead returns null.
   */
  @Test
  public void testIncrementWithReturnResultsSetToFalse() throws Exception {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");

    // Setting up region
    final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse");
    byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse");
    final WAL wal = wals.getWAL(tableName);
    HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);

    Increment inc1 = new Increment(row1);
    inc1.setReturnResults(false);
    inc1.addColumn(FAMILY, col1, 1);
    Result res = region.increment(inc1);
    assertNull(res);
  }

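  // Builds a Put against the fixed ROW/FAMILY/COL used by these tests; when a
  // Durability is supplied it is set on the mutation, otherwise the put falls
  // back to the table default.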
  private Put newPut(Durability durability) {
    Put p = new Put(ROW);
    p.add(FAMILY, COL, COL);
    if (durability != null) {
      p.setDurability(durability);
    }
    return p;
  }

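  // Opens the provider's current WAL file with a fresh reader and asserts the
  // total number of entries persisted so far; the tests never roll the WAL, so
  // that single file holds every entry.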
  private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
    Path walPath = DefaultWALProvider.getCurrentFileName(log);
    WAL.Reader reader = wals.createReader(FS, walPath);
    int count = 0;
    WAL.Entry entry = new WAL.Entry();
    while (reader.next(entry) != null) {
      count++;
    }
    reader.close();
    assertEquals(expected, count);
  }

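  // Creates a fresh HRegion backed by the shared WAL, with the requested
  // default durability set on the table descriptor; any region directory left
  // over from a previous run of the calling method is deleted first.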
  // lifted from TestAtomicOperation
  private HRegion createHRegion(byte[] tableName, String callingMethod,
      WAL log, Durability durability) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setDurability(durability);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    if (FS.exists(path)) {
      if (!FS.delete(path, true)) {
        throw new IOException("Failed delete of " + path);
      }
    }
    return HRegion.createHRegion(info, path, CONF, htd, log);
  }

}