/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.NavigableMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Test a Map/Reduce job over HBase tables. The map/reduce process we're
 * testing on our tables is simple: take every row in the table, reverse the
 * value of a particular cell, and write it back to the table.
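 * <p>
 * For orientation, this is roughly how {@link MultithreadedTableMapper} is
 * wired into a job in this test (a sketch of the setup done in
 * {@code runTestOnTable} below, not a standalone recipe; the mapper class,
 * table, and thread count are this test's own):
 * <pre>{@code
 * Job job = new Job(conf, "process column contents");
 * TableMapReduceUtil.initTableMapperJob(tableName, scan,
 *     MultithreadedTableMapper.class, ImmutableBytesWritable.class,
 *     Put.class, job);
 * MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class);
 * MultithreadedTableMapper.setNumberOfThreads(job, 10);
 * }</pre>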
 */
@Category(LargeTests.class)
public class TestMultithreadedTableMapper {
  private static final Log LOG = LogFactory.getLog(TestMultithreadedTableMapper.class);
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  static final TableName MULTI_REGION_TABLE_NAME = TableName.valueOf("mrtest");
  static final byte[] INPUT_FAMILY = Bytes.toBytes("contents");
  static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text");
  static final int NUMBER_OF_THREADS = 10;

  @BeforeClass
  public static void beforeClass() throws Exception {
    // Up the handlers; this test needs more than usual.
    UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
    UTIL.setJobWithoutMRCluster();
    UTIL.startMiniCluster();
    HTable table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME,
        new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY });
    UTIL.loadTable(table, INPUT_FAMILY, false);
    UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME);
  }

  @AfterClass
  public static void afterClass() throws Exception {
    UTIL.shutdownMiniCluster();
  }

  /**
   * Passes the given key and the processed record on to reduce.
   */
  public static class ProcessContentsMapper
      extends TableMapper<ImmutableBytesWritable, Put> {

    /**
     * Passes the key and the reversed value to reduce.
     *
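     * For example (an illustrative value, not actual test data):
     * <pre>
     * input : contents = 'abcde'
     * output: Put(row, text:'edcba')
     * </pre>
     *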
     * @param key the row key
     * @param value the columns of the row
     * @param context the task context used to emit the output
     * @throws IOException if the row does not have exactly one input column
     */
    @Override
    public void map(ImmutableBytesWritable key, Result value, Context context)
        throws IOException, InterruptedException {
      if (value.size() != 1) {
        throw new IOException("There should only be one input column");
      }
      Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> cf = value.getMap();
      if (!cf.containsKey(INPUT_FAMILY)) {
        throw new IOException("Wrong input columns. Missing: '" +
            Bytes.toString(INPUT_FAMILY) + "'.");
      }
      // Get the original value and reverse it
      String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, null));
      String newValue = new StringBuilder(originalValue).reverse().toString();
      // Now set the value to be collected
      Put outval = new Put(key.get());
      outval.add(OUTPUT_FAMILY, null, Bytes.toBytes(newValue));
      context.write(key, outval);
    }
  }

  /**
   * Tests a {@link MultithreadedTableMapper} map/reduce job against a
   * multi-region table.
   * @throws IOException
   * @throws ClassNotFoundException
   * @throws InterruptedException
   */
  @Test
  public void testMultithreadedTableMapper()
      throws IOException, InterruptedException, ClassNotFoundException {
    runTestOnTable(new HTable(new Configuration(UTIL.getConfiguration()),
        MULTI_REGION_TABLE_NAME));
  }

  private void runTestOnTable(HTable table)
      throws IOException, InterruptedException, ClassNotFoundException {
    Job job = null;
    try {
      LOG.info("Before map/reduce startup");
      job = new Job(table.getConfiguration(), "process column contents");
      job.setNumReduceTasks(1);
      Scan scan = new Scan();
      scan.addFamily(INPUT_FAMILY);
      TableMapReduceUtil.initTableMapperJob(
          table.getTableName(), scan,
          MultithreadedTableMapper.class, ImmutableBytesWritable.class,
          Put.class, job);
      MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class);
      MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS);
      TableMapReduceUtil.initTableReducerJob(
          Bytes.toString(table.getTableName()),
          IdentityTableReducer.class, job);
      FileOutputFormat.setOutputPath(job, new Path("test"));
      LOG.info("Started " + table.getTableName());
      assertTrue(job.waitForCompletion(true));
      LOG.info("After map/reduce completion");
      // Verify the map-reduce results.
      verify(table.getName());
    } finally {
      table.close();
      if (job != null) {
        FileUtil.fullyDelete(
            new File(job.getConfiguration().get("hadoop.tmp.dir")));
      }
    }
  }

  private void verify(TableName tableName) throws IOException {
    Table table = new HTable(new Configuration(UTIL.getConfiguration()), tableName);
    boolean verified = false;
    long pause = UTIL.getConfiguration().getLong("hbase.client.pause", 5 * 1000);
    int numRetries = UTIL.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
    for (int i = 0; i < numRetries; i++) {
      try {
        LOG.info("Verification attempt #" + i);
        verifyAttempt(table);
        verified = true;
        break;
      } catch (NullPointerException e) {
        // If here, a cell was empty. Presume it's because updates came in
        // after the scanner had been opened. Wait a while and retry.
        LOG.debug("Verification attempt failed: " + e.getMessage());
      }
      try {
        Thread.sleep(pause);
      } catch (InterruptedException e) {
        // continue
      }
    }
    assertTrue(verified);
    table.close();
  }

  /**
   * Looks at every value of the map/reduce output and verifies that indeed
   * the values have been reversed.
   *
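   * For illustration (a hypothetical row, not actual test data), a row passes
   * verification when its output value is the reverse of its input value:
   * <pre>
   * row 'r1': contents = 'abc', text = 'cba'
   * </pre>
   *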
   * @param table Table to scan.
   * @throws IOException
   * @throws NullPointerException if we failed to find a cell value
   */
  private void verifyAttempt(final Table table)
      throws IOException, NullPointerException {
    Scan scan = new Scan();
    scan.addFamily(INPUT_FAMILY);
    scan.addFamily(OUTPUT_FAMILY);
    ResultScanner scanner = table.getScanner(scan);
    try {
      Iterator<Result> itr = scanner.iterator();
      assertTrue(itr.hasNext());
      while (itr.hasNext()) {
        Result r = itr.next();
        if (r.size() > 2) {
          throw new IOException("Too many results, expected 2 got " + r.size());
        }
        byte[] firstValue = null;
        byte[] secondValue = null;
        int count = 0;
        for (Cell kv : r.listCells()) {
          if (count == 0) {
            firstValue = CellUtil.cloneValue(kv);
          } else if (count == 1) {
            secondValue = CellUtil.cloneValue(kv);
          } else if (count == 2) {
            break;
          }
          count++;
        }
        if (firstValue == null) {
          throw new NullPointerException(Bytes.toString(r.getRow()) +
              ": first value is null");
        }
        String first = Bytes.toString(firstValue);
        if (secondValue == null) {
          throw new NullPointerException(Bytes.toString(r.getRow()) +
              ": second value is null");
        }
        // Reverse the bytes of the second (output) value; it should then
        // match the first (input) value.
        byte[] secondReversed = new byte[secondValue.length];
        for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) {
          secondReversed[i] = secondValue[j];
        }
        String second = Bytes.toString(secondReversed);
        if (!first.equals(second)) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("second key is not the reverse of first. row=" +
                Bytes.toStringBinary(r.getRow()) + ", first value=" + first +
                ", second value=" + second);
          }
          fail();
        }
      }
    } finally {
      scanner.close();
    }
  }

}