/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.io.IOException;

/**
 * Test case checking that the region server throws {@link RowTooBigException}
 * when the total size of a row exceeds the configured limit
 * ({@link HConstants#TABLE_MAX_ROWSIZE_KEY}).
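 * <p>
 * As a point of reference, here is a minimal sketch (mirroring what {@link #before()}
 * does below) of lowering that limit on a client-side {@code Configuration}; the
 * 10 MB value is only this test's choice, not a recommended setting:
 * <pre>{@code
 * Configuration conf = HBaseConfiguration.create();
 * // Rows whose total size exceeds this limit cause RowTooBigException on a plain Get.
 * conf.setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, 10 * 1024 * 1024L); // 10 MB
 * }</pre>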
 */
@Category(MediumTests.class)
public class TestRowTooBig {
  private static final HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU();
  private static Path rootRegionDir;
  private static final HTableDescriptor TEST_HTD =
    new HTableDescriptor(TableName.valueOf(TestRowTooBig.class.getSimpleName()));

  @BeforeClass
  public static void before() throws Exception {
    HTU.startMiniCluster();
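    // Cap the maximum row size ("hbase.table.max.rowsize") at 10 MB so that the
    // oversized rows written by the tests below trip RowTooBigException on Get.
    // The regions under test are opened directly from HTU.getConfiguration(),
    // so setting the key after the mini cluster has started is still effective.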
    HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY,
      10 * 1024 * 1024L);
    rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig");
  }

  @AfterClass
  public static void after() throws Exception {
    HTU.shutdownMiniCluster();
  }

  /**
   * Use case:
   *  - create a row with 5 large cells (5 MB each),
   *  - flush the memstore, but don't compact the store files,
   *  - try to Get the whole row.
   *
   * Before the row-size limit was enforced, an OOME occurred even before any
   * results were read: during seeking, each StoreFile gets its own scanner,
   * and each scanner seeks to the first KV of the row.
   * @throws IOException
   */
  @Test(expected = RowTooBigException.class)
  public void testScannersSeekOnFewLargeCells() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    HTableDescriptor htd = TEST_HTD;
    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
    if (htd.hasFamily(hcd.getName())) {
      htd.modifyFamily(hcd);
    } else {
      htd.addFamily(hcd);
    }

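    // Open a region spanning the whole key range directly against the test
    // filesystem; it is not deployed on the mini cluster's region servers.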
    final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    Region region = HTU.createHRegion(hri, rootRegionDir, HTU.getConfiguration(), htd);
    try {
      // Write 5 cells of 5 MB each to the same row, flushing after every put
      // so that each cell ends up in its own store file.
      for (int i = 0; i < 5; i++) {
        Put put = new Put(row1);

        put.add(fam1, Bytes.toBytes("col_" + i), new byte[5 * 1024 * 1024]);
        region.put(put);
        region.flush(true);
      }

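      // The row now totals 5 x 5 MB = 25 MB, well over the 10 MB limit, so a
      // full-row Get is expected to fail with RowTooBigException.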
      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtility.closeRegion(region);
    }
  }

  /**
   * Use case:
   *
   *  - create a row with 1M cells of 10 bytes each,
   *  - flush and run a major compaction,
   *  - try to Get the whole row.
   *
   * Before the row-size limit was enforced, an OOME occurred in
   * StoreScanner.next(..) while accumulating the row's cells.
   *
   * @throws IOException
   */
  @Test(expected = RowTooBigException.class)
  public void testScanAcrossManySmallColumns() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");

    HTableDescriptor htd = TEST_HTD;
    HColumnDescriptor hcd = new HColumnDescriptor(fam1);
    if (htd.hasFamily(hcd.getName())) {
      htd.modifyFamily(hcd);
    } else {
      htd.addFamily(hcd);
    }

    final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW);
    Region region = HTU.createHRegion(hri, rootRegionDir, HTU.getConfiguration(), htd);
    try {
      // Write 1M cells of 10 bytes each to the same row: 10 puts of 100,000
      // cells, flushing the memstore after every put.
      for (int i = 0; i < 10; i++) {
        Put put = new Put(row1);
        for (int j = 0; j < 10 * 10000; j++) {
          put.add(fam1, Bytes.toBytes("col_" + i + "_" + j), new byte[10]);
        }
        region.put(put);
        region.flush(true);
      }
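      // Major-compact the 10 flushed files into a single store file, so the row
      // is read back through one StoreScanner.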
      region.compact(true);

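      // Roughly 1M cells x 10-byte values (plus key overhead) exceeds the 10 MB
      // limit, so a full-row Get is expected to fail with RowTooBigException.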
      Get get = new Get(row1);
      region.get(get);
    } finally {
      HBaseTestingUtility.closeRegion(region);
    }
  }
}