/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HadoopShims;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;

/**
 * Simple test for {@link KeyValueSortReducer} and {@link HFileOutputFormat2}.
 * Sets up and runs a mapreduce job that writes hfile output.
 * Creates a few inner classes to implement splits and an inputformat that
 * emits keys and values like those of {@link PerformanceEvaluation}.
 */
@Category(LargeTests.class)
public class TestHFileOutputFormat2 {
  private static final int ROWSPERSPLIT = 1024;

  private static final byte[][] FAMILIES = {
      Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-A")),
      Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-B")) };
  private static final TableName TABLE_NAME = TableName.valueOf("TestTable");

  private HBaseTestingUtility util = new HBaseTestingUtility();

  private static final Log LOG = LogFactory.getLog(TestHFileOutputFormat2.class);

  /**
   * Simple mapper that makes KeyValue output.
   */
  static class RandomKVGeneratingMapper
      extends Mapper<NullWritable, NullWritable, ImmutableBytesWritable, Cell> {

    private int keyLength;
    private static final int KEYLEN_DEFAULT = 10;
    private static final String KEYLEN_CONF = "randomkv.key.length";

    private int valLength;
    private static final int VALLEN_DEFAULT = 10;
    private static final String VALLEN_CONF = "randomkv.val.length";

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
      super.setup(context);

      Configuration conf = context.getConfiguration();
      keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT);
      valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT);
    }

    @Override
    protected void map(NullWritable n1, NullWritable n2, Context context)
        throws IOException, InterruptedException {

      byte[] keyBytes = new byte[keyLength];
      byte[] valBytes = new byte[valLength];

      int taskId = context.getTaskAttemptID().getTaskID().getId();
      assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";

      Random random = new Random();
      for (int i = 0; i < ROWSPERSPLIT; i++) {

        random.nextBytes(keyBytes);
        // Ensure that unique tasks generate unique keys
        keyBytes[keyLength - 1] = (byte)(taskId & 0xFF);
        random.nextBytes(valBytes);
        ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

        for (byte[] family : TestHFileOutputFormat2.FAMILIES) {
          Cell kv = new KeyValue(keyBytes, family,
              PerformanceEvaluation.QUALIFIER_NAME, valBytes);
          context.write(key, kv);
        }
      }
    }
  }

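  // NMapInputFormat is a test-only input format in this package that runs a
  // configurable number of map tasks over empty splits; each mapper above then
  // synthesizes its own random KeyValues.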
  private void setupRandomGeneratorMapper(Job job) {
    job.setInputFormatClass(NMapInputFormat.class);
    job.setMapperClass(RandomKVGeneratingMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);
  }

  /**
   * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if
   * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2615">HBASE-2615</a>
   */
  @Test
  public void test_LATEST_TIMESTAMP_isReplaced() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, Cell> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced");
    try {
      Job job = new Job(conf);
      FileOutputFormat.setOutputPath(job, dir);
      context = createTestTaskAttemptContext(job);
      HFileOutputFormat2 hof = new HFileOutputFormat2();
      writer = hof.getRecordWriter(context);
      final byte[] b = Bytes.toBytes("b");

      // Test 1.  Pass a KV that has a ts of LATEST_TIMESTAMP.  It should be
      // changed by the call to write.  Check that everything in the kv is the
      // same except the ts.
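      // A KeyValue built without an explicit timestamp defaults to
      // HConstants.LATEST_TIMESTAMP, which is exactly what the writer
      // is expected to replace.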
      KeyValue kv = new KeyValue(b, b, b);
      KeyValue original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertFalse(original.equals(kv));
      assertTrue(Bytes.equals(CellUtil.cloneRow(original), CellUtil.cloneRow(kv)));
      assertTrue(Bytes.equals(CellUtil.cloneFamily(original), CellUtil.cloneFamily(kv)));
      assertTrue(Bytes.equals(CellUtil.cloneQualifier(original), CellUtil.cloneQualifier(kv)));
      // Use assertNotEquals rather than assertNotSame: the latter compares
      // autoboxed Long references and would pass even for equal timestamps.
      assertNotEquals(original.getTimestamp(), kv.getTimestamp());
      assertNotEquals(HConstants.LATEST_TIMESTAMP, kv.getTimestamp());

      // Test 2. Now test passing a kv that has an explicit ts.  It should not be
      // changed by the call to write.
      kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b);
      original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertTrue(original.equals(kv));
    } finally {
      if (writer != null && context != null) writer.close(context);
      dir.getFileSystem(conf).delete(dir, true);
    }
  }

  private TaskAttemptContext createTestTaskAttemptContext(final Job job)
      throws Exception {
    HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class);
    TaskAttemptContext context = hadoop.createTestTaskAttemptContext(
      job, "attempt_201402131733_0001_m_000000_0");
    return context;
  }

  /**
   * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE
   * metadata used by time-restricted scans.
   */
  @Test
  public void test_TIMERANGE() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, Cell> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("test_TIMERANGE_present");
    LOG.info("Timerange test writing to dir: " + dir);
    try {
      // build a record writer using HFileOutputFormat2
      Job job = new Job(conf);
      FileOutputFormat.setOutputPath(job, dir);
      context = createTestTaskAttemptContext(job);
      HFileOutputFormat2 hof = new HFileOutputFormat2();
      writer = hof.getRecordWriter(context);

      // Pass two key values with explicit timestamps
      final byte[] b = Bytes.toBytes("b");

      // value 1 with timestamp 2000
      KeyValue kv = new KeyValue(b, b, b, 2000, b);
      KeyValue original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertEquals(original, kv);

      // value 2 with timestamp 1000
      kv = new KeyValue(b, b, b, 1000, b);
      original = kv.clone();
      writer.write(new ImmutableBytesWritable(), kv);
      assertEquals(original, kv);

      // verify that the file has the proper FileInfo.
      writer.close(context);

      // the generated file lives 1 directory down from the attempt directory
      // and is the only file, e.g.
      // _attempt__0000_r_000000_0/b/1979617994050536795
      FileSystem fs = FileSystem.get(conf);
      Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
      FileStatus[] sub1 = fs.listStatus(attemptDirectory);
      FileStatus[] file = fs.listStatus(sub1[0].getPath());

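      // The record writer tracks the min/max timestamps it sees in a
      // TimeRangeTracker and serializes the tracker into the HFile's file
      // info under the TIMERANGE key when the writer is closed.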
      // open as HFile Reader and pull out TIMERANGE FileInfo.
      HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
          new CacheConfig(conf), conf);
      Map<byte[], byte[]> finfo = rd.loadFileInfo();
      byte[] range = finfo.get(Bytes.toBytes("TIMERANGE"));
      assertNotNull(range);

      // unmarshall and check values.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(range, timeRangeTracker);
      LOG.info(timeRangeTracker.getMinimumTimestamp() +
          "...." + timeRangeTracker.getMaximumTimestamp());
      assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
      assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
      rd.close();
    } finally {
      if (writer != null && context != null) writer.close(context);
      dir.getFileSystem(conf).delete(dir, true);
    }
  }

  /**
   * Run small MR job.
   */
  @Test
  public void testWritingPEData() throws Exception {
    Configuration conf = util.getConfiguration();
    Path testDir = util.getDataTestDirOnTestFS("testWritingPEData");
    FileSystem fs = testDir.getFileSystem(conf);

    // Turn down this value or we OOM in Eclipse.
    conf.setInt("io.sort.mb", 20);
    // Write a few files.
    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 64 * 1024);

    Job job = new Job(conf, "testWritingPEData");
    setupRandomGeneratorMapper(job);
    // This partitioner doesn't work well for number keys, but we use it anyway
    // just to demonstrate how to configure it.
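    // SimpleTotalOrderPartitioner divides the [startKey, endKey] range into
    // equal-sized slices by linear interpolation, one slice per reducer.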
    byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];
    byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT];

    Arrays.fill(startKey, (byte)0);
    Arrays.fill(endKey, (byte)0xff);

    job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
    // Set start and end rows for partitioner.
    SimpleTotalOrderPartitioner.setStartKey(job.getConfiguration(), startKey);
    SimpleTotalOrderPartitioner.setEndKey(job.getConfiguration(), endKey);
    job.setReducerClass(KeyValueSortReducer.class);
    job.setOutputFormatClass(HFileOutputFormat2.class);
    job.setNumReduceTasks(4);
    job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName(),
        KeyValueSerialization.class.getName());

    FileOutputFormat.setOutputPath(job, testDir);
    assertTrue(job.waitForCompletion(false));
    FileStatus[] files = fs.listStatus(testDir);
    assertTrue(files.length > 0);
  }

  @Test
  public void testJobConfiguration() throws Exception {
    Job job = new Job(util.getConfiguration());
    job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration"));
    HTable table = Mockito.mock(HTable.class);
    setupMockStartKeys(table);
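    // Four mock start keys mean four regions, so configureIncrementalLoad
    // should configure one reduce task per region.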
    HFileOutputFormat2.configureIncrementalLoad(job, table);
    assertEquals(4, job.getNumReduceTasks());
  }

  private byte[][] generateRandomStartKeys(int numKeys) {
    Random random = new Random();
    byte[][] ret = new byte[numKeys][];
    // first region start key is always empty
    ret[0] = HConstants.EMPTY_BYTE_ARRAY;
    for (int i = 1; i < numKeys; i++) {
      ret[i] = PerformanceEvaluation.generateData(random, PerformanceEvaluation.VALUE_LENGTH);
    }
    return ret;
  }

  @Test
  public void testMRIncrementalLoad() throws Exception {
    LOG.info("\nStarting test testMRIncrementalLoad\n");
    doIncrementalLoadTest(false);
  }

  @Test
  public void testMRIncrementalLoadWithSplit() throws Exception {
    LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n");
    doIncrementalLoadTest(true);
  }

  private void doIncrementalLoadTest(boolean shouldChangeRegions) throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    byte[][] startKeys = generateRandomStartKeys(5);
    HBaseAdmin admin = null;
    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
      admin = new HBaseAdmin(conf);
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = util.createMultiRegions(
          util.getConfiguration(), table, FAMILIES[0], startKeys);
      assertEquals("Should make 5 regions", 5, numRegions);

      // Generate the bulk load files
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
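      // Change the region boundaries after the HFiles were written;
      // LoadIncrementalHFiles must then split the files that straddle the
      // new boundaries before it can load them.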
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getTableName());
        while (util.getMiniHBaseCluster().getMaster().getAssignmentManager().
            getRegionStates().isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        byte[][] newStartKeys = generateRandomStartKeys(15);
        util.createMultiRegions(
            util.getConfiguration(), table, FAMILIES[0], newStartKeys);
        admin.enableTable(table.getTableName());
        while (table.getRegionLocations().size() != 15 ||
            !admin.isTableAvailable(table.getTableName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table",
          expectedRows, util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME.getName());
      assertEquals("Data should remain after reopening of regions",
          tableDigestBefore, util.checksumRows(table));
    } finally {
      if (admin != null) admin.close();
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }

  private void runIncrementalPELoad(Configuration conf, HTable table, Path outDir)
      throws Exception {
    Job job = new Job(conf, "testLocalMRIncrementalLoad");
    job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
    job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
        MutationSerialization.class.getName(), ResultSerialization.class.getName(),
        KeyValueSerialization.class.getName());
    setupRandomGeneratorMapper(job);
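    // configureIncrementalLoad reads the table's region start keys, writes a
    // TotalOrderPartitioner split file from them, and sets one reducer per
    // region so that each output HFile fits entirely inside one region.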
    HFileOutputFormat2.configureIncrementalLoad(job, table);
    FileOutputFormat.setOutputPath(job, outDir);

    assertFalse(util.getTestFileSystem().exists(outDir));

    assertEquals(table.getRegionLocations().size(), job.getNumReduceTasks());

    assertTrue(job.waitForCompletion(true));
  }

  /**
   * Test for {@link HFileOutputFormat2#configureCompression(HTable, Configuration)}
   * and {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}.
   * Tests that the compression map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyCompressionMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, Compression.Algorithm> familyToCompression =
          getMockColumnFamiliesForCompression(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForCompression(table, familyToCompression);
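      // configureCompression serializes the family-to-algorithm map into a
      // single configuration property as URL-encoded "family=value" pairs
      // joined by '&'; createFamilyCompressionMap decodes it back below.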
      HFileOutputFormat2.configureCompression(table, conf);

      // read back family specific compression setting from the configuration
      Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
          .createFamilyCompressionMap(conf);

      // test that we have a value for all column families that matches with the
      // used mock values
      for (Entry<String, Algorithm> entry : familyToCompression.entrySet()) {
        assertEquals("Compression configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToCompressionMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForCompression(HTable table,
      Map<String, Compression.Algorithm> familyToCompression) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setCompressionType(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to compression algorithms for
   *         testing column family compression. Column family names have special characters
   */
  private Map<String, Compression.Algorithm>
      getMockColumnFamiliesForCompression(int numCfs) {
    Map<String, Compression.Algorithm> familyToCompression
      = new HashMap<String, Compression.Algorithm>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToCompression.put("Family1!@#!@#&", Compression.Algorithm.LZO);
    }
    if (numCfs-- > 0) {
      familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
    }
    if (numCfs-- > 0) {
      // use a distinct name here; reusing "Family2=asdads&!AASD" would
      // silently overwrite the SNAPPY entry above
      familyToCompression.put("Family3", Compression.Algorithm.GZ);
    }
    if (numCfs-- > 0) {
      familyToCompression.put("Family4", Compression.Algorithm.NONE);
    }
    return familyToCompression;
  }

  /**
   * Test for {@link HFileOutputFormat2#configureBloomType(HTable, Configuration)}
   * and {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}.
   * Tests that the bloom type map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException {
    for (int numCfs = 0; numCfs <= 2; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, BloomType> familyToBloomType =
          getMockColumnFamiliesForBloomType(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForBloomType(table, familyToBloomType);
      HFileOutputFormat2.configureBloomType(table, conf);

      // read back family specific bloom type settings from the configuration
      Map<byte[], BloomType> retrievedFamilyToBloomTypeMap =
          HFileOutputFormat2.createFamilyBloomTypeMap(conf);

      // test that we have a value for all column families that matches with the
      // used mock values
      for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
        assertEquals("BloomType configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForBloomType(HTable table,
      Map<String, BloomType> familyToBloomType) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, BloomType> entry : familyToBloomType.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setBloomFilterType(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to bloom filter types for
   *         testing column family bloom filter settings. Column family names have
   *         special characters
   */
  private Map<String, BloomType> getMockColumnFamiliesForBloomType(int numCfs) {
    Map<String, BloomType> familyToBloomType =
        new HashMap<String, BloomType>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToBloomType.put("Family1!@#!@#&", BloomType.ROW);
    }
    if (numCfs-- > 0) {
      familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL);
    }
    if (numCfs-- > 0) {
      familyToBloomType.put("Family3", BloomType.NONE);
    }
    return familyToBloomType;
  }

  /**
   * Test for {@link HFileOutputFormat2#configureBlockSize(HTable, Configuration)}
   * and {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}.
   * Tests that the block size map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, Integer> familyToBlockSize =
          getMockColumnFamiliesForBlockSize(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForBlockSize(table, familyToBlockSize);
      HFileOutputFormat2.configureBlockSize(table, conf);

      // read back family specific block size settings from the configuration
      Map<byte[], Integer> retrievedFamilyToBlockSizeMap =
          HFileOutputFormat2.createFamilyBlockSizeMap(conf);

      // test that we have a value for all column families that matches with the
      // used mock values
      for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
        assertEquals("BlockSize configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForBlockSize(HTable table,
      Map<String, Integer> familyToBlockSize) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, Integer> entry : familyToBlockSize.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setBlocksize(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to block sizes for
   *         testing column family block size settings. Column family names have
   *         special characters
   */
  private Map<String, Integer> getMockColumnFamiliesForBlockSize(int numCfs) {
    Map<String, Integer> familyToBlockSize =
        new HashMap<String, Integer>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToBlockSize.put("Family1!@#!@#&", 1234);
    }
    if (numCfs-- > 0) {
      familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE);
    }
    if (numCfs-- > 0) {
      // use a distinct name here; the original duplicate key simply
      // overwrote the entry above with the same value
      familyToBlockSize.put("Family3", Integer.MAX_VALUE);
    }
    if (numCfs-- > 0) {
      familyToBlockSize.put("Family4", 0);
    }
    return familyToBlockSize;
  }

  /**
   * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTable, Configuration)}
   * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}.
   * Tests that the data block encoding map is correctly serialized into
   * and deserialized from configuration.
   *
   * @throws IOException
   */
  @Test
  public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
    for (int numCfs = 0; numCfs <= 3; numCfs++) {
      Configuration conf = new Configuration(this.util.getConfiguration());
      Map<String, DataBlockEncoding> familyToDataBlockEncoding =
          getMockColumnFamiliesForDataBlockEncoding(numCfs);
      HTable table = Mockito.mock(HTable.class);
      setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding);
      HFileOutputFormat2.configureDataBlockEncoding(table, conf);

      // read back family specific data block encoding settings from the
      // configuration
      Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap =
          HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf);

      // test that we have a value for all column families that matches with the
      // used mock values
      for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
        assertEquals("DataBlockEncoding configuration incorrect for column family:"
            + entry.getKey(), entry.getValue(),
            retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes()));
      }
    }
  }

  private void setupMockColumnFamiliesForDataBlockEncoding(HTable table,
      Map<String, DataBlockEncoding> familyToDataBlockEncoding) throws IOException {
    HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAME);
    for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
      mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey())
          .setMaxVersions(1)
          .setDataBlockEncoding(entry.getValue())
          .setBlockCacheEnabled(false)
          .setTimeToLive(0));
    }
    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
  }

  /**
   * @return a map from column family names to data block encodings for
   *         testing column family data block encoding settings. Column family names
   *         have special characters
   */
  private Map<String, DataBlockEncoding>
      getMockColumnFamiliesForDataBlockEncoding(int numCfs) {
    Map<String, DataBlockEncoding> familyToDataBlockEncoding =
        new HashMap<String, DataBlockEncoding>();
    // use column family names having special characters
    if (numCfs-- > 0) {
      familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF);
    }
    if (numCfs-- > 0) {
      familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF);
    }
    if (numCfs-- > 0) {
      // use a distinct name here; reusing "Family2=asdads&!AASD" would
      // silently overwrite the FAST_DIFF entry above
      familyToDataBlockEncoding.put("Family3", DataBlockEncoding.PREFIX);
    }
    if (numCfs-- > 0) {
      familyToDataBlockEncoding.put("Family4", DataBlockEncoding.NONE);
    }
    return familyToDataBlockEncoding;
  }

  private void setupMockStartKeys(HTable table) throws IOException {
    byte[][] mockKeys = new byte[][] {
        HConstants.EMPTY_BYTE_ARRAY,
        Bytes.toBytes("aaa"),
        Bytes.toBytes("ggg"),
        Bytes.toBytes("zzz")
    };
    Mockito.doReturn(mockKeys).when(table).getStartKeys();
  }

  /**
   * Test that {@link HFileOutputFormat2} RecordWriter uses compression and
   * bloom filter settings from the column family descriptor.
   */
  @Test
  public void testColumnFamilySettings() throws Exception {
    Configuration conf = new Configuration(this.util.getConfiguration());
    RecordWriter<ImmutableBytesWritable, Cell> writer = null;
    TaskAttemptContext context = null;
    Path dir = util.getDataTestDir("testColumnFamilySettings");

    // Setup table descriptor
    HTable table = Mockito.mock(HTable.class);
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    Mockito.doReturn(htd).when(table).getTableDescriptor();
    for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) {
      htd.addFamily(hcd);
    }

    // set up the table to return some mock keys
    setupMockStartKeys(table);

    try {
      // partial map red setup to get an operational writer for testing
      // We turn off the sequence file compression, because DefaultCodec
      // pollutes the GZip codec pool with an incompatible compressor.
      conf.set("io.seqfile.compression.type", "NONE");
      Job job = new Job(conf, "testLocalMRIncrementalLoad");
      job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
      setupRandomGeneratorMapper(job);
      HFileOutputFormat2.configureIncrementalLoad(job, table);
      FileOutputFormat.setOutputPath(job, dir);
      context = createTestTaskAttemptContext(job);
      HFileOutputFormat2 hof = new HFileOutputFormat2();
      writer = hof.getRecordWriter(context);

      // write out random rows
      writeRandomKeyValues(writer, context, htd.getFamiliesKeys(), ROWSPERSPLIT);
      writer.close(context);

      // Make sure that a directory was created for every CF
      FileSystem fs = dir.getFileSystem(conf);

      // commit so that the filesystem has one directory per column family
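      // Both commits are needed so the files move out of the task attempt's
      // temporary directory into the job output directory, where the
      // FamilyDirFilter scan below can see them.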
      hof.getOutputCommitter(context).commitTask(context);
      hof.getOutputCommitter(context).commitJob(context);
      FileStatus[] families = FSUtils.listStatus(fs, dir, new FSUtils.FamilyDirFilter(fs));
      assertEquals(htd.getFamilies().size(), families.length);
      for (FileStatus f : families) {
        String familyStr = f.getPath().getName();
        HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes(familyStr));
        // verify that the compression on this file matches the configured
        // compression
        Path dataFilePath = fs.listStatus(f.getPath())[0].getPath();
        Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf), conf);
        Map<byte[], byte[]> fileInfo = reader.loadFileInfo();

        byte[] bloomFilter = fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY);
        if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE");
        assertEquals("Incorrect bloom filter used for column family " + familyStr +
            " (reader: " + reader + ")",
            hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter)));
        assertEquals("Incorrect compression used for column family " + familyStr +
            " (reader: " + reader + ")",
            hcd.getCompression(), reader.getFileContext().getCompression());
      }
    } finally {
      dir.getFileSystem(conf).delete(dir, true);
    }
  }

  /**
   * Write random values to the writer: one KeyValue per row for each of the
   * passed column families.
   */
  private void writeRandomKeyValues(RecordWriter<ImmutableBytesWritable, Cell> writer,
      TaskAttemptContext context, Set<byte[]> families, int numRows)
      throws IOException, InterruptedException {
    byte[] keyBytes = new byte[Bytes.SIZEOF_INT];
    int valLength = 10;
    byte[] valBytes = new byte[valLength];

    int taskId = context.getTaskAttemptID().getTaskID().getId();
    assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";

    Random random = new Random();
    for (int i = 0; i < numRows; i++) {

      Bytes.putInt(keyBytes, 0, i);
      random.nextBytes(valBytes);
      ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);

      for (byte[] family : families) {
        Cell kv = new KeyValue(keyBytes, family,
            PerformanceEvaluation.QUALIFIER_NAME, valBytes);
        writer.write(key, kv);
      }
    }
  }

  /**
   * Exercises the scenario reported in HBASE-6901: all files are bulk loaded
   * and excluded from minor compaction. Without the fix for HBASE-6901, an
   * ArrayIndexOutOfBoundsException will be thrown.
   */
  @Ignore("Flaky: see HBASE-9051") @Test
  public void testExcludeAllFromMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    try {
      util.startMiniCluster();
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      HBaseAdmin admin = new HBaseAdmin(conf);
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath = new Path(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
          new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
            Bytes.toString(FAMILIES[0])));
      assertEquals(0, fs.listStatus(storePath).length);

      // Generate two bulk load files
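      // With this flag set, HFileOutputFormat2 marks each generated HFile's
      // metadata so that minor compactions skip it; only a major compaction
      // will rewrite the bulk-loaded files.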
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
          true);
      util.startMiniMapReduceCluster();

      for (int i = 0; i < 2; i++) {
        Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
        runIncrementalPELoad(conf, table, testDir);
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
      }

      // Ensure data shows up
      int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table",
          expectedRows, util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME.getName());
      try {
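        // quickPoll throws an AssertionError if the store never collapses to
        // a single file; that failure is the expected outcome here, since
        // both files are excluded from minor compaction.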
        quickPoll(new Callable<Boolean>() {
          public Boolean call() throws Exception {
            return fs.listStatus(storePath).length == 1;
          }
        }, 5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME.getName());
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);

    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }

  @Test
  public void testExcludeMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      HBaseAdmin admin = new HBaseAdmin(conf);
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath = new Path(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
          new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
            Bytes.toString(FAMILIES[0])));
      assertEquals(0, fs.listStatus(storePath).length);

      // put some data in it and flush to create a storefile
      Put p = new Put(Bytes.toBytes("test"));
      p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
      table.put(p);
      admin.flush(TABLE_NAME.getName());
      assertEquals(1, util.countRows(table));
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);

      // Generate a bulk load file with more rows
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
          true);
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals("LoadIncrementalHFiles should put expected data in table",
          expectedRows + 1, util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME.getName());
      try {
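        // As above, the AssertionError from quickPoll is the expected outcome:
        // the bulk-loaded file is excluded from minor compaction, so the
        // store must not collapse to a single file.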
        quickPoll(new Callable<Boolean>() {
          public Boolean call() throws Exception {
            return fs.listStatus(storePath).length == 1;
          }
        }, 5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME.getName());
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);

    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }

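  /**
   * Polls the callable every 10ms until it returns true or waitMs has
   * elapsed, failing the test on timeout.
   */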
  private void quickPoll(Callable<Boolean> c, int waitMs) throws Exception {
    int sleepMs = 10;
    int retries = (int) Math.ceil(((double) waitMs) / sleepMs);
    while (retries-- > 0) {
      if (c.call().booleanValue()) {
        return;
      }
      Thread.sleep(sleepMs);
    }
    fail();
  }

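  // Manual driver for running against a real cluster rather than the mini
  // cluster: "newtable <name>" creates a pre-split table, "incremental <name>"
  // runs the bulk-load MR job against an existing table.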
  public static void main(String[] args) throws Exception {
    new TestHFileOutputFormat2().manualTest(args);
  }

  public void manualTest(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    util = new HBaseTestingUtility(conf);
    if ("newtable".equals(args[0])) {
      byte[] tname = Bytes.toBytes(args[1]);
      HTable table = util.createTable(tname, FAMILIES);
      HBaseAdmin admin = new HBaseAdmin(conf);
      admin.disableTable(tname);
      byte[][] startKeys = generateRandomStartKeys(5);
      util.createMultiRegions(conf, table, FAMILIES[0], startKeys);
      admin.enableTable(tname);
    } else if ("incremental".equals(args[0])) {
      byte[] tname = Bytes.toBytes(args[1]);
      HTable table = new HTable(conf, tname);
      Path outDir = new Path("incremental-out");
      runIncrementalPELoad(conf, table, outDir);
    } else {
      throw new RuntimeException(
          "usage: TestHFileOutputFormat2 newtable | incremental");
    }
  }

}