1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import java.io.ByteArrayInputStream;
22 import java.io.DataInputStream;
23 import java.io.EOFException;
24 import java.io.FileNotFoundException;
25 import java.io.IOException;
26 import java.io.InputStream;
27 import java.io.InterruptedIOException;
28 import java.lang.reflect.InvocationTargetException;
29 import java.lang.reflect.Method;
30 import java.net.InetSocketAddress;
31 import java.net.URI;
32 import java.net.URISyntaxException;
33 import java.util.ArrayList;
34 import java.util.Collections;
35 import java.util.HashMap;
36 import java.util.LinkedList;
37 import java.util.List;
38 import java.util.Map;
39 import java.util.concurrent.ArrayBlockingQueue;
40 import java.util.concurrent.ConcurrentHashMap;
41 import java.util.concurrent.ThreadPoolExecutor;
42 import java.util.concurrent.TimeUnit;
43 import java.util.regex.Pattern;
44
45 import org.apache.commons.logging.Log;
46 import org.apache.commons.logging.LogFactory;
47 import org.apache.hadoop.hbase.classification.InterfaceAudience;
48 import org.apache.hadoop.conf.Configuration;
49 import org.apache.hadoop.fs.BlockLocation;
50 import org.apache.hadoop.fs.FSDataInputStream;
51 import org.apache.hadoop.fs.FSDataOutputStream;
52 import org.apache.hadoop.fs.FileStatus;
53 import org.apache.hadoop.fs.FileSystem;
54 import org.apache.hadoop.fs.Path;
55 import org.apache.hadoop.fs.PathFilter;
56 import org.apache.hadoop.fs.permission.FsAction;
57 import org.apache.hadoop.fs.permission.FsPermission;
58 import org.apache.hadoop.hbase.ClusterId;
59 import org.apache.hadoop.hbase.HColumnDescriptor;
60 import org.apache.hadoop.hbase.HConstants;
61 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
62 import org.apache.hadoop.hbase.HRegionInfo;
63 import org.apache.hadoop.hbase.RemoteExceptionHandler;
64 import org.apache.hadoop.hbase.TableName;
65 import org.apache.hadoop.hbase.exceptions.DeserializationException;
66 import org.apache.hadoop.hbase.fs.HFileSystem;
67 import org.apache.hadoop.hbase.master.HMaster;
68 import org.apache.hadoop.hbase.master.RegionPlacementMaintainer;
69 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
70 import org.apache.hadoop.hbase.security.AccessDeniedException;
71 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
72 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
73 import org.apache.hadoop.hbase.regionserver.HRegion;
74 import org.apache.hadoop.hdfs.DistributedFileSystem;
75 import org.apache.hadoop.hdfs.protocol.FSConstants;
76 import org.apache.hadoop.io.IOUtils;
77 import org.apache.hadoop.io.SequenceFile;
78 import org.apache.hadoop.security.UserGroupInformation;
79 import org.apache.hadoop.util.Progressable;
80 import org.apache.hadoop.util.ReflectionUtils;
81 import org.apache.hadoop.util.StringUtils;
82
83 import com.google.common.primitives.Ints;
84 import com.google.protobuf.InvalidProtocolBufferException;
85
86
87
88
89 @InterfaceAudience.Private
90 public abstract class FSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  /** Full access permissions (starting point for a umask) */
  public static final String FULL_RWX_PERMISSIONS = "777";
  /** Config key for the locality-check thread pool size. */
  private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
  /** Default size of the locality-check thread pool. */
  private static final int DEFAULT_THREAD_POOLSIZE = 2;

  /** Set to true on Windows platforms (detected from the os.name property). */
  public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");
100
  /** Not instantiable directly; obtain a concrete implementation via {@link #getInstance}. */
  protected FSUtils() {
    super();
  }
104
105
106
107
108
109
110
111
112 public static boolean isStartingWithPath(final Path rootPath, final String path) {
113 String uriRootPath = rootPath.toUri().getPath();
114 String tailUriPath = (new Path(path)).toUri().getPath();
115 return tailUriPath.startsWith(uriRootPath);
116 }
117
118
119
120
121
122
123
124
125
126 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
127 return isMatchingTail(pathToSearch, new Path(pathTail));
128 }
129
130
131
132
133
134
135
136
137
  /**
   * Compares two paths of equal depth component-by-component from the leaf upward,
   * returning true only when every tail component matches until the tail is exhausted.
   * Paths of differing depth never match.
   * @param pathToSearch path to check
   * @param pathTail tail to look for at the end of {@code pathToSearch}
   * @return true if the two paths agree on every component of the tail
   */
  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
    if (pathToSearch.depth() != pathTail.depth()) return false;
    Path tailPath = pathTail;
    String tailName;
    Path toSearch = pathToSearch;
    String toSearchName;
    boolean result = false;
    do {
      tailName = tailPath.getName();
      // Tail exhausted without a mismatch: declare a match.
      if (tailName == null || tailName.length() <= 0) {
        result = true;
        break;
      }
      toSearchName = toSearch.getName();
      // Searched path ran out before the tail did: cannot match.
      if (toSearchName == null || toSearchName.length() <= 0) break;
      // Walk both paths up one component per iteration.
      tailPath = tailPath.getParent();
      toSearch = toSearch.getParent();
    } while(tailName.equals(toSearchName));
    return result;
  }
159
160 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
161 String scheme = fs.getUri().getScheme();
162 if (scheme == null) {
163 LOG.warn("Could not find scheme for uri " +
164 fs.getUri() + ", default to hdfs");
165 scheme = "hdfs";
166 }
167 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
168 scheme + ".impl", FSHDFSUtils.class);
169 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
170 return fsUtils;
171 }
172
173
174
175
176
177
178
179
180 public static boolean deleteDirectory(final FileSystem fs, final Path dir)
181 throws IOException {
182 return fs.exists(dir) && fs.delete(dir, true);
183 }
184
185
186
187
188
189
190
191
192 public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
193 throws IOException {
194 Path rootDir = getRootDir(conf);
195 FileSystem fs = rootDir.getFileSystem(conf);
196 return deleteDirectory(fs,
197 new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
198 }
199
200
201
202
203
204
205
206
207
208
209
210
211 public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
212 Method m = null;
213 Class<? extends FileSystem> cls = fs.getClass();
214 try {
215 m = cls.getMethod("getDefaultBlockSize", new Class<?>[] { Path.class });
216 } catch (NoSuchMethodException e) {
217 LOG.info("FileSystem doesn't support getDefaultBlockSize");
218 } catch (SecurityException e) {
219 LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
220 m = null;
221 }
222 if (m == null) {
223 return fs.getDefaultBlockSize();
224 } else {
225 try {
226 Object ret = m.invoke(fs, path);
227 return ((Long)ret).longValue();
228 } catch (Exception e) {
229 throw new IOException(e);
230 }
231 }
232 }
233
234
235
236
237
238
239
240
241
242
243
244
245 public static short getDefaultReplication(final FileSystem fs, final Path path) throws IOException {
246 Method m = null;
247 Class<? extends FileSystem> cls = fs.getClass();
248 try {
249 m = cls.getMethod("getDefaultReplication", new Class<?>[] { Path.class });
250 } catch (NoSuchMethodException e) {
251 LOG.info("FileSystem doesn't support getDefaultReplication");
252 } catch (SecurityException e) {
253 LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
254 m = null;
255 }
256 if (m == null) {
257 return fs.getDefaultReplication();
258 } else {
259 try {
260 Object ret = m.invoke(fs, path);
261 return ((Number)ret).shortValue();
262 } catch (Exception e) {
263 throw new IOException(e);
264 }
265 }
266 }
267
268
269
270
271
272
273
274
275
276
277
278 public static int getDefaultBufferSize(final FileSystem fs) {
279 return fs.getConf().getInt("io.file.buffer.size", 4096);
280 }
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
  /**
   * Creates {@code path} on the filesystem, placing replicas on the given
   * favored nodes when the underlying DistributedFileSystem supports it.
   * The favored-nodes create overload is probed via reflection since it is
   * only present in some Hadoop versions; on any reflection-related failure
   * this falls back to the plain {@link #create(FileSystem, Path, FsPermission, boolean)}.
   * @param fs filesystem to create the file on
   * @param path file to create (overwritten if present)
   * @param perm permissions for the new file
   * @param favoredNodes datanodes preferred for block placement; may be ignored
   * @return an output stream for the newly created file
   * @throws IOException if the create itself fails
   */
  public static FSDataOutputStream create(FileSystem fs, Path path,
      FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
    if (fs instanceof HFileSystem) {
      FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
      if (backingFs instanceof DistributedFileSystem) {
        // Try the DFS overload that accepts favored nodes; reflection keeps us
        // compatible with Hadoop versions that lack it.
        try {
          return (FSDataOutputStream) (DistributedFileSystem.class
              .getDeclaredMethod("create", Path.class, FsPermission.class,
                  boolean.class, int.class, short.class, long.class,
                  Progressable.class, InetSocketAddress[].class)
                  .invoke(backingFs, path, perm, true,
                      getDefaultBufferSize(backingFs),
                      getDefaultReplication(backingFs, path),
                      getDefaultBlockSize(backingFs, path),
                      null, favoredNodes));
        } catch (InvocationTargetException ite) {
          // The method exists and was invoked; surface the real cause.
          throw new IOException(ite.getCause());
        } catch (NoSuchMethodException e) {
          LOG.debug("DFS Client does not support most favored nodes create; using default create");
          if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
        } catch (IllegalArgumentException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        } catch (SecurityException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        } catch (IllegalAccessException e) {
          LOG.debug("Ignoring (most likely Reflection related exception) " + e);
        }
      }
    }
    // Fallback: ordinary create without favored-nodes placement.
    return create(fs, path, perm, true);
  }
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352 public static FSDataOutputStream create(FileSystem fs, Path path,
353 FsPermission perm, boolean overwrite) throws IOException {
354 if (LOG.isTraceEnabled()) {
355 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
356 }
357 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
358 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
359 }
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374 public static FsPermission getFilePermissions(final FileSystem fs,
375 final Configuration conf, final String permssionConfKey) {
376 boolean enablePermissions = conf.getBoolean(
377 HConstants.ENABLE_DATA_FILE_UMASK, false);
378
379 if (enablePermissions) {
380 try {
381 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
382
383 String mask = conf.get(permssionConfKey);
384 if (mask == null)
385 return getFileDefault();
386
387 FsPermission umask = new FsPermission(mask);
388 return perm.applyUMask(umask);
389 } catch (IllegalArgumentException e) {
390 LOG.warn(
391 "Incorrect umask attempted to be created: "
392 + conf.get(permssionConfKey)
393 + ", using default file permissions.", e);
394 return getFileDefault();
395 }
396 }
397 return getFileDefault();
398 }
399
400
401
402
403
404
405
406
407
408
409 public static FsPermission getFileDefault() {
410 return new FsPermission((short)00666);
411 }
412
413
414
415
416
417
418
419 public static void checkFileSystemAvailable(final FileSystem fs)
420 throws IOException {
421 if (!(fs instanceof DistributedFileSystem)) {
422 return;
423 }
424 IOException exception = null;
425 DistributedFileSystem dfs = (DistributedFileSystem) fs;
426 try {
427 if (dfs.exists(new Path("/"))) {
428 return;
429 }
430 } catch (IOException e) {
431 exception = RemoteExceptionHandler.checkIOException(e);
432 }
433 try {
434 fs.close();
435 } catch (Exception e) {
436 LOG.error("file system close failed: ", e);
437 }
438 IOException io = new IOException("File system is not available");
439 io.initCause(exception);
440 throw io;
441 }
442
443
444
445
446
447
448
449
450
  /**
   * Reports whether the DFS is currently in safe mode. Prefers the two-argument
   * {@code setSafeMode(SafeModeAction, boolean)} overload (probed via reflection;
   * the second argument requests a namenode-local check) and falls back to the
   * single-argument form on Hadoop versions that lack it.
   * @param dfs filesystem to query
   * @return true if the DFS is in safe mode
   * @throws IOException if the query fails
   */
  private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
    boolean inSafeMode = false;
    try {
      Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.class, boolean.class});
      inSafeMode = (Boolean) m.invoke(dfs,
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET, true);
    } catch (Exception e) {
      if (e instanceof IOException) throw (IOException) e;

      // Reflection failed (older Hadoop): fall back to the one-arg overload.
      inSafeMode = dfs.setSafeMode(
          org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET);
    }
    return inSafeMode;
  }
467
468
469
470
471
472
473 public static void checkDfsSafeMode(final Configuration conf)
474 throws IOException {
475 boolean isInSafeMode = false;
476 FileSystem fs = FileSystem.get(conf);
477 if (fs instanceof DistributedFileSystem) {
478 DistributedFileSystem dfs = (DistributedFileSystem)fs;
479 isInSafeMode = isInSafeMode(dfs);
480 }
481 if (isInSafeMode) {
482 throw new IOException("File system is in safemode, it can't be written now");
483 }
484 }
485
486
487
488
489
490
491
492
493
494
  /**
   * Reads the hbase.version file under {@code rootdir}. Understands both the
   * protobuf-serialized format (magic-prefixed) and the legacy single-UTF-string
   * format.
   * @param fs filesystem to read from
   * @param rootdir hbase root directory containing the version file
   * @return the version string, or null if the version file does not exist
   * @throws IOException on read failure
   * @throws DeserializationException if the protobuf content is malformed
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException, DeserializationException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FileStatus[] status = null;
    try {
      // listStatus is used so a missing file manifests as either a
      // FileNotFoundException or a null/empty array, depending on hadoop version.
      status = fs.listStatus(versionFile);
    } catch (FileNotFoundException fnfe) {
      return null;
    }
    if (status == null || status.length == 0) return null;
    String version = null;
    byte [] content = new byte [(int)status[0].getLen()];
    FSDataInputStream s = fs.open(versionFile);
    try {
      IOUtils.readFully(s, content, 0, content.length);
      if (ProtobufUtil.isPBMagicPrefix(content)) {
        version = parseVersionFrom(content);
      } else {
        // Legacy pre-protobuf layout: a single writeUTF'd string.
        InputStream is = new ByteArrayInputStream(content);
        DataInputStream dis = new DataInputStream(is);
        try {
          version = dis.readUTF();
        } finally {
          dis.close();
        }
      }
    } catch (EOFException eof) {
      // Empty version file: return null so the caller can (re)write it.
      LOG.warn("Version file was empty, odd, will try to set it.");
    } finally {
      s.close();
    }
    return version;
  }
531
532
533
534
535
536
537
538 static String parseVersionFrom(final byte [] bytes)
539 throws DeserializationException {
540 ProtobufUtil.expectPBMagicPrefix(bytes);
541 int pblen = ProtobufUtil.lengthOfPBMagic();
542 FSProtos.HBaseVersionFileContent.Builder builder =
543 FSProtos.HBaseVersionFileContent.newBuilder();
544 FSProtos.HBaseVersionFileContent fileContent;
545 try {
546 fileContent = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
547 return fileContent.getVersion();
548 } catch (InvalidProtocolBufferException e) {
549
550 throw new DeserializationException(e);
551 }
552 }
553
554
555
556
557
558
559 static byte [] toVersionByteArray(final String version) {
560 FSProtos.HBaseVersionFileContent.Builder builder =
561 FSProtos.HBaseVersionFileContent.newBuilder();
562 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
563 }
564
565
566
567
568
569
570
571
572
573
574
575 public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
576 throws IOException, DeserializationException {
577 checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
578 }
579
580
581
582
583
584
585
586
587
588
589
590
591
  /**
   * Verifies the file-system layout version matches the software's expected
   * version. A missing version file on a root with no meta region is treated as
   * a fresh install and the file is created; any other mismatch raises
   * FileSystemVersionException with upgrade guidance.
   * @param fs filesystem to check
   * @param rootdir hbase root directory
   * @param message whether to also print the warning to stdout
   * @param wait milliseconds to sleep between version-file write attempts
   * @param retries number of version-file write attempts
   * @throws IOException on filesystem error
   * @throws DeserializationException if the version file is unparseable
   * @throws FileSystemVersionException if the layout version is wrong or missing on a non-fresh root
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries)
  throws IOException, DeserializationException {
    String version = getVersion(fs, rootdir);
    if (version == null) {
      if (!metaRegionExists(fs, rootdir)) {
        // No version file and no meta region: treat as a brand-new install and
        // lay down the version file.
        setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;

    // Either the version file is missing on an existing install, or the
    // recorded version differs from what this software expects.
    String msg = "HBase file layout needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
      + " Is your hbase.rootdir valid? If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }
618
619
620
621
622
623
624
625
626 public static void setVersion(FileSystem fs, Path rootdir)
627 throws IOException {
628 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
629 HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
630 }
631
632
633
634
635
636
637
638
639
640
641 public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
642 throws IOException {
643 setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
644 }
645
646
647
648
649
650
651
652
653
654
655
656
  /**
   * Writes {@code version} into the version file under {@code rootdir}. The
   * content is first written to a temp file under the hbase temp directory and
   * then renamed into place so readers never observe a partially written file.
   * Failures are retried up to {@code retries} times, sleeping {@code wait} ms
   * between attempts.
   * @param fs filesystem to write to
   * @param rootdir hbase root directory
   * @param version version string to record
   * @param wait milliseconds to sleep between attempts
   * @param retries number of write attempts before giving up
   * @throws IOException if all attempts fail
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
      HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        // Write to a temp file first, then move into place atomically.
        FSDataOutputStream s = fs.create(tempVersionFile);
        try {
          s.write(toVersionByteArray(version));
          s.close();
          s = null;

          // Publish the fully written file at its final location.
          if (!fs.rename(tempVersionFile, versionFile)) {
            throw new IOException("Unable to move temp version file to " + versionFile);
          }
        } finally {
          // Close is idempotent-guarded via the null-out above; a failure here
          // is ignored so the original exception (if any) propagates.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
        return;
      } catch (IOException e) {
        if (retries > 0) {
          // Remove any partial destination file, then sleep and retry.
          LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ex) {
            // Sleep cut short; proceed with the next attempt.
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }
705
706
707
708
709
710
711
712
713
714 public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
715 int wait) throws IOException {
716 while (true) {
717 try {
718 Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
719 return fs.exists(filePath);
720 } catch (IOException ioe) {
721 if (wait > 0) {
722 LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
723 ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
724 try {
725 Thread.sleep(wait);
726 } catch (InterruptedException ie) {
727 throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
728 }
729 } else {
730 throw ioe;
731 }
732 }
733 }
734 }
735
736
737
738
739
740
741
742
  /**
   * Reads the cluster ID from the hbase.id file under {@code rootdir}. If the
   * file is in the legacy (non-protobuf) format it is additionally re-read as a
   * UTF string and rewritten on disk in protobuf form.
   * @param fs filesystem to read from
   * @param rootdir hbase root directory
   * @return the cluster ID, or null if the file does not exist
   * @throws IOException on read failure or if the content cannot be parsed
   */
  public static ClusterId getClusterId(FileSystem fs, Path rootdir)
  throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    ClusterId clusterId = null;
    FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
    if (status != null) {
      int len = Ints.checkedCast(status.getLen());
      byte [] content = new byte[len];
      FSDataInputStream in = fs.open(idPath);
      try {
        in.readFully(content);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally{
        in.close();
      }
      try {
        clusterId = ClusterId.parseFrom(content);
      } catch (DeserializationException e) {
        throw new IOException("content=" + Bytes.toString(content), e);
      }
      // Legacy (pre-protobuf) file: re-read as a UTF string and rewrite as pb.
      if (!ProtobufUtil.isPBMagicPrefix(content)) {
        String cid = null;
        in = fs.open(idPath);
        try {
          cid = in.readUTF();
          clusterId = new ClusterId(cid);
        } catch (EOFException eof) {
          LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
        } finally {
          in.close();
        }
        rewriteAsPb(fs, rootdir, idPath, clusterId);
      }
      return clusterId;
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }
784
785
786
787
788
789 private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
790 final ClusterId cid)
791 throws IOException {
792
793
794 Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
795 if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
796 setClusterId(fs, rootdir, cid, 100);
797 if (!fs.delete(movedAsideName, false)) {
798 throw new IOException("Failed delete of " + movedAsideName);
799 }
800 LOG.debug("Rewrote the hbase.id file as pb");
801 }
802
803
804
805
806
807
808
809
810
811
  /**
   * Writes the cluster ID file under {@code rootdir}. The content is first
   * written to a temp file and then renamed into place so readers never see a
   * partial file. IOExceptions are retried, sleeping {@code wait} ms between
   * attempts; an interrupt while sleeping stops retrying.
   * @param fs filesystem to write to
   * @param rootdir hbase root directory
   * @param clusterId cluster ID to record
   * @param wait milliseconds to sleep between attempts; 0 or less disables retrying
   * @throws IOException if the write fails and retries are disabled
   */
  public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
          Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
        // Write to a temp file first, then move into place atomically.
        FSDataOutputStream s = fs.create(tempIdFile);
        try {
          s.write(clusterId.toByteArray());
          s.close();
          s = null;

          // Publish the fully written file at its final location.
          if (!fs.rename(tempIdFile, idFile)) {
            throw new IOException("Unable to move temp version file to " + idFile);
          }
        } finally {
          // s was nulled after a successful close; ignore close failures here.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
              ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException ie) {
            // Preserve interrupt status and give up on retries.
            Thread.currentThread().interrupt();
            break;
          }
        } else {
          throw ioe;
        }
      }
    }
  }
856
857
858
859
860
861
862
863
864 public static Path validateRootPath(Path root) throws IOException {
865 try {
866 URI rootURI = new URI(root.toString());
867 String scheme = rootURI.getScheme();
868 if (scheme == null) {
869 throw new IOException("Root directory does not have a scheme");
870 }
871 return root;
872 } catch (URISyntaxException e) {
873 IOException io = new IOException("Root directory path is not a valid " +
874 "URI -- check your " + HConstants.HBASE_DIR + " configuration");
875 io.initCause(e);
876 throw io;
877 }
878 }
879
880
881
882
883
884
885
886
887
888 public static String removeRootPath(Path path, final Configuration conf) throws IOException {
889 Path root = FSUtils.getRootDir(conf);
890 String pathStr = path.toString();
891
892 if (!pathStr.startsWith(root.toString())) return pathStr;
893
894 return pathStr.substring(root.toString().length() + 1);
895 }
896
897
898
899
900
901
902
903 public static void waitOnSafeMode(final Configuration conf,
904 final long wait)
905 throws IOException {
906 FileSystem fs = FileSystem.get(conf);
907 if (!(fs instanceof DistributedFileSystem)) return;
908 DistributedFileSystem dfs = (DistributedFileSystem)fs;
909
910 while (isInSafeMode(dfs)) {
911 LOG.info("Waiting for dfs to exit safe mode...");
912 try {
913 Thread.sleep(wait);
914 } catch (InterruptedException e) {
915
916 }
917 }
918 }
919
920
921
922
923
924
925
926
927
928
929
930 public static String getPath(Path p) {
931 return p.toUri().getPath();
932 }
933
934
935
936
937
938
939
940 public static Path getRootDir(final Configuration c) throws IOException {
941 Path p = new Path(c.get(HConstants.HBASE_DIR));
942 FileSystem fs = p.getFileSystem(c);
943 return p.makeQualified(fs);
944 }
945
946 public static void setRootDir(final Configuration c, final Path root) throws IOException {
947 c.set(HConstants.HBASE_DIR, root.toString());
948 }
949
950 public static void setFsDefault(final Configuration c, final Path root) throws IOException {
951 c.set("fs.defaultFS", root.toString());
952 c.set("fs.default.name", root.toString());
953 }
954
955
956
957
958
959
960
961
962
963 @SuppressWarnings("deprecation")
964 public static boolean metaRegionExists(FileSystem fs, Path rootdir)
965 throws IOException {
966 Path metaRegionDir =
967 HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO);
968 return fs.exists(metaRegionDir);
969 }
970
971
972
973
974
975
976
977
978
979 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
980 final FileSystem fs, FileStatus status, long start, long length)
981 throws IOException {
982 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
983 BlockLocation [] blockLocations =
984 fs.getFileBlockLocations(status, start, length);
985 for(BlockLocation bl : blockLocations) {
986 String [] hosts = bl.getHosts();
987 long len = bl.getLength();
988 blocksDistribution.addHostsAndBlockWeight(hosts, len);
989 }
990
991 return blocksDistribution;
992 }
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005 public static boolean isMajorCompacted(final FileSystem fs,
1006 final Path hbaseRootDir)
1007 throws IOException {
1008 List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
1009 PathFilter regionFilter = new RegionDirFilter(fs);
1010 PathFilter familyFilter = new FamilyDirFilter(fs);
1011 for (Path d : tableDirs) {
1012 FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
1013 for (FileStatus regionDir : regionDirs) {
1014 Path dd = regionDir.getPath();
1015
1016 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1017 for (FileStatus familyDir : familyDirs) {
1018 Path family = familyDir.getPath();
1019
1020 FileStatus[] familyStatus = fs.listStatus(family);
1021 if (familyStatus.length > 1) {
1022 LOG.debug(family.toString() + " has " + familyStatus.length +
1023 " files.");
1024 return false;
1025 }
1026 }
1027 }
1028 }
1029 return true;
1030 }
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041 public static int getTotalTableFragmentation(final HMaster master)
1042 throws IOException {
1043 Map<String, Integer> map = getTableFragmentation(master);
1044 return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
1045 }
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057 public static Map<String, Integer> getTableFragmentation(
1058 final HMaster master)
1059 throws IOException {
1060 Path path = getRootDir(master.getConfiguration());
1061
1062 FileSystem fs = path.getFileSystem(master.getConfiguration());
1063 return getTableFragmentation(fs, path);
1064 }
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
  /**
   * Computes fragmentation per table: the percentage of column-family
   * directories that contain more than one file. The returned map is keyed by
   * table name with an extra "-TOTAL-" entry aggregating across all tables.
   * @param fs filesystem to scan
   * @param hbaseRootDir hbase root directory
   * @return map of table name to fragmentation percentage (0-100)
   * @throws IOException on filesystem error
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    // Running totals across all tables, reported under "-TOTAL-".
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    PathFilter regionFilter = new RegionDirFilter(fs);
    PathFilter familyFilter = new FamilyDirFilter(fs);
    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
    for (Path d : tableDirs) {
      // Per-table counters.
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();

        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();

          // A family with more than one file counts as fragmented.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // Fraction of fragmented families for this table, as a rounded percent.
      frags.put(FSUtils.getTableName(d).getNameAsString(),
        Math.round((float) cfFrag / cfCount * 100));
    }
    // Overall fraction across all tables.
    frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122 public static Path getTableDir(Path rootdir, final TableName tableName) {
1123 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
1124 tableName.getQualifierAsString());
1125 }
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135 public static TableName getTableName(Path tablePath) {
1136 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
1137 }
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147 public static Path getNamespaceDir(Path rootdir, final String namespace) {
1148 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
1149 new Path(namespace)));
1150 }
1151
1152
1153
1154
1155 static class FileFilter implements PathFilter {
1156 private final FileSystem fs;
1157
1158 public FileFilter(final FileSystem fs) {
1159 this.fs = fs;
1160 }
1161
1162 @Override
1163 public boolean accept(Path p) {
1164 try {
1165 return fs.isFile(p);
1166 } catch (IOException e) {
1167 LOG.debug("unable to verify if path=" + p + " is a regular file", e);
1168 return false;
1169 }
1170 }
1171 }
1172
1173
1174
1175
1176 public static class BlackListDirFilter implements PathFilter {
1177 private final FileSystem fs;
1178 private List<String> blacklist;
1179
1180
1181
1182
1183
1184
1185
1186 @SuppressWarnings("unchecked")
1187 public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
1188 this.fs = fs;
1189 blacklist =
1190 (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
1191 : directoryNameBlackList);
1192 }
1193
1194 @Override
1195 public boolean accept(Path p) {
1196 boolean isValid = false;
1197 try {
1198 if (isValidName(p.getName())) {
1199 isValid = fs.getFileStatus(p).isDir();
1200 } else {
1201 isValid = false;
1202 }
1203 } catch (IOException e) {
1204 LOG.warn("An error occurred while verifying if [" + p.toString()
1205 + "] is a valid directory. Returning 'not valid' and continuing.", e);
1206 }
1207 return isValid;
1208 }
1209
1210 protected boolean isValidName(final String name) {
1211 return !blacklist.contains(name);
1212 }
1213 }
1214
1215
1216
1217
  /**
   * A {@link PathFilter} accepting all directories (a
   * {@link BlackListDirFilter} with an empty blacklist).
   */
  public static class DirFilter extends BlackListDirFilter {

    public DirFilter(FileSystem fs) {
      super(fs, null);
    }
  }
1224
1225
1226
1227
1228
1229 public static class UserTableDirFilter extends BlackListDirFilter {
1230 public UserTableDirFilter(FileSystem fs) {
1231 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
1232 }
1233
1234 protected boolean isValidName(final String name) {
1235 if (!super.isValidName(name))
1236 return false;
1237
1238 try {
1239 TableName.isLegalTableQualifierName(Bytes.toBytes(name));
1240 } catch (IllegalArgumentException e) {
1241 LOG.info("INVALID NAME " + name);
1242 return false;
1243 }
1244 return true;
1245 }
1246 }
1247
1248
1249
1250
1251
1252
1253
1254
  /**
   * Heuristically determines whether the underlying Hadoop supports durable
   * append/sync: either "dfs.support.append" is enabled and
   * SequenceFile.Writer has syncFs(), or FSDataOutputStream has hflush()
   * (both probed via reflection for cross-version compatibility).
   * @param conf configuration consulted for the append flag
   * @return true if a sync/flush mechanism is available
   */
  public static boolean isAppendSupported(final Configuration conf) {
    boolean append = conf.getBoolean("dfs.support.append", false);
    if (append) {
      try {
        // Probe for the hadoop-0.20-era SequenceFile.Writer#syncFs().
        SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
        append = true;
      } catch (SecurityException e) {
        // Deliberately ignored: cannot introspect, keep the configured value.
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    if (!append) {
      // Newer hadoop: look for FSDataOutputStream#hflush() instead.
      try {
        FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
        append = true;
      } catch (NoSuchMethodException e) {
        append = false;
      }
    }
    return append;
  }
1280
1281
1282
1283
1284
1285
1286 public static boolean isHDFS(final Configuration conf) throws IOException {
1287 FileSystem fs = FileSystem.get(conf);
1288 String scheme = fs.getUri().getScheme();
1289 return scheme.equalsIgnoreCase("hdfs");
1290 }
1291
1292
1293
1294
1295
1296
1297
1298
1299
  /**
   * Recovers the lease on file {@code p}, blocking until the lease is released
   * or recovery fails. Implementations are filesystem-specific (see
   * {@link #getInstance(FileSystem, Configuration)}).
   * @param fs filesystem holding the file
   * @param p file whose lease should be recovered
   * @param conf configuration for the recovery
   * @param reporter progress/cancellation hook; may be consulted during recovery
   * @throws IOException if lease recovery fails
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf, CancelableProgressable reporter) throws IOException;
1302
1303 public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
1304 throws IOException {
1305 List<Path> tableDirs = new LinkedList<Path>();
1306
1307 for(FileStatus status :
1308 fs.globStatus(new Path(rootdir,
1309 new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
1310 tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
1311 }
1312 return tableDirs;
1313 }
1314
1315
1316
1317
1318
1319
1320
1321
1322 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1323 throws IOException {
1324
1325 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1326 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1327 for (FileStatus dir: dirs) {
1328 tabledirs.add(dir.getPath());
1329 }
1330 return tabledirs;
1331 }
1332
1333
1334
1335
1336
1337
1338 public static boolean isRecoveredEdits(Path path) {
1339 return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
1340 }
1341
1342
1343
1344
1345 public static class RegionDirFilter implements PathFilter {
1346
1347 final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
1348 final FileSystem fs;
1349
1350 public RegionDirFilter(FileSystem fs) {
1351 this.fs = fs;
1352 }
1353
1354 @Override
1355 public boolean accept(Path rd) {
1356 if (!regionDirPattern.matcher(rd.getName()).matches()) {
1357 return false;
1358 }
1359
1360 try {
1361 return fs.getFileStatus(rd).isDir();
1362 } catch (IOException ioe) {
1363
1364 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1365 return false;
1366 }
1367 }
1368 }
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1379
1380 FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs));
1381 List<Path> regionDirs = new ArrayList<Path>(rds.length);
1382 for (FileStatus rdfs: rds) {
1383 Path rdPath = rdfs.getPath();
1384 regionDirs.add(rdPath);
1385 }
1386 return regionDirs;
1387 }
1388
1389
1390
1391
1392
1393 public static class FamilyDirFilter implements PathFilter {
1394 final FileSystem fs;
1395
1396 public FamilyDirFilter(FileSystem fs) {
1397 this.fs = fs;
1398 }
1399
1400 @Override
1401 public boolean accept(Path rd) {
1402 try {
1403
1404 HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(rd.getName()));
1405 } catch (IllegalArgumentException iae) {
1406
1407 return false;
1408 }
1409
1410 try {
1411 return fs.getFileStatus(rd).isDir();
1412 } catch (IOException ioe) {
1413
1414 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1415 return false;
1416 }
1417 }
1418 }
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1429
1430 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1431 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1432 for (FileStatus fdfs: fds) {
1433 Path fdPath = fdfs.getPath();
1434 familyDirs.add(fdPath);
1435 }
1436 return familyDirs;
1437 }
1438
1439 public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
1440 FileStatus[] fds = fs.listStatus(familyDir, new ReferenceFileFilter(fs));
1441 List<Path> referenceFiles = new ArrayList<Path>(fds.length);
1442 for (FileStatus fdfs: fds) {
1443 Path fdPath = fdfs.getPath();
1444 referenceFiles.add(fdPath);
1445 }
1446 return referenceFiles;
1447 }
1448
1449
1450
1451
1452 public static class HFileFilter implements PathFilter {
1453 final FileSystem fs;
1454
1455 public HFileFilter(FileSystem fs) {
1456 this.fs = fs;
1457 }
1458
1459 @Override
1460 public boolean accept(Path rd) {
1461 try {
1462
1463 return !fs.getFileStatus(rd).isDir() && StoreFileInfo.isHFile(rd);
1464 } catch (IOException ioe) {
1465
1466 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1467 return false;
1468 }
1469 }
1470 }
1471
1472 public static class ReferenceFileFilter implements PathFilter {
1473
1474 private final FileSystem fs;
1475
1476 public ReferenceFileFilter(FileSystem fs) {
1477 this.fs = fs;
1478 }
1479
1480 @Override
1481 public boolean accept(Path rd) {
1482 try {
1483
1484 return !fs.getFileStatus(rd).isDir() && StoreFileInfo.isReference(rd);
1485 } catch (IOException ioe) {
1486
1487 LOG.warn("Skipping file " + rd +" due to IOException", ioe);
1488 return false;
1489 }
1490 }
1491 }
1492
1493
1494
1495
1496
1497
1498
1499 public static FileSystem getCurrentFileSystem(Configuration conf)
1500 throws IOException {
1501 return getRootDir(conf).getFileSystem(conf);
1502 }
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520 public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
1521 final FileSystem fs, final Path hbaseRootDir, TableName tableName)
1522 throws IOException {
1523 if (map == null) {
1524 map = new HashMap<String, Path>();
1525 }
1526
1527
1528 Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
1529
1530
1531 PathFilter familyFilter = new FamilyDirFilter(fs);
1532 FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs));
1533 for (FileStatus regionDir : regionDirs) {
1534 Path dd = regionDir.getPath();
1535
1536 FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
1537 for (FileStatus familyDir : familyDirs) {
1538 Path family = familyDir.getPath();
1539
1540
1541 FileStatus[] familyStatus = fs.listStatus(family);
1542 for (FileStatus sfStatus : familyStatus) {
1543 Path sf = sfStatus.getPath();
1544 map.put( sf.getName(), sf);
1545 }
1546 }
1547 }
1548 return map;
1549 }
1550
1551 public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
1552 int result = 0;
1553 try {
1554 for (Path familyDir:getFamilyDirs(fs, p)){
1555 result += getReferenceFilePaths(fs, familyDir).size();
1556 }
1557 } catch (IOException e) {
1558 LOG.warn("Error Counting reference files.", e);
1559 }
1560 return result;
1561 }
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577 public static Map<String, Path> getTableStoreFilePathMap(
1578 final FileSystem fs, final Path hbaseRootDir)
1579 throws IOException {
1580 Map<String, Path> map = new HashMap<String, Path>();
1581
1582
1583
1584
1585
1586 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1587 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1588 FSUtils.getTableName(tableDir));
1589 }
1590 return map;
1591 }
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604 public static FileStatus [] listStatus(final FileSystem fs,
1605 final Path dir, final PathFilter filter) throws IOException {
1606 FileStatus [] status = null;
1607 try {
1608 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
1609 } catch (FileNotFoundException fnfe) {
1610
1611 if (LOG.isTraceEnabled()) {
1612 LOG.trace(dir + " doesn't exist");
1613 }
1614 }
1615 if (status == null || status.length < 1) return null;
1616 return status;
1617 }
1618
1619
1620
1621
1622
1623
1624
1625
1626
  /**
   * Calls fs.listStatus() on the given directory with no filter, treating a
   * missing directory the same as an empty one.
   * @param fs filesystem to inspect
   * @param dir directory to list
   * @return the entries, or null when the directory is missing or empty
   * @throws IOException on filesystem errors other than a missing directory
   */
  public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
    return listStatus(fs, dir, null);
  }
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
  /**
   * Calls fs.delete() and returns its result.
   * @param fs filesystem to act on
   * @param path path to delete
   * @param recursive whether to delete directory contents recursively
   * @return the value returned by fs.delete()
   * @throws IOException on filesystem errors
   */
  public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
    throws IOException {
    return fs.delete(path, recursive);
  }
1644
1645
1646
1647
1648
1649
1650
1651
1652
  /**
   * Calls fs.exists() and returns its result.
   * @param fs filesystem to inspect
   * @param path path to check
   * @return true if the path exists
   * @throws IOException on filesystem errors
   */
  public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
    return fs.exists(path);
  }
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667 public static void checkAccess(UserGroupInformation ugi, FileStatus file,
1668 FsAction action) throws AccessDeniedException {
1669 if (ugi.getShortUserName().equals(file.getOwner())) {
1670 if (file.getPermission().getUserAction().implies(action)) {
1671 return;
1672 }
1673 } else if (contains(ugi.getGroupNames(), file.getGroup())) {
1674 if (file.getPermission().getGroupAction().implies(action)) {
1675 return;
1676 }
1677 } else if (file.getPermission().getOtherAction().implies(action)) {
1678 return;
1679 }
1680 throw new AccessDeniedException("Permission denied:" + " action=" + action
1681 + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
1682 }
1683
1684 private static boolean contains(String[] groups, String user) {
1685 for (String group : groups) {
1686 if (group.equals(user)) {
1687 return true;
1688 }
1689 }
1690 return false;
1691 }
1692
1693
1694
1695
1696
1697
1698
1699
  /**
   * Log, at DEBUG level, a recursive listing of the filesystem tree rooted at
   * the given path.
   * @param fs filesystem to inspect
   * @param root directory to start the listing at
   * @param LOG log to write the listing to
   * @throws IOException on filesystem errors
   */
  public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
    throws IOException {
    LOG.debug("Current file system:");
    logFSTree(LOG, fs, root, "|-");
  }
1705
1706
1707
1708
1709
1710
1711 private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
1712 throws IOException {
1713 FileStatus[] files = FSUtils.listStatus(fs, root, null);
1714 if (files == null) return;
1715
1716 for (FileStatus file : files) {
1717 if (file.isDir()) {
1718 LOG.debug(prefix + file.getPath().getName() + "/");
1719 logFSTree(LOG, fs, file.getPath(), prefix + "---");
1720 } else {
1721 LOG.debug(prefix + file.getPath().getName());
1722 }
1723 }
1724 }
1725
  /**
   * Rename {@code src} to {@code dest} after first setting src's modification
   * time to the current time, so the file carries a fresh mtime at its new
   * location.
   * NOTE(review): the time is updated before the rename; if the rename fails,
   * the source is left with the new mtime — confirm callers tolerate that.
   * @param fs filesystem to act on
   * @param src path to rename
   * @param dest destination path
   * @return the result of fs.rename()
   * @throws IOException on filesystem errors
   */
  public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
    throws IOException {
    // Bump src's modification time to now; -1 leaves the access time unchanged.
    fs.setTimes(src, EnvironmentEdgeManager.currentTimeMillis(), -1);
    return fs.rename(src, dest);
  }
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1747 final Configuration conf) throws IOException {
1748 return getRegionDegreeLocalityMappingFromFS(
1749 conf, null,
1750 conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));
1751
1752 }
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1771 final Configuration conf, final String desiredTable, int threadPoolSize)
1772 throws IOException {
1773 Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1774 new ConcurrentHashMap<String, Map<String, Float>>();
1775 getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1776 regionDegreeLocalityMapping);
1777 return regionDegreeLocalityMapping;
1778 }
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
  /**
   * Scan the filesystem to populate the given locality mappings.
   * NOTE(review): the remainder of this method's body is not visible in this
   * chunk; only the setup shown below is documented. Presumably the method
   * walks region directories under the root path and fills the two output
   * maps — verify against the full source.
   * @param conf configuration to use
   * @param desiredTable table to restrict the scan to, or null for all tables
   * @param threadPoolSize number of threads used for the scan
   * @param regionToBestLocalityRSMapping output map, may be null
   * @param regionDegreeLocalityMapping output map, may be null
   * @throws IOException on filesystem errors
   */
  private static void getRegionLocalityMappingFromFS(
      final Configuration conf, final String desiredTable,
      int threadPoolSize,
      Map<String, String> regionToBestLocalityRSMapping,
      Map<String, Map<String, Float>> regionDegreeLocalityMapping)
      throws IOException {
    FileSystem fs = FileSystem.get(conf);
    Path rootPath = FSUtils.getRootDir(conf);
    // Record the start time so elapsed time can be reported later.
    long startTime = EnvironmentEdgeManager.currentTimeMillis();
    Path queryPath;
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
  /**
   * Check short-circuit-read related configuration: warn when
   * "dfs.client.read.shortcircuit.skip.checksum" is enabled (HBase checksums
   * make it unnecessary when enabled; see HBASE-6868), then validate the
   * short-circuit read buffer size.
   * @param conf configuration to examine
   */
  public static void setupShortCircuitRead(final Configuration conf) {
    // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
    boolean shortCircuitSkipChecksum =
      conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
    if (shortCircuitSkipChecksum) {
      LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
        "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
        "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
      // Deliberately trips when assertions are enabled, so test runs fail
      // loudly on this misconfiguration.
      assert !shortCircuitSkipChecksum;
    }
    checkShortCircuitReadBufferSize(conf);
  }
1923
1924
1925
1926
1927
1928 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1929 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1930 final int notSet = -1;
1931
1932 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1933 int size = conf.getInt(dfsKey, notSet);
1934
1935 if (size != notSet) return;
1936
1937 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1938 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1939 }
1940 }