1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 package org.apache.hadoop.hbase.util;
21
22 import java.io.IOException;
23
24 import org.apache.commons.logging.Log;
25 import org.apache.commons.logging.LogFactory;
26 import org.apache.hadoop.hbase.classification.InterfaceAudience;
27 import org.apache.hadoop.hbase.classification.InterfaceStability;
28 import org.apache.hadoop.conf.Configuration;
29 import org.apache.hadoop.conf.Configured;
30 import org.apache.hadoop.fs.FileSystem;
31 import org.apache.hadoop.fs.Path;
32 import org.apache.hadoop.hbase.TableName;
33 import org.apache.hadoop.hbase.HBaseConfiguration;
34 import org.apache.hadoop.hbase.HConstants;
35 import org.apache.hadoop.hbase.HRegionInfo;
36 import org.apache.hadoop.hbase.HTableDescriptor;
37 import org.apache.hadoop.hbase.MasterNotRunningException;
38 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
39 import org.apache.hadoop.hbase.client.Delete;
40 import org.apache.hadoop.hbase.client.Get;
41 import org.apache.hadoop.hbase.client.HBaseAdmin;
42 import org.apache.hadoop.hbase.client.Result;
43 import org.apache.hadoop.hbase.regionserver.HRegion;
44 import org.apache.hadoop.hbase.regionserver.wal.HLog;
45 import org.apache.hadoop.io.WritableComparator;
46 import org.apache.hadoop.util.GenericOptionsParser;
47 import org.apache.hadoop.util.Tool;
48 import org.apache.hadoop.util.ToolRunner;
49
50 import com.google.common.base.Preconditions;
51
52
53
54
55
/**
 * Command-line utility that merges two regions of the same table into one,
 * operating directly against the filesystem and the meta region. The HBase
 * cluster MUST be shut down while this tool runs (the {@link #run(String[])}
 * method aborts if a live cluster is detected).
 *
 * <p>Usage: {@code bin/hbase org.apache.hadoop.hbase.util.Merge
 * <table-name> <region-1> <region-2>} — region names are given in
 * {@link Bytes#toStringBinary(byte[])} form.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class Merge extends Configured implements Tool {
  static final Log LOG = LogFactory.getLog(Merge.class);
  // HBase root directory in the filesystem (resolved from the configuration).
  private Path rootdir;
  // Helper for offline access to the meta region; volatile because run() and
  // the merge methods may be observed from different threads — TODO confirm.
  private volatile MetaUtils utils;
  // Table owning the two regions to merge.
  private TableName tableName;
  // Encoded full region names of the two merge operands (toBytesBinary form).
  private volatile byte [] region1;
  private volatile byte [] region2;
  // Region info of the merge result; null until a merge completes.
  private volatile HRegionInfo mergeInfo;

  /** No-arg constructor for use with {@link ToolRunner}, which injects conf. */
  public Merge() {
    super();
  }

  /**
   * Constructor.
   * @param conf configuration to use for filesystem and cluster access
   */
  public Merge(Configuration conf) {
    this.mergeInfo = null;
    setConf(conf);
  }

  /**
   * Tool entry point: parses arguments, verifies the filesystem is reachable
   * and the cluster is down, then performs the merge.
   * @param args command-line arguments: table name and two region names
   * @return 0 on success, -1 on any failure
   */
  public int run(String[] args) throws Exception {
    if (parseArgs(args) != 0) {
      return -1;
    }

    // Verify the underlying file system is reachable before touching anything.
    FileSystem fs = FileSystem.get(getConf());
    LOG.info("Verifying that file system is available...");
    try {
      FSUtils.checkFileSystemAvailable(fs);
    } catch (IOException e) {
      LOG.fatal("File system is not available", e);
      return -1;
    }

    // The merge rewrites region files and meta directly, so a running cluster
    // would race with us: abort if HBase is up. The two catches below are the
    // EXPECTED (good) outcome — an unreachable cluster means it is offline.
    LOG.info("Verifying that HBase is not running...");
    try {
      HBaseAdmin.checkHBaseAvailable(getConf());
      LOG.fatal("HBase cluster must be off-line, and is not. Aborting.");
      return -1;
    } catch (ZooKeeperConnectionException zkce) {
      // ZooKeeper not reachable: cluster is down, as required. Proceed.
    } catch (MasterNotRunningException e) {
      // Master not running: cluster is down, as required. Proceed.
    }

    // Open offline access to meta and resolve the HBase root directory,
    // then attempt the merge; always shut down the meta helper afterwards.
    this.utils = new MetaUtils(getConf());
    this.rootdir = FSUtils.getRootDir(getConf());
    try {
      mergeTwoRegions();
      return 0;
    } catch (Exception e) {
      LOG.fatal("Merge failed", e);
      return -1;

    } finally {
      if (this.utils != null) {
        this.utils.shutdown();
      }
    }
  }

  /** @return region info of the merged region, or null if no merge has run */
  HRegionInfo getMergedHRegionInfo() {
    return this.mergeInfo;
  }

  /*
   * Merges the two user-specified regions: looks up both region infos in the
   * meta region, merges their on-disk data, then records the new region in
   * meta. Assumes parseArgs already validated the region/table names.
   */
  private void mergeTwoRegions() throws IOException {
    LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
      Bytes.toStringBinary(this.region2) + " in table " + this.tableName);
    HRegion meta = this.utils.getMetaRegion();
    // Fetch the HRegionInfo cell for the first region from meta.
    Get get = new Get(region1);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    Result result1 = meta.get(get);
    Preconditions.checkState(!result1.isEmpty(),
        "First region cells can not be null");
    HRegionInfo info1 = HRegionInfo.getHRegionInfo(result1);
    if (info1 == null) {
      throw new NullPointerException("info1 is null using key " +
          Bytes.toStringBinary(region1) + " in " + meta);
    }
    // Same lookup for the second region.
    get = new Get(region2);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    Result result2 = meta.get(get);
    Preconditions.checkState(!result2.isEmpty(),
        "Second region cells can not be null");
    HRegionInfo info2 = HRegionInfo.getHRegionInfo(result2);
    if (info2 == null) {
      throw new NullPointerException("info2 is null using key " + meta);
    }
    // Load the table descriptor from the filesystem (cluster is offline,
    // so we cannot ask a master for it).
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
      this.rootdir, this.tableName);
    HRegion merged = merge(htd, meta, info1, info2);

    LOG.info("Adding " + merged.getRegionInfo() + " to " +
        meta.getRegionInfo());

    // Register the merged region in meta, then release it.
    HRegion.addRegionToMETA(meta, merged);
    merged.close();
  }

  /*
   * Opens both regions, merges them via HRegion.merge, and removes the two
   * source regions' rows from meta. Returns the (still open) merged region;
   * the caller is responsible for adding it to meta and closing it.
   */
  private HRegion merge(final HTableDescriptor htd, HRegion meta,
      HRegionInfo info1, HRegionInfo info2)
  throws IOException {
    if (info1 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
        Bytes.toStringBinary(meta.getRegionName()));
    }
    if (info2 == null) {
      throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
        Bytes.toStringBinary(meta.getRegionName()));
    }
    HRegion merged = null;
    HLog log = utils.getLog();
    // Open both source regions; nested try/finally guarantees each gets
    // closed even if HRegion.merge throws (merge may close them itself,
    // hence the isClosed() guards).
    HRegion r1 = HRegion.openHRegion(info1, htd, log, getConf());
    try {
      HRegion r2 = HRegion.openHRegion(info2, htd, log, getConf());
      try {
        merged = HRegion.merge(r1, r2);
      } finally {
        if (!r2.isClosed()) {
          r2.close();
        }
      }
    } finally {
      if (!r1.isClosed()) {
        r1.close();
      }
    }

    // The source regions no longer exist on disk; drop their meta rows so
    // only the merged region remains catalogued.
    removeRegionFromMeta(meta, info1);
    removeRegionFromMeta(meta, info2);

    this.mergeInfo = merged.getRegionInfo();
    return merged;
  }

  /*
   * Removes a region's row from the meta region.
   *
   * @param meta hbase:meta HRegion to be updated
   * @param regioninfo region whose catalog row should be deleted
   */
  private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
  throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing region: " + regioninfo + " from " + meta);
    }

    // Delete the whole row up to "now" so every catalog cell for the
    // region is removed.
    Delete delete  = new Delete(regioninfo.getRegionName(),
        System.currentTimeMillis());
    meta.delete(delete);
  }

  /*
   * Parses command-line arguments: <table-name> <region-1> <region-2>
   * (after stripping generic Hadoop options). Validates that both regions
   * belong to the table and are distinct.
   *
   * @param args command line arguments
   * @return 0 on success, -1 on bad arguments
   */
  private int parseArgs(String[] args) throws IOException {
    GenericOptionsParser parser =
      new GenericOptionsParser(getConf(), args);

    String[] remainingArgs = parser.getRemainingArgs();
    if (remainingArgs.length != 3) {
      usage();
      return -1;
    }
    tableName = TableName.valueOf(remainingArgs[0]);

    // Region names are supplied in toStringBinary form; decode to bytes.
    region1 = Bytes.toBytesBinary(remainingArgs[1]);
    region2 = Bytes.toBytesBinary(remainingArgs[2]);
    int status = 0;
    if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
      status = -1;
    } else if (Bytes.equals(region1, region2)) {
      LOG.error("Can't merge a region with itself");
      status = -1;
    }
    return status;
  }

  /**
   * Checks that a region name starts with the table name prefix — a full
   * region name begins with its table's name, so a mismatch means the region
   * does not belong to the table.
   * @return true (and logs an error) if the region is NOT in the table
   */
  private boolean notInTable(final TableName tn, final byte [] rn) {
    if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
        rn, 0, tn.getName().length) != 0) {
      LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
        tn);
      return true;
    }
    return false;
  }

  /** Prints command-line usage to stderr (both hadoop 0.20 and 0.21+ forms). */
  private void usage() {
    System.err
        .println("For hadoop 0.20,  Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
            + "[-Dfs.default.name=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
    System.err
        .println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
            + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
  }

  /**
   * Main program entry point: runs the tool via {@link ToolRunner} and exits
   * with its status code.
   * @param args command-line arguments
   */
  public static void main(String[] args) {
    int status;
    try {
      status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
    } catch (Exception e) {
      LOG.error("exiting due to error", e);
      status = -1;
    }
    System.exit(status);
  }
}