diff --git a/CHANGES.txt b/CHANGES.txt index 638d69cf6ef9..e3e75e515095 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -23,8 +23,10 @@ Release 0.20.0 - Unreleased HBASE-1031 Add the Zookeeper jar HBASE-1142 Cleanup thrift server; remove Text and profuse DEBUG messaging (Tim Sell via Stack) + HBASE-1064 HBase REST xml/json improvements (Brian Beggs working of + initial Michael Gottesman work via Stack) -Release 0.19.0 - Unreleased +Release 0.19.0 - 01/21/2009 INCOMPATIBLE CHANGES HBASE-885 TableMap and TableReduce should be interfaces (Doğacan Güney via Stack) diff --git a/NOTICE.txt b/NOTICE.txt index 40a24e9789d7..4fb7d7469353 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -3,8 +3,42 @@ Foundation (http://www.apache.org/). In addition, this product includes software developed by: + European Commission project OneLab (http://www.one-lab.org) + Facebook, Inc. (http://developers.facebook.com/thrift/ -- Page includes the Thrift Software License) + JUnit (http://www.junit.org/) + + +Michael Gottesman developed AgileJSON. Its source code is here: + + http://github.com/gottesmm/agile-json-2.0/tree/master + +It has this license at the head of the each source file: + + * Permission is hereby granted, free of charge, to any person obtaining a + * copy + * of this software and associated documentation files (the "Software"), to + * deal + * in the Software without restriction, including without limitation the + * rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all + * copies or substantial portions of the Software. + * + * The Software shall be used for Good, not Evil. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. diff --git a/lib/AgileJSON-2.0.jar b/lib/AgileJSON-2.0.jar new file mode 100644 index 000000000000..906161a9cf7c Binary files /dev/null and b/lib/AgileJSON-2.0.jar differ diff --git a/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java index a5af539b0267..e0168da8353a 100644 --- a/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -27,10 +27,15 @@ import java.util.Map; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableComparable; +import agilejson.TOJSON; + /** * An HColumnDescriptor contains information about a column family such as the * number of versions, compression settings, etc. @@ -40,7 +45,7 @@ * column and recreating it. If there is data stored in the column, it will be * deleted when the column is deleted. 
*/ -public class HColumnDescriptor implements WritableComparable { +public class HColumnDescriptor implements ISerializable, WritableComparable { // For future backward compatibility // Version 3 was when column names become byte arrays and when we picked up @@ -257,6 +262,7 @@ public HColumnDescriptor(final byte [] familyName, final int maxVersions, /** * @return Name of this column family with colon as required by client API */ + @TOJSON(fieldName = "name", base64=true) public byte [] getNameWithColon() { return HStoreKey.addDelimiter(this.name); } @@ -315,6 +321,7 @@ public void setValue(String key, String value) { } /** @return compression type being used for the column family */ + @TOJSON public CompressionType getCompression() { String value = getValue(COMPRESSION); if (value != null) { @@ -327,6 +334,7 @@ else if (value.equalsIgnoreCase("RECORD")) } /** @return maximum number of versions */ + @TOJSON public int getMaxVersions() { String value = getValue(HConstants.VERSIONS); if (value != null) @@ -344,6 +352,7 @@ public void setMaxVersions(int maxVersions) { /** * @return Compression type setting. */ + @TOJSON public CompressionType getCompressionType() { return getCompression(); } @@ -364,6 +373,7 @@ public void setCompressionType(CompressionType type) { /** * @return True if we are to keep all in use HRegionServer cache. */ + @TOJSON(prefixLength = 2) public boolean isInMemory() { String value = getValue(HConstants.IN_MEMORY); if (value != null) @@ -382,6 +392,7 @@ public void setInMemory(boolean inMemory) { /** * @return Maximum value length. */ + @TOJSON public synchronized int getMaxValueLength() { if (this.maxValueLength == null) { String value = getValue(LENGTH); @@ -402,6 +413,7 @@ public void setMaxValueLength(int maxLength) { /** * @return Time-to-live of cell contents, in seconds. */ + @TOJSON public int getTimeToLive() { String value = getValue(TTL); return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL; @@ -417,6 +429,7 @@ public void setTimeToLive(int timeToLive) { /** * @return True if MapFile blocks should be cached. 
*/ + @TOJSON(prefixLength = 2) public boolean isBlockCacheEnabled() { String value = getValue(BLOCKCACHE); if (value != null) @@ -434,6 +447,7 @@ public void setBlockCacheEnabled(boolean blockCacheEnabled) { /** * @return true if a bloom filter is enabled */ + @TOJSON(prefixLength = 2) public boolean isBloomfilter() { String value = getValue(BLOOMFILTER); if (value != null) @@ -577,4 +591,11 @@ else if (result > 0) } return result; } + + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) throws HBaseRestException { + serializer.serializeColumnDescriptor(this); + } } diff --git a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java index 17505586d145..fa430a12871e 100644 --- a/src/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/src/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -31,14 +31,19 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableComparable; +import agilejson.TOJSON; + /** * HTableDescriptor contains the name of an HTable, and its * column families. */ -public class HTableDescriptor implements WritableComparable { +public class HTableDescriptor implements WritableComparable, ISerializable { // Changes prior to version 3 were not recorded here. // Version 3 adds metadata as a map where keys and values are byte[]. 
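The restSerialize(IRestSerializer) hook added to HColumnDescriptor above, and to HTableDescriptor just below, is a visitor-style double dispatch: each ISerializable hands itself to whichever serializer was chosen for the request, and the serializer calls back into its type-specific method. A minimal caller-side sketch, using only methods that appear in this patch (the wrapper class name is invented for illustration):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
    import org.apache.hadoop.hbase.rest.serializer.IRestSerializer;

    // Illustrative only: drive the double dispatch for a table and its families.
    public final class DescriptorSerializationSketch {
      static void writeTable(HTableDescriptor htd, IRestSerializer serializer)
          throws HBaseRestException {
        htd.restSerialize(serializer);   // calls serializer.serializeTableDescriptor(htd)
        for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
          hcd.restSerialize(serializer); // calls serializer.serializeColumnDescriptor(hcd)
        }
      }
    }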
@@ -383,6 +388,7 @@ public void setReadOnly(final boolean readOnly) { } /** @return name of table */ + @TOJSON public byte [] getName() { return name; } @@ -621,6 +627,11 @@ else if (result > 0) public Collection getFamilies() { return Collections.unmodifiableCollection(this.families.values()); } + + @TOJSON(fieldName = "columns") + public HColumnDescriptor[] getColumnFamilies() { + return getFamilies().toArray(new HColumnDescriptor[0]); + } /** * @param column @@ -667,4 +678,11 @@ public static Path getTableDir(Path rootdir, final byte [] tableName) { new HColumnDescriptor(HConstants.COLUMN_FAMILY_HISTORIAN, HConstants.ALL_VERSIONS, HColumnDescriptor.CompressionType.NONE, false, false, Integer.MAX_VALUE, HConstants.WEEK_IN_SECONDS, false)}); + + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) throws HBaseRestException { + serializer.serializeTableDescriptor(this); + } } \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/RegionHistorian.java b/src/java/org/apache/hadoop/hbase/RegionHistorian.java index 90d5a4e729ec..df08ce74d092 100644 --- a/src/java/org/apache/hadoop/hbase/RegionHistorian.java +++ b/src/java/org/apache/hadoop/hbase/RegionHistorian.java @@ -328,4 +328,4 @@ public void offline() { LOG.debug("Offlined"); } } -} +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/io/Cell.java b/src/java/org/apache/hadoop/hbase/io/Cell.java index 76b9b474fc46..4eb831187efc 100644 --- a/src/java/org/apache/hadoop/hbase/io/Cell.java +++ b/src/java/org/apache/hadoop/hbase/io/Cell.java @@ -29,28 +29,36 @@ import java.util.TreeMap; import java.util.Map.Entry; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.Writable; +import agilejson.TOJSON; + /** - * Cell - Used to transport a cell value (byte[]) and the timestamp it was + * Cell - Used to transport a cell value (byte[]) and the timestamp it was * stored with together as a result for get and getRow methods. This promotes - * the timestamp of a cell to a first-class value, making it easy to take - * note of temporal data. Cell is used all the way from HStore up to HTable. + * the timestamp of a cell to a first-class value, making it easy to take note + * of temporal data. Cell is used all the way from HStore up to HTable. */ -public class Cell implements Writable, Iterable> { - protected final SortedMap valueMap = - new TreeMap(new Comparator() { - public int compare(Long l1, Long l2) { - return l2.compareTo(l1); - }}); - +public class Cell implements Writable, Iterable>, + ISerializable { + protected final SortedMap valueMap = new TreeMap( + new Comparator() { + public int compare(Long l1, Long l2) { + return l2.compareTo(l1); + } + }); + /** For Writable compatibility */ public Cell() { } /** * Create a new Cell with a given value and timestamp. Used by HStore. + * * @param value * @param timestamp */ @@ -60,24 +68,29 @@ public Cell(String value, long timestamp) { /** * Create a new Cell with a given value and timestamp. Used by HStore. 
+ * * @param value * @param timestamp */ public Cell(byte[] value, long timestamp) { valueMap.put(timestamp, value); } - + /** - * @param vals array of values - * @param ts array of timestamps + * @param vals + * array of values + * @param ts + * array of timestamps */ public Cell(String[] vals, long[] ts) { this(Bytes.toByteArrays(vals), ts); } - + /** - * @param vals array of values - * @param ts array of timestamps + * @param vals + * array of values + * @param ts + * array of timestamps */ public Cell(byte[][] vals, long[] ts) { if (vals.length != ts.length) { @@ -88,42 +101,51 @@ public Cell(byte[][] vals, long[] ts) { valueMap.put(ts[i], vals[i]); } } - + /** @return the current cell's value */ + @TOJSON(base64=true) public byte[] getValue() { return valueMap.get(valueMap.firstKey()); } - + /** @return the current cell's timestamp */ + @TOJSON public long getTimestamp() { return valueMap.firstKey(); } - + /** @return the number of values this cell holds */ public int getNumValues() { return valueMap.size(); } - - /** Add values and timestamps of another cell into this cell - * @param c Cell + + /** + * Add values and timestamps of another cell into this cell + * + * @param c + * Cell */ public void mergeCell(Cell c) { valueMap.putAll(c.valueMap); } - - /** Add a new timestamp and value to this cell - * @param val value - * @param ts timestamp + + /** + * Add a new timestamp and value to this cell + * + * @param val + * value + * @param ts + * timestamp */ public void add(byte[] val, long ts) { valueMap.put(ts, val); } - + @Override public String toString() { if (valueMap.size() == 1) { - return "timestamp=" + getTimestamp() + ", value=" + - Bytes.toString(getValue()); + return "timestamp=" + getTimestamp() + ", value=" + + Bytes.toString(getValue()); } StringBuilder s = new StringBuilder("{ "); int i = 0; @@ -141,7 +163,7 @@ public String toString() { s.append(" }"); return s.toString(); } - + // // Writable // @@ -162,7 +184,7 @@ public void write(final DataOutput out) throws IOException { Bytes.writeByteArray(out, entry.getValue()); } } - + // // Iterable // @@ -170,23 +192,36 @@ public void write(final DataOutput out) throws IOException { public Iterator> iterator() { return new CellIterator(); } - + private class CellIterator implements Iterator> { private Iterator> it; + CellIterator() { it = valueMap.entrySet().iterator(); } - + public boolean hasNext() { return it.hasNext(); } - + public Entry next() { return it.next(); } - + public void remove() throws UnsupportedOperationException { throw new UnsupportedOperationException("remove is not supported"); } } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org + * .apache.hadoop.hbase.rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeCell(this); + } } diff --git a/src/java/org/apache/hadoop/hbase/io/RowResult.java b/src/java/org/apache/hadoop/hbase/io/RowResult.java index bbc83e712656..4f650f79c61f 100644 --- a/src/java/org/apache/hadoop/hbase/io/RowResult.java +++ b/src/java/org/apache/hadoop/hbase/io/RowResult.java @@ -32,14 +32,20 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.rest.descriptors.RestCell; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.Writable; +import agilejson.TOJSON; + /** * Holds row name and then a map of columns to cells. */ -public class RowResult implements Writable, SortedMap { +public class RowResult implements Writable, SortedMap, ISerializable { private byte [] row = null; private final HbaseMapWritable cells; @@ -63,6 +69,7 @@ public RowResult (final byte [] row, * Get the row for this RowResult * @return the row */ + @TOJSON(base64=true) public byte [] getRow() { return row; } @@ -124,6 +131,22 @@ public void clear() { public Set> entrySet() { return Collections.unmodifiableSet(this.cells.entrySet()); } + + /** + * This method used solely for the REST serialization + * + * @return + */ + @TOJSON + public RestCell[] getCells() { + RestCell[] restCells = new RestCell[this.cells.size()]; + int i = 0; + for (Map.Entry entry : this.cells.entrySet()) { + restCells[i] = new RestCell(entry.getKey(), entry.getValue()); + i++; + } + return restCells; + } public Collection values() { ArrayList result = new ArrayList(); @@ -235,10 +258,17 @@ public String toString() { return sb.toString(); } + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) throws HBaseRestException { + serializer.serializeRowResult(this); + } + // // Writable // - + public void readFields(final DataInput in) throws IOException { this.row = Bytes.readByteArray(in); this.cells.readFields(in); diff --git a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 966aac44779c..6e48bb5bb770 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -248,4 +248,4 @@ void interruptIfNecessary() { this.interrupt(); } } -} +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/rest/AbstractController.java b/src/java/org/apache/hadoop/hbase/rest/AbstractController.java new file mode 100644 index 000000000000..6bf99e4f8676 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/AbstractController.java @@ -0,0 +1,72 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public abstract class AbstractController implements RESTConstants { + + private Log LOG = LogFactory.getLog(AbstractController.class); + protected Configuration conf; + protected AbstractModel model; + + public void initialize(HBaseConfiguration conf, HBaseAdmin admin) { + this.conf = conf; + this.model = generateModel(conf, admin); + } + + public abstract void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException; + + public abstract void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException; + + public abstract void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException; + + public abstract void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException; + + protected abstract AbstractModel generateModel(HBaseConfiguration conf, + HBaseAdmin a); + + protected byte[][] getColumnsFromQueryMap(Map queryMap) { + byte[][] columns = null; + String[] columnArray = queryMap.get(RESTConstants.COLUMN); + if (columnArray != null) { + columns = new byte[columnArray.length][]; + for (int i = 0; i < columnArray.length; i++) { + columns[i] = Bytes.toBytes(columnArray[i]); + } + } + return columns; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/AbstractModel.java b/src/java/org/apache/hadoop/hbase/rest/AbstractModel.java new file mode 100644 index 000000000000..a03455a3f7d7 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/AbstractModel.java @@ -0,0 +1,99 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.util.Base64; +import org.apache.hadoop.hbase.util.Bytes; + +public abstract class AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(AbstractModel.class); + protected HBaseConfiguration conf; + protected HBaseAdmin admin; + + protected static class Encodings { + + protected interface Encoding { + + String encode(byte[] b) throws HBaseRestException; + } + + public static Encoding EBase64 = new Encoding() { + + public String encode(byte[] b) throws HBaseRestException { + return new String(Base64.encodeBytes(b)); + } + }; + public static Encoding EUTF8 = new Encoding() { + + public String encode(byte[] b) throws HBaseRestException { + return new String(b); + } + }; + } + + protected static Encodings.Encoding encoding = Encodings.EUTF8; + + public void initialize(HBaseConfiguration conf, HBaseAdmin admin) { + this.conf = conf; + this.admin = admin; + } + + protected byte[][] getColumns(byte[] tableName) throws HBaseRestException { + try { + HTable h = new HTable(tableName); + Collection columns = h.getTableDescriptor() + .getFamilies(); + byte[][] resultant = new byte[columns.size()][]; + int count = 0; + + for (HColumnDescriptor c : columns) { + resultant[count++] = c.getNameWithColon(); + } + + return resultant; + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + protected static byte COLON = Bytes.toBytes(":")[0]; + + protected boolean isColumnFamily(byte[] columnName) { + for (int i = 0; i < columnName.length; i++) { + if (columnName[i] == COLON) { + return true; + } + } + + return false; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/DatabaseController.java b/src/java/org/apache/hadoop/hbase/rest/DatabaseController.java new file mode 100644 index 000000000000..d732a9efd68f --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/DatabaseController.java @@ -0,0 +1,84 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; + +public class DatabaseController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(DatabaseController.class); + + protected DatabaseModel getModel() { + return (DatabaseModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, + HBaseAdmin admin) { + return new DatabaseModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + s.setNoQueryResults(); + DatabaseModel innerModel = getModel(); + + if (queryMap.size() == 0) { + s.setOK(innerModel.getDatabaseMetadata()); + } else { + s.setBadRequest("Unknown query."); + } + s.respond(); + } + + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/DatabaseModel.java b/src/java/org/apache/hadoop/hbase/rest/DatabaseModel.java new file mode 100644 index 000000000000..1c7a4e896993 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/DatabaseModel.java @@ -0,0 +1,85 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; + +import agilejson.TOJSON; + +public class DatabaseModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(DatabaseModel.class); + + public DatabaseModel(HBaseConfiguration conf, HBaseAdmin admin) { + super.initialize(conf, admin); + } + + public static class DatabaseMetadata implements ISerializable { + protected boolean master_running; + protected HTableDescriptor[] tables; + + public DatabaseMetadata(HBaseAdmin a) throws IOException { + master_running = a.isMasterRunning(); + tables = a.listTables(); + } + + @TOJSON(prefixLength = 2) + public boolean isMasterRunning() { + return master_running; + } + + @TOJSON + public HTableDescriptor[] getTables() { + return tables; + } + + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeDatabaseMetadata(this); + } + } + + // Serialize admin ourselves to json object + // rather than returning the admin object for obvious reasons + public DatabaseMetadata getMetadata() throws HBaseRestException { + return getDatabaseMetadata(); + } + + protected DatabaseMetadata getDatabaseMetadata() throws HBaseRestException { + DatabaseMetadata databaseMetadata = null; + try { + databaseMetadata = new DatabaseMetadata(this.admin); + } catch (IOException e) { + throw new HBaseRestException(e); + } + + return databaseMetadata; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java b/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java index 291525695019..a9ce7515df9f 100644 --- a/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java +++ b/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java @@ -19,7 +19,12 @@ */ package org.apache.hadoop.hbase.rest; +import java.io.BufferedReader; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; @@ -27,49 +32,98 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.HBaseRestParserFactory; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.rest.serializer.RestSerializerFactory; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.InfoServer; -import org.apache.hadoop.mapred.StatusHttpServer; import org.mortbay.http.NCSARequestLog; import org.mortbay.http.SocketListener; import org.mortbay.jetty.servlet.WebApplicationContext; /** - * Servlet implementation class for hbase REST interface. - * Presumes container ensures single thread through here at any one time - * (Usually the default configuration). In other words, code is not - * written thread-safe. - *

This servlet has explicit dependency on Jetty server; it uses the
- * jetty implementation of MultipartResponse.
+ * Servlet implementation class for hbase REST interface. Presumes container
+ * ensures single thread through here at any one time (Usually the default
+ * configuration). In other words, code is not written thread-safe.
+ * <p>
+ * This servlet has explicit dependency on Jetty server; it uses the jetty
+ * implementation of MultipartResponse.
  *
- * <p>TODO:
+ *
+ * <p>
+ * TODO:
  * <ul>
- * <li>multipart/related response is not correct; the servlet setContentType
- * is broken. I am unable to add parameters such as boundary or start to
- * multipart/related. They get stripped.</li>
- * <li>Currently creating a scanner, need to specify a column. Need to make
- * it so the HTable instance has current table's metadata to-hand so easy to
- * find the list of all column families so can make up list of columns if none
- * specified.</li>
+ * <li>multipart/related response is not correct; the servlet setContentType is
+ * broken. I am unable to add parameters such as boundary or start to
+ * multipart/related. They get stripped.</li>
+ * <li>Currently creating a scanner, need to specify a column. Need to make it
+ * so the HTable instance has current table's metadata to-hand so easy to find
+ * the list of all column families so can make up list of columns if none
+ * specified.</li>
  * <li>Minor items are we are decoding URLs in places where probably already
  * done and how to timeout scanners that are in the scanner list.</li>
  * </ul>
- * @see Hbase REST Specification + * + * @see Hbase + * REST Specification */ -@SuppressWarnings("serial") -public class Dispatcher extends javax.servlet.http.HttpServlet -implements javax.servlet.Servlet { - @SuppressWarnings("unused") - private static final Log LOG = LogFactory.getLog(Dispatcher.class.getName()); - private MetaHandler metaHandler; - private TableHandler tableHandler; - private RowHandler rowHandler; - private ScannerHandler scannerHandler; - - private static final String SCANNER = "scanner"; - private static final String ROW = "row"; - +public class Dispatcher extends javax.servlet.http.HttpServlet { + + /** + * + */ + private static final long serialVersionUID = -8075335435797071569L; + private static final Log LOG = LogFactory.getLog(Dispatcher.class); + protected DatabaseController dbController; + protected TableController tableController; + protected RowController rowController; + protected ScannerController scannercontroller; + protected TimestampController tsController; + + public enum ContentType { + XML("text/xml"), JSON("application/json"), PLAIN("text/plain"), MIME( + "multipart/related"), NOT_ACCEPTABLE(""); + + private final String type; + + private ContentType(final String t) { + this.type = t; + } + + @Override + public String toString() { + return this.type; + } + + /** + * Utility method used looking at Accept header content. + * + * @param t + * The content type to examine. + * @return The enum that matches the prefix of t or the default + * enum if t is empty. If unsupported type, we return + * NOT_ACCEPTABLE. + */ + public static ContentType getContentType(final String t) { + // Default to text/plain. Curl sends */*. + if (t == null || t.equals("*/*")) { + return ContentType.XML; + } + String lowerCased = t.toLowerCase(); + ContentType[] values = ContentType.values(); + ContentType result = null; + for (int i = 0; i < values.length; i++) { + if (lowerCased.startsWith(values[i].type)) { + result = values[i]; + break; + } + } + return result == null ? NOT_ACCEPTABLE : result; + } + } + /** * Default constructor */ @@ -80,149 +134,293 @@ public Dispatcher() { @Override public void init() throws ServletException { super.init(); - + HBaseConfiguration conf = new HBaseConfiguration(); HBaseAdmin admin = null; - - try{ + + try { admin = new HBaseAdmin(conf); - metaHandler = new MetaHandler(conf, admin); - tableHandler = new TableHandler(conf, admin); - rowHandler = new RowHandler(conf, admin); - scannerHandler = new ScannerHandler(conf, admin); - } catch(Exception e){ + createControllers(); + + dbController.initialize(conf, admin); + tableController.initialize(conf, admin); + rowController.initialize(conf, admin); + tsController.initialize(conf, admin); + scannercontroller.initialize(conf, admin); + + LOG.debug("no errors in init."); + } catch (Exception e) { + System.out.println(e.toString()); throw new ServletException(e); } } + protected void createControllers() { + dbController = new DatabaseController(); + tableController = new TableController(); + rowController = new RowController(); + tsController = new TimestampController(); + scannercontroller = new ScannerController(); + } + + @SuppressWarnings("unchecked") @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException, ServletException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 0 || pathSegments[0].length() <= 0) { - // if it was a root request, then get some metadata about - // the entire instance. 
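As a quick reference for the ContentType enum added to Dispatcher above, getContentType() matches the Accept or Content-Type header by prefix and falls back to XML when the header is missing or */*; a sketch of the expected mapping (not test code from the patch):

    ContentType.getContentType(null);                // XML, no header sent
    ContentType.getContentType("*/*");               // XML, curl's default Accept
    ContentType.getContentType("application/json");  // JSON
    ContentType.getContentType("text/plain; q=0.5"); // PLAIN, prefix match ignores parameters
    ContentType.getContentType("image/png");         // NOT_ACCEPTABLE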
- metaHandler.doGet(request, response, pathSegments); - } else { - if (pathSegments.length >= 2 && pathSegments[0].length() > 0 && pathSegments[1].toLowerCase().equals(ROW)) { - // if it has table name and row path segments - rowHandler.doGet(request, response, pathSegments); + throws IOException, ServletException { + try { + Status s = this.createStatus(request, response); + byte[][] pathSegments = getPathSegments(request); + Map queryMap = request.getParameterMap(); + + if (pathSegments.length == 0 || pathSegments[0].length <= 0) { + // if it was a root request, then get some metadata about + // the entire instance. + dbController.get(s, pathSegments, queryMap); } else { - // otherwise, it must be a GET request suitable for the - // table handler. - tableHandler.doGet(request, response, pathSegments); + if (pathSegments.length >= 2 + && pathSegments.length <= 3 + && pathSegments[0].length > 0 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW)) { + // if it has table name and row path segments + rowController.get(s, pathSegments, queryMap); + } else if (pathSegments.length == 4 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW)) { + tsController.get(s, pathSegments, queryMap); + } else { + // otherwise, it must be a GET request suitable for the + // table handler. + tableController.get(s, pathSegments, queryMap); + } + } + LOG.debug("GET - No Error"); + } catch (HBaseRestException e) { + LOG.debug("GET - Error: " + e.toString()); + try { + Status sError = createStatus(request, response); + sError.setInternalError(e); + sError.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } } } + @SuppressWarnings("unchecked") @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) - throws IOException, ServletException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 0 || pathSegments[0].length() <= 0) { - // if it was a root request, it must be a create table request - tableHandler.doPost(request, response, pathSegments); - return; - } else { - // there should be at least two path segments (table name and row or - // scanner or disable/enable operation) - if (pathSegments.length >= 2 && pathSegments[0].length() > 0) { - if (pathSegments[1].toLowerCase().equals(SCANNER) - && pathSegments.length >= 2) { - scannerHandler.doPost(request, response, pathSegments); - return; - } else if (pathSegments[1].toLowerCase().equals(ROW) - && pathSegments.length >= 3) { - rowHandler.doPost(request, response, pathSegments); - return; - } else if ((pathSegments[1].toLowerCase().equals(TableHandler.DISABLE) || pathSegments[1].toLowerCase().equals(TableHandler.ENABLE)) - && pathSegments.length == 2) { - tableHandler.doPost(request, response, pathSegments); - return; + throws IOException, ServletException { + try { + + Status s = createStatus(request, response); + byte[][] pathSegments = getPathSegments(request); + Map queryMap = request.getParameterMap(); + byte[] input = readInputBuffer(request); + IHBaseRestParser parser = this.getParser(request); + + if ((pathSegments.length >= 0 && pathSegments.length <= 1) + || Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ENABLE) + || Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.DISABLE)) { + // this is a table request + tableController.post(s, pathSegments, queryMap, input, parser); + } else { + // there should be at least two path segments 
(table name and row or + // scanner) + if (pathSegments.length >= 2 && pathSegments[0].length > 0) { + if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { + scannercontroller.post(s, pathSegments, queryMap, input, parser); + return; + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW) + && pathSegments.length >= 3) { + rowController.post(s, pathSegments, queryMap, input, parser); + return; + } } } + } catch (HBaseRestException e) { + LOG.debug("POST - Error: " + e.toString()); + try { + Status s_error = createStatus(request, response); + s_error.setInternalError(e); + s_error.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + } } - - // if we get to this point, then no handler was matched this request. - GenericHandler.doNotFound(response, "No handler for " + request.getPathInfo()); } - + @SuppressWarnings("unchecked") @Override protected void doPut(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 1 && pathSegments[0].length() > 0) { - // if it has only table name - tableHandler.doPut(request, response, pathSegments); - } else { - // Equate PUT with a POST. - doPost(request, response); + throws ServletException, IOException { + try { + byte[][] pathSegments = getPathSegments(request); + if(pathSegments.length == 0) { + throw new HBaseRestException("method not supported"); + } else if (pathSegments.length == 1 && pathSegments[0].length > 0) { + // if it has only table name + Status s = createStatus(request, response); + Map queryMap = request.getParameterMap(); + IHBaseRestParser parser = this.getParser(request); + byte[] input = readInputBuffer(request); + tableController.put(s, pathSegments, queryMap, input, parser); + } else { + // Equate PUT with a POST. 
+ doPost(request, response); + } + } catch (HBaseRestException e) { + try { + Status s_error = createStatus(request, response); + s_error.setInternalError(e); + s_error.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + } } } + @SuppressWarnings("unchecked") @Override protected void doDelete(HttpServletRequest request, - HttpServletResponse response) - throws IOException, ServletException { - String [] pathSegments = getPathSegments(request); - - if (pathSegments.length == 1 && pathSegments[0].length() > 0) { - // if it only has only table name - tableHandler.doDelete(request, response, pathSegments); - return; - } else if (pathSegments.length >= 3 && pathSegments[0].length() > 0) { - // must be at least two path segments (table name and row or scanner) - if (pathSegments[1].toLowerCase().equals(SCANNER) && - pathSegments.length == 3 && pathSegments[2].length() > 0) { - // DELETE to a scanner requires at least three path segments - scannerHandler.doDelete(request, response, pathSegments); - return; - } else if (pathSegments[1].toLowerCase().equals(ROW) && - pathSegments.length >= 3) { - rowHandler.doDelete(request, response, pathSegments); + HttpServletResponse response) throws IOException, ServletException { + try { + Status s = createStatus(request, response); + byte[][] pathSegments = getPathSegments(request); + Map queryMap = request.getParameterMap(); + + if(pathSegments.length == 0) { + throw new HBaseRestException("method not supported"); + } else if (pathSegments.length == 1 && pathSegments[0].length > 0) { + // if it only has only table name + tableController.delete(s, pathSegments, queryMap); return; + } else if (pathSegments.length >= 3 && pathSegments[0].length > 0) { + // must be at least two path segments (table name and row or scanner) + if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER) + && pathSegments.length == 3 && pathSegments[2].length > 0) { + // DELETE to a scanner requires at least three path segments + scannercontroller.delete(s, pathSegments, queryMap); + return; + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ROW) + && pathSegments.length >= 3) { + rowController.delete(s, pathSegments, queryMap); + return; + } else if (pathSegments.length == 4) { + tsController.delete(s, pathSegments, queryMap); + } + } + } catch (HBaseRestException e) { + LOG.debug("POST - Error: " + e.toString()); + try { + Status s_error = createStatus(request, response); + s_error.setInternalError(e); + s_error.respond(); + } catch (HBaseRestException f) { + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } } - - // if we reach this point, then no handler exists for this request. - GenericHandler.doNotFound(response, "No handler"); } - - /* + + /** + * This method will get the path segments from the HttpServletRequest. Please + * note that if the first segment of the path is /api this is removed from the + * returning byte array. + * * @param request + * * @return request pathinfo split on the '/' ignoring the first '/' so first * element in pathSegment is not the empty string. 
*/ - private String [] getPathSegments(final HttpServletRequest request) { + protected byte[][] getPathSegments(final HttpServletRequest request) { int context_len = request.getContextPath().length() + 1; - return request.getRequestURI().substring(context_len).split("/"); + + byte[][] pathSegments = Bytes.toByteArrays(request.getRequestURI().substring(context_len) + .split("/")); + byte[] apiAsBytes = "api".getBytes(); + if (Arrays.equals(apiAsBytes, pathSegments[0])) { + byte[][] newPathSegments = new byte[pathSegments.length - 1][]; + for(int i = 0; i < newPathSegments.length; i++) { + newPathSegments[i] = pathSegments[i + 1]; + } + pathSegments = newPathSegments; + } + return pathSegments; + } + + protected byte[] readInputBuffer(HttpServletRequest request) + throws HBaseRestException { + try { + String resultant = ""; + BufferedReader r = request.getReader(); + + int maxLength = 5000; // tie to conf + int bufferLength = 640; + + char[] c = new char[bufferLength]; // 40 characters * sizeof(UTF16) + // TODO make s maxLength and c size values in configuration + if (!r.ready()) { + Thread.sleep(1000); // If r is not ready wait 1 second + if (!r.ready()) { // If r still is not ready something is wrong, return + // blank. + return new byte[0]; + } + } + while (r.ready()) { + int n = r.read(c, 0, bufferLength); + resultant += new String(c); + if (n != bufferLength) { + break; + } else if (resultant.length() > maxLength) { + resultant = resultant.substring(0, maxLength); + break; + } + } + return Bytes.toBytes(resultant.trim()); + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + protected IHBaseRestParser getParser(HttpServletRequest request) + throws HBaseRestException { + return HBaseRestParserFactory.getParser(ContentType.getContentType(request + .getHeader("content-type"))); + } + + protected Status createStatus(HttpServletRequest request, + HttpServletResponse response) throws HBaseRestException { + return new Status(response, RestSerializerFactory.getSerializer(request, + response), this.getPathSegments(request)); } // // Main program and support routines // - - private static void printUsageAndExit() { + protected static void printUsageAndExit() { printUsageAndExit(null); } - - private static void printUsageAndExit(final String message) { + + protected static void printUsageAndExit(final String message) { if (message != null) { System.err.println(message); } - System.out.println("Usage: java org.apache.hadoop.hbase.rest.Dispatcher " + - "--help | [--port=PORT] [--bind=ADDR] start"); + System.out.println("Usage: java org.apache.hadoop.hbase.rest.Dispatcher " + + "--help | [--port=PORT] [--bind=ADDR] start"); System.out.println("Arguments:"); System.out.println(" start Start REST server"); System.out.println(" stop Stop REST server"); System.out.println("Options:"); System.out.println(" port Port to listen on. Default: 60050."); System.out.println(" bind Address to bind on. Default: 0.0.0.0."); - System.out.println(" max-num-threads The maximum number of threads for Jetty to run. Defaults to 256."); + System.out + .println(" max-num-threads The maximum number of threads for Jetty to run. Defaults to 256."); System.out.println(" help Print this message and exit."); System.exit(0); @@ -230,9 +428,10 @@ private static void printUsageAndExit(final String message) { /* * Start up the REST servlet in standalone mode. 
+ * * @param args */ - protected static void doMain(final String [] args) throws Exception { + protected static void doMain(final String[] args) throws Exception { if (args.length < 1) { printUsageAndExit(); } @@ -246,7 +445,7 @@ protected static void doMain(final String [] args) throws Exception { final String addressArgKey = "--bind="; final String portArgKey = "--port="; final String numThreadsKey = "--max-num-threads="; - for (String cmd: args) { + for (String cmd : args) { if (cmd.startsWith(addressArgKey)) { bindAddress = cmd.substring(addressArgKey.length()); continue; @@ -258,14 +457,14 @@ protected static void doMain(final String [] args) throws Exception { } else if (cmd.equals("start")) { continue; } else if (cmd.equals("stop")) { - printUsageAndExit("To shutdown the REST server run " + - "bin/hbase-daemon.sh stop rest or send a kill signal to " + - "the REST server pid"); - } else if (cmd.startsWith(numThreadsKey)) { + printUsageAndExit("To shutdown the REST server run " + + "bin/hbase-daemon.sh stop rest or send a kill signal to " + + "the REST server pid"); + } else if (cmd.startsWith(numThreadsKey)) { numThreads = Integer.parseInt(cmd.substring(numThreadsKey.length())); continue; } - + // Print out usage if we get to here. printUsageAndExit(); } @@ -278,20 +477,17 @@ protected static void doMain(final String [] args) throws Exception { NCSARequestLog ncsa = new NCSARequestLog(); ncsa.setLogLatency(true); webServer.setRequestLog(ncsa); - WebApplicationContext context = - webServer.addWebApplication("/api", InfoServer.getWebAppDir("rest")); - context.addServlet("stacks", "/stacks", - StatusHttpServer.StackServlet.class.getName()); - context.addServlet("logLevel", "/logLevel", - org.apache.hadoop.log.LogLevel.Servlet.class.getName()); + WebApplicationContext context = webServer.addWebApplication("/", InfoServer + .getWebAppDir("rest")); webServer.start(); } - + /** * @param args - * @throws Exception + * @throws Exception */ - public static void main(String [] args) throws Exception { + public static void main(String[] args) throws Exception { + System.out.println("Starting restServer"); doMain(args); } } diff --git a/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java b/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java index 2266aaaeb3cf..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java @@ -1,342 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.io.PrintWriter; -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; -import java.util.Map; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.znerd.xmlenc.LineBreak; -import org.znerd.xmlenc.XMLOutputter; - -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.io.Cell; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * GenericHandler contains some basic common stuff that all the individual - * REST handler types take advantage of. - */ -public abstract class GenericHandler { - protected HBaseConfiguration conf; - protected HBaseAdmin admin; - - protected static final String ACCEPT = "accept"; - protected static final String COLUMN = "column"; - protected static final String TIMESTAMP = "timestamp"; - protected static final String START_ROW = "start_row"; - protected static final String END_ROW = "end_row"; - protected static final String CONTENT_TYPE = "content-type"; - protected static final String ROW = "row"; - protected static final String REGIONS = "regions"; - protected static final String VERSION = "version"; - protected static final String OFFSET = "offset"; - protected static final String LIMIT = "limit"; - - protected final Log LOG = LogFactory.getLog(this.getClass()); - - public GenericHandler(HBaseConfiguration conf, HBaseAdmin admin) { - this.conf = conf; - this.admin = admin; - } - - /* - * Supported content types as enums - */ - protected enum ContentType { - XML("text/xml"), - PLAIN("text/plain"), - MIME("multipart/related"), - NOT_ACCEPTABLE(""); - - private final String type; - - private ContentType(final String t) { - this.type = t; - } - - @Override - public String toString() { - return this.type; - } - - /** - * Utility method used looking at Accept header content. - * @param t The content type to examine. - * @return The enum that matches the prefix of t or - * the default enum if t is empty. If unsupported type, we - * return NOT_ACCEPTABLE. - */ - public static ContentType getContentType(final String t) { - // Default to text/plain. Curl sends */*. - if (t == null || t.equals("*/*")) { - return ContentType.XML; - } - String lowerCased = t.toLowerCase(); - ContentType [] values = ContentType.values(); - ContentType result = null; - for (int i = 0; i < values.length; i++) { - if (lowerCased.startsWith(values[i].type)) { - result = values[i]; - break; - } - } - return result == null? NOT_ACCEPTABLE: result; - } - } - - - /* - * @param o - * @return XMLOutputter wrapped around o. - * @throws IllegalStateException - * @throws IOException - */ - protected XMLOutputter getXMLOutputter(final PrintWriter o) - throws IllegalStateException, IOException { - XMLOutputter outputter = new XMLOutputter(o, HConstants.UTF8_ENCODING); - outputter.setLineBreak(LineBreak.UNIX); - outputter.setIndentation(" "); - outputter.declaration(); - return outputter; - } - - /* - * Write an XML element. 
- * @param outputter - * @param name - * @param value - * @throws IllegalStateException - * @throws IOException - */ - protected void doElement(final XMLOutputter outputter, - final String name, final String value) - throws IllegalStateException, IOException { - outputter.startTag(name); - if (value.length() > 0) { - outputter.pcdata(value); - } - outputter.endTag(); - } - - /* - * Set content-type, encoding, and status on passed response - * @param response - * @param status - * @param contentType - */ - public static void setResponseHeader(final HttpServletResponse response, - final int status, final String contentType) { - // Container adds the charset to the HTTP content-type header. - response.setContentType(contentType); - response.setCharacterEncoding(HConstants.UTF8_ENCODING); - response.setStatus(status); - } - - /* - * If we can't do the specified Accepts header type. - * @param response - * @throws IOException - */ - public static void doNotAcceptable(final HttpServletResponse response) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_ACCEPTABLE); - } - - /* - * If we can't do the specified Accepts header type. - * @param response - * @param message - * @throws IOException - */ - public static void doNotAcceptable(final HttpServletResponse response, - final String message) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_ACCEPTABLE, message); - } - - /* - * Resource not found. - * @param response - * @throws IOException - */ - public static void doNotFound(final HttpServletResponse response) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_FOUND); - } - - /* - * Resource not found. - * @param response - * @param msg - * @throws IOException - */ - public static void doNotFound(final HttpServletResponse response, final String msg) - throws IOException { - response.sendError(HttpServletResponse.SC_NOT_FOUND, msg); - } - - /* - * Unimplemented method. - * @param response - * @param message to send - * @throws IOException - */ - public static void doMethodNotAllowed(final HttpServletResponse response, - final String message) - throws IOException { - response.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, message); - } - - protected String getTableName(final String [] pathSegments) - throws UnsupportedEncodingException { - // Get table name? First part of passed segment. It can't be empty string - // or null because we should have tested for that before coming in here. 
- return URLDecoder.decode(pathSegments[0], HConstants.UTF8_ENCODING); - } - - /* - * Output row columns - * @param outputter - * @param m - * @throws IllegalStateException - * @throws IllegalArgumentException - * @throws IOException - */ - protected void outputColumnsXml(final XMLOutputter outputter, final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - outputColumnsXml(null, outputter, m); - } - - protected void outputColumnsXml(final HttpServletRequest request, - final XMLOutputter outputter, final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - int offset = 0, limit = Integer.MAX_VALUE; - if (request != null) { - String offset_string = request.getParameter(OFFSET); - if (offset_string != null && !offset_string.equals("")) - offset = Integer.parseInt(offset_string); - String limit_string = request.getParameter(LIMIT); - if (limit_string != null && !limit_string.equals("")) { - limit = Integer.parseInt(limit_string); - } - } - - for (Map.Entry e: m.entrySet()) { - if (offset > 0) { - --offset; - continue; - } - if (limit < 1) { - break; - } else { - --limit; - } - outputter.startTag(COLUMN); - doElement(outputter, "name", - org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey())); - outputCellXml(outputter, e.getValue()); - outputter.endTag(); - } - } - - protected void outputColumnsWithMultiVersionsXml(final XMLOutputter outputter, - final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - outputColumnsWithMultiVersionsXml(null, outputter, m); - } - - protected void outputColumnsWithMultiVersionsXml(final HttpServletRequest request, - final XMLOutputter outputter, final Map m) - throws IllegalStateException, IllegalArgumentException, IOException { - int offset = 0, limit = Integer.MAX_VALUE; - if (request != null) { - String offset_string = request.getParameter(OFFSET); - if (offset_string != null && !offset_string.equals("")) - offset = Integer.parseInt(offset_string); - String limit_string = request.getParameter(LIMIT); - if (limit_string != null && !limit_string.equals("")) { - limit = Integer.parseInt(limit_string); - } - } - - for (Map.Entry e: m.entrySet()) { - if (offset > 0) { - --offset; - continue; - } - if (limit < 1) { - break; - } else { - --limit; - } - for (Cell c : e.getValue()) { - outputter.startTag(COLUMN); - doElement(outputter, "name", - org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey())); - outputCellXml(outputter, c); - outputter.endTag(); - } - } - } - - protected void outputCellXml(final XMLOutputter outputter, Cell c) - throws IllegalStateException, IllegalArgumentException, IOException { - // We don't know String from binary data so we always base64 encode. - doElement(outputter, "value", - org.apache.hadoop.hbase.util.Base64.encodeBytes(c.getValue())); - doElement(outputter, "timestamp", String.valueOf(c.getTimestamp())); - } -// Commented - multipart support is currently nonexistant. -// protected void outputColumnsMime(final MultiPartResponse mpr, -// final Map m) -// throws IOException { -// for (Map.Entry e: m.entrySet()) { -// mpr.startPart("application/octet-stream", -// new String [] {"Content-Description: " + e.getKey().toString(), -// "Content-Transfer-Encoding: binary", -// "Content-Length: " + e.getValue().getValue().length}); -// mpr.getOut().write(e.getValue().getValue()); -// } -// } - - /* - * Get an HTable instance by it's table name. 
- */ - protected HTable getTable(final String tableName) throws IOException { - return new HTable(this.conf, Bytes.toBytes(tableName)); - } - - protected String makeColumnName(String column) { - if (column.indexOf(':') == -1) - column += ':'; - return column; - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java b/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java index 64e288b22893..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java @@ -1,108 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.util.Bytes; -import org.znerd.xmlenc.XMLOutputter; - - -/** - * MetaHandler fields all requests for metadata at the instance level. At the - * moment this is only GET requests to /. - */ -public class MetaHandler extends GenericHandler { - - public MetaHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException{ - super(conf, admin); - } - - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - getTables(request, response); - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "POST not allowed at /"); - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "PUT not allowed at /"); - } - - public void doDelete(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "DELETE not allowed at /"); - } - - /* - * Return list of tables. - * @param request - * @param response - */ - private void getTables(final HttpServletRequest request, - final HttpServletResponse response) - throws IOException { - HTableDescriptor [] tables = this.admin.listTables(); - - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - setResponseHeader(response, tables.length > 0? 
200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag("tables"); - for (int i = 0; i < tables.length; i++) { - doElement(outputter, "table", Bytes.toString(tables[i].getName())); - } - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - break; - case PLAIN: - setResponseHeader(response, tables.length > 0? 200: 204, - ContentType.PLAIN.toString()); - PrintWriter out = response.getWriter(); - for (int i = 0; i < tables.length; i++) { - out.println(Bytes.toString(tables[i].getName())); - } - out.close(); - break; - default: - doNotAcceptable(response); - } - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/RESTConstants.java b/src/java/org/apache/hadoop/hbase/rest/RESTConstants.java new file mode 100644 index 000000000000..7a79383ccb07 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/RESTConstants.java @@ -0,0 +1,111 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.rest.filter.RowFilterSetFactory; +import org.apache.hadoop.hbase.rest.filter.StopRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.WhileMatchRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.PageRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.ColumnValueFilterFactory; +import org.apache.hadoop.hbase.rest.filter.RegExpRowFilterFactory; +import org.apache.hadoop.hbase.rest.filter.InclusiveStopRowFilterFactory; +import java.util.HashMap; +import org.apache.hadoop.hbase.rest.filter.FilterFactory; + +public interface RESTConstants { + final static String TRUE = "true"; + final static String FALSE = "false"; + // Used for getting all data from a column specified in that order. + final static String COLUMNS = "columns"; + final static String COLUMN = "column"; + // Used with TableExists + final static String EXISTS = "exists"; + // Maps to Transaction ID + final static String TRANSACTION = "transaction"; + // Transaction Operation Key. + final static String TRANSACTION_OPERATION = "transaction_op"; + // Transaction Operation Values + final static String TRANSACTION_OPERATION_COMMIT = "commit"; + final static String TRANSACTION_OPERATION_CREATE = "create"; + final static String TRANSACTION_OPERATION_ABORT = "abort"; + // Filter Key + final static String FILTER = "filter"; + final static String FILTER_TYPE = "type"; + final static String FILTER_VALUE = "value"; + final static String FILTER_RANK = "rank"; + // Scanner Key + final static String SCANNER = "scanner"; + // The amount of rows to return at one time. 
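These constants double as the query-parameter and path names the new controllers read. A minimal sketch, assuming a REST server at a hypothetical host and port and a guessed JSON shape for the filter value based on the "type"/"value" keys above, of how a client might assemble such a query string:

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

// Sketch only: the table name, column family and filter JSON are illustrative,
// and the /<table>/scanner path segment matches RESTConstants.SCANNER.
public class RestQueryStringExample {
  public static void main(String[] args) throws UnsupportedEncodingException {
    String column = URLEncoder.encode("info:", "UTF-8");  // RESTConstants.COLUMN
    String filter = URLEncoder.encode(
        "{\"type\":\"StopRowFilter\",\"value\":\"row99\"}", "UTF-8"); // guessed shape
    System.out.println("http://localhost:8080/mytable/scanner?column=" + column
        + "&filter=" + filter);
  }
}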
+ final static String SCANNER_RESULT_SIZE = "result_size"; + final static String SCANNER_START_ROW = "start_row"; + final static String SCANNER_STOP_ROW = "stop_row"; + final static String SCANNER_FILTER = "filter"; + final static String SCANNER_TIMESTAMP = "timestamp"; + final static String NUM_VERSIONS = "num_versions"; + final static String SCANNER_COLUMN = "column"; + // static items used on the path + static final String DISABLE = "disable"; + static final String ENABLE = "enable"; + static final String REGIONS = "regions"; + static final String ROW = "row"; + static final String TIME_STAMPS = "timestamps"; + static final String METADATA = "metadata"; + + static final String NAME = "name"; + static final String VALUE = "value"; + static final String ROWS = "rows"; + + static final FactoryMap filterFactories = FactoryMap.getFactoryMap(); + static final String LIMIT = "limit"; + + static class FactoryMap { + + protected static boolean created = false; + protected HashMap map = new HashMap(); + + protected FactoryMap() { + } + + public static FactoryMap getFactoryMap() { + if (!created) { + created = true; + FactoryMap f = new FactoryMap(); + f.initialize(); + return f; + } else { + return null; + } + } + + public FilterFactory get(String c) { + return map.get(c); + } + + protected void initialize() { + map.put("ColumnValueFilter", new ColumnValueFilterFactory()); + map.put("InclusiveStopRowFilter", new InclusiveStopRowFilterFactory()); + map.put("PageRowFilter", new PageRowFilterFactory()); + map.put("RegExpRowFilter", new RegExpRowFilterFactory()); + map.put("RowFilterSet", new RowFilterSetFactory()); + map.put("StopRowFilter", new StopRowFilterFactory()); + map.put("WhileMatchRowFilter", new WhileMatchRowFilterFactory()); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/RowController.java b/src/java/org/apache/hadoop/hbase/rest/RowController.java new file mode 100644 index 000000000000..d95be68fce20 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/RowController.java @@ -0,0 +1,135 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public class RowController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(RowController.class); + + protected RowModel getModel() { + return (RowModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) { + return new RowModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + RowModel innerModel = getModel(); + s.setNoQueryResults(); + + byte[] tableName; + byte[] rowName; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + RowResult row = null; + + if (queryMap.size() == 0 && pathSegments.length <= 3) { + row = innerModel.get(tableName, rowName); + } else if (pathSegments.length == 4 + && Bytes.toString(pathSegments[3]).toLowerCase().equals( + RESTConstants.TIME_STAMPS)) { + innerModel.getTimestamps(tableName, rowName); + } else { + row = innerModel.get(tableName, rowName, this.getColumnsFromQueryMap(queryMap)); + } + if(row == null) { + throw new HBaseRestException("row not found"); + } + s.setOK(row); + s.respond(); + } + + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + RowModel innerModel = getModel(); + + BatchUpdate b; + RowUpdateDescriptor rud = parser + .getRowUpdateDescriptor(input, pathSegments); + + if (input.length == 0) { + s.setUnsupportedMediaType("no data send with post request"); + s.respond(); + return; + } + + b = new BatchUpdate(rud.getRowName()); + + for (byte[] key : rud.getColVals().keySet()) { + b.put(key, rud.getColVals().get(key)); + } + + try { + innerModel.post(rud.getTableName().getBytes(), b); + s.setOK(); + } catch (HBaseRestException e) { + s.setUnsupportedMediaType(e.getMessage()); + } + s.respond(); + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + s.setMethodNotImplemented(); + s.respond(); + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + RowModel innerModel = getModel(); + byte[] tableName; + byte[] rowName; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + if(queryMap.size() == 0) { + innerModel.delete(tableName, rowName); + } else { + innerModel.delete(tableName, rowName, this.getColumnsFromQueryMap(queryMap)); + } + s.setOK(); + s.respond(); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/RowHandler.java b/src/java/org/apache/hadoop/hbase/rest/RowHandler.java index 9561fac1340f..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/RowHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/RowHandler.java @@ -1,346 +0,0 @@ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.net.URLDecoder; -import 
java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.io.BatchUpdate; -import org.apache.hadoop.hbase.io.Cell; -import org.apache.hadoop.hbase.util.Bytes; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.znerd.xmlenc.XMLOutputter; - -public class RowHandler extends GenericHandler { - - public RowHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException { - super(conf, admin); - } - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) throws ServletException, IOException { - HTable table = getTable(pathSegments[0]); - if (pathSegments[1].toLowerCase().equals(ROW)) { - // get a row - getRow(table, request, response, pathSegments); - } else { - doNotFound(response, "Not handled in RowHandler"); - } - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) throws ServletException, IOException { - putRow(request, response, pathSegments); - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) throws ServletException, IOException { - doPost(request, response, pathSegments); - } - - public void doDelete(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws ServletException, IOException { - deleteRow(request, response, pathSegments); - } - - /* - * @param request - * @param response - * @param pathSegments info path split on the '/' character. First segment - * is the tablename, second is 'row', and third is the row id. - * @throws IOException - * Retrieve a row in one of several output formats. - */ - private void getRow(HTable table, final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException { - // pull the row key out of the path - byte[] row = Bytes.toBytes(URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING)); - - String timestampStr = null; - if (pathSegments.length == 4) { - // A timestamp has been supplied. - timestampStr = pathSegments[3]; - if (timestampStr.equals("timestamps")) { - // Not supported in hbase just yet. 
TODO - doMethodNotAllowed(response, "Not yet supported by hbase"); - return; - } - } - - String[] column_params = request.getParameterValues(COLUMN); - - byte[][] columns = null; - - if (column_params != null && column_params.length > 0) { - List available_columns = new ArrayList(); - for (String column_param : column_params) { - if (column_param.length() > 0 && table.getTableDescriptor().hasFamily(Bytes.toBytes(column_param))) { - available_columns.add(column_param); - } - } - columns = Bytes.toByteArrays(available_columns.toArray(new String[0])); - } - - String[] version_params = request.getParameterValues(VERSION); - int version = 0; - if (version_params != null && version_params.length == 1) { - version = Integer.parseInt(version_params[0]); - } - - if (version > 0 && columns != null) { - Map result = new TreeMap(Bytes.BYTES_COMPARATOR); - - for (byte[] col : columns) { - Cell[] cells = timestampStr == null ? table.get(row, col, version) - : table.get(row, col, Long.parseLong(timestampStr), version); - if (cells != null) { - result.put(col, cells); - } - } - - if (result == null || result.size() == 0) { - doNotFound(response, "Row not found!"); - } else { - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - outputRowWithMultiVersionsXml(request, response, result); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " - + request.getHeader(CONTENT_TYPE)); - } - } - } else { - Map result = timestampStr == null ? table.getRow(row, columns) : table.getRow(row, columns, Long.parseLong(timestampStr)); - if (result == null || result.size() == 0) { - doNotFound(response, "Row not found!"); - } else { - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - outputRowXml(request, response, result); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " - + request.getHeader(CONTENT_TYPE)); - } - } - } - } - - /* - * Output a row encoded as XML. - * @param response - * @param result - * @throws IOException - */ - private void outputRowXml(final HttpServletRequest request, - final HttpServletResponse response, final Map result) - throws IOException { - setResponseHeader(response, result.size() > 0? 200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag(ROW); - doElement(outputter, "count", String.valueOf(result.size())); - outputColumnsXml(request, outputter, result); - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - } - - private void outputRowWithMultiVersionsXml(final HttpServletRequest request, - final HttpServletResponse response, final Map result) - throws IOException { - setResponseHeader(response, result.size() > 0? 200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag(ROW); - doElement(outputter, "count", String.valueOf(result.size())); - outputColumnsWithMultiVersionsXml(request, outputter, result); - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - } - /* - * @param response - * @param result - * Output the results contained in result as a multipart/related response. - */ - // private void outputRowMime(final HttpServletResponse response, - // final Map result) - // throws IOException { - // response.setStatus(result.size() > 0? 200: 204); - // // This code ties me to the jetty server. 
- // MultiPartResponse mpr = new MultiPartResponse(response); - // // Content type should look like this for multipart: - // // Content-type: multipart/related;start="";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml" - // String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" + - // mpr.getBoundary() + "\""; - // // Setting content type is broken. I'm unable to set parameters on the - // // content-type; They get stripped. Can't set boundary, etc. - // // response.addHeader("Content-Type", ct); - // response.setContentType(ct); - // outputColumnsMime(mpr, result); - // mpr.close(); - // } - - /* - * @param request - * @param response - * @param pathSegments - * Do a put based on the client request. - */ - private void putRow(final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException, ServletException { - HTable table = getTable(pathSegments[0]); - - // pull the row key out of the path - String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING); - - switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) { - case XML: - putRowXml(table, row, request, response, pathSegments); - break; - case MIME: - doNotAcceptable(response, "Don't support multipart/related yet..."); - break; - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - - /* - * @param request - * @param response - * @param pathSegments - * Decode supplied XML and do a put to Hbase. - */ - private void putRowXml(HTable table, String row, - final HttpServletRequest request, final HttpServletResponse response, - final String [] pathSegments) - throws IOException, ServletException{ - - DocumentBuilderFactory docBuilderFactory - = DocumentBuilderFactory.newInstance(); - //ignore all comments inside the xml file - docBuilderFactory.setIgnoringComments(true); - - DocumentBuilder builder = null; - Document doc = null; - - String timestamp = pathSegments.length >= 4 ? pathSegments[3] : null; - - try{ - builder = docBuilderFactory.newDocumentBuilder(); - doc = builder.parse(request.getInputStream()); - } catch (javax.xml.parsers.ParserConfigurationException e) { - throw new ServletException(e); - } catch (org.xml.sax.SAXException e){ - throw new ServletException(e); - } - - BatchUpdate batchUpdate; - - try{ - // start an update - batchUpdate = timestamp == null ? - new BatchUpdate(row) : new BatchUpdate(row, Long.parseLong(timestamp)); - - // set the columns from the xml - NodeList columns = doc.getElementsByTagName("column"); - - for(int i = 0; i < columns.getLength(); i++){ - // get the current column element we're working on - Element column = (Element)columns.item(i); - - // extract the name and value children - Node name_node = column.getElementsByTagName("name").item(0); - String name = name_node.getFirstChild().getNodeValue(); - - Node value_node = column.getElementsByTagName("value").item(0); - - byte[] value = new byte[0]; - - // for some reason there's no value here. probably indicates that - // the consumer passed a null as the cell value. 
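For reference, a sketch, not part of the patch, of the XML body putRowXml() above parses: each column element carries a plain-text name and a base64-encoded value. The table, row key and column name are made up.

import org.apache.hadoop.hbase.util.Base64;

// Builds a request body for PUT/POST to /<table>/row/<rowkey> with
// Content-Type: text/xml; the value is base64 encoded as the parser expects.
public class RowPutXmlExample {
  public static void main(String[] args) {
    String value = Base64.encodeBytes("hello world".getBytes());
    String body =
        "<row>\n" +
        "  <column>\n" +
        "    <name>info:greeting</name>\n" +
        "    <value>" + value + "</value>\n" +
        "  </column>\n" +
        "</row>";
    System.out.println(body);
  }
}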
- if(value_node.getFirstChild() != null && - value_node.getFirstChild().getNodeValue() != null){ - // decode the base64'd value - value = org.apache.hadoop.hbase.util.Base64.decode( - value_node.getFirstChild().getNodeValue()); - } - - // put the value - batchUpdate.put(name, value); - } - - // commit the update - table.commit(batchUpdate); - - // respond with a 200 - response.setStatus(200); - } - catch(Exception e){ - throw new ServletException(e); - } - } - - /* - * @param request - * @param response - * @param pathSegments - * Delete some or all cells for a row. - */ - private void deleteRow(final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException, ServletException { - // grab the table we're operating on - HTable table = getTable(getTableName(pathSegments)); - - // pull the row key out of the path - String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING); - - String[] columns = request.getParameterValues(COLUMN); - - // hack - we'll actually test for the presence of the timestamp parameter - // eventually - boolean timestamp_present = false; - if(timestamp_present){ // do a timestamp-aware delete - doMethodNotAllowed(response, "DELETE with a timestamp not implemented!"); - } - else{ // ignore timestamps - if(columns == null || columns.length == 0){ - // retrieve all the columns - doMethodNotAllowed(response, - "DELETE without specified columns not implemented!"); - } else{ - // delete each column in turn - for(int i = 0; i < columns.length; i++){ - table.deleteAll(row, columns[i]); - } - } - response.setStatus(202); - } - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/RowModel.java b/src/java/org/apache/hadoop/hbase/rest/RowModel.java new file mode 100644 index 000000000000..1b8ce8c4f805 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/RowModel.java @@ -0,0 +1,140 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +public class RowModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(RowModel.class); + + public RowModel(HBaseConfiguration conf, HBaseAdmin admin) { + super.initialize(conf, admin); + } + + public RowResult get(byte[] tableName, byte[] rowName) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, columns); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, columns, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, long timestamp) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public TimestampsDescriptor getTimestamps( + @SuppressWarnings("unused") byte[] tableName, + @SuppressWarnings("unused") byte[] rowName) throws HBaseRestException { + // try { + // TimestampsDescriptor tsd = new TimestampsDescriptor(); + // HTable table = new HTable(tableName); + // RowResult row = table.getRow(rowName); + + throw new HBaseRestException("operation currently unsupported"); + + // } catch (IOException e) { + // throw new HBaseRestException("Error finding timestamps for row: " + // + Bytes.toString(rowName), e); + // } + + } + + public void post(byte[] tableName, BatchUpdate b) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.commit(b); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void post(byte[] tableName, List b) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.commit(b); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void delete(byte[] tableName, byte[] rowName) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.deleteAll(rowName); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void delete(byte[] tableName, byte[] rowName, byte[][] columns) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + for (byte[] column : columns) { + table.deleteAll(rowName, column); + } + } catch (IOException e) { + throw new HBaseRestException(e); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerController.java b/src/java/org/apache/hadoop/hbase/rest/ScannerController.java new file mode 100644 
index 000000000000..d8f17fcf9ddc --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerController.java @@ -0,0 +1,358 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.RowFilterSet; +import org.apache.hadoop.hbase.filter.StopRowFilter; +import org.apache.hadoop.hbase.filter.WhileMatchRowFilter; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.filter.FilterFactory; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * + */ +public class ScannerController extends AbstractController { + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#delete(org.apache.hadoop + * .hbase.rest.Status, byte[][], java.util.Map) + */ + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + ScannerModel innerModel = this.getModel(); + if (pathSegments.length == 3 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { + // get the scannerId + Integer scannerId = null; + String scannerIdString = new String(pathSegments[2]); + if (!Pattern.matches("^\\d+$", scannerIdString)) { + throw new HBaseRestException( + "the scannerid in the path and must be an integer"); + } + scannerId = Integer.parseInt(scannerIdString); + + try { + innerModel.scannerClose(scannerId); + s.setOK(); + } catch (HBaseRestException e) { + s.setNotFound(); + } + } else { + s.setBadRequest("invalid query"); + } + s.respond(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#generateModel(org.apache + * .hadoop.hbase.HBaseConfiguration, + * org.apache.hadoop.hbase.client.HBaseAdmin) + */ + @Override + protected AbstractModel generateModel(HBaseConfiguration conf, HBaseAdmin a) { + return new ScannerModel(conf, a); + } + + protected ScannerModel getModel() { + return (ScannerModel) model; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#get(org.apache.hadoop.hbase + * .rest.Status, byte[][], java.util.Map) + */ + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + + 
s.setBadRequest("invalid query"); + s.respond(); + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#post(org.apache.hadoop. + * hbase.rest.Status, byte[][], java.util.Map, byte[], + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser) + */ + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + ScannerModel innerModel = this.getModel(); + byte[] tableName; + tableName = pathSegments[0]; + + // Otherwise we interpret this request as a scanner request. + if (pathSegments.length == 2 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { // new scanner request + ScannerDescriptor sd = this.getScannerDescriptor(queryMap); + s.setScannerCreated(createScanner(innerModel, tableName, sd)); + } else if (pathSegments.length == 3 + && Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.SCANNER)) { // open scanner request + // first see if the limit variable is present + Long numRows = 1L; + String[] numRowsString = queryMap.get(RESTConstants.LIMIT); + if (numRowsString != null && Pattern.matches("^\\d+$", numRowsString[0])) { + numRows = Long.parseLong(numRowsString[0]); + } + // get the scannerId + Integer scannerId = null; + String scannerIdString = new String(pathSegments[2]); + if (!Pattern.matches("^\\d+$", scannerIdString)) { + throw new HBaseRestException( + "the scannerid in the path and must be an integer"); + } + scannerId = Integer.parseInt(scannerIdString); + + try { + s.setOK(innerModel.scannerGet(scannerId, numRows)); + } catch (HBaseRestException e) { + s.setNotFound(); + } + } else { + s.setBadRequest("Unknown Query."); + } + s.respond(); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#put(org.apache.hadoop.hbase + * .rest.Status, byte[][], java.util.Map, byte[], + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser) + */ + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + + s.setBadRequest("invalid query"); + s.respond(); + + } + + private ScannerDescriptor getScannerDescriptor(Map queryMap) { + long timestamp = 0L; + byte[] startRow = null; + byte[] stopRow = null; + String filters = null; + + String[] timeStampString = queryMap.get(RESTConstants.SCANNER_TIMESTAMP); + if (timeStampString != null && timeStampString.length == 1) { + timestamp = Long.parseLong(timeStampString[0]); + } + + String[] startRowString = queryMap.get(RESTConstants.SCANNER_START_ROW); + if (startRowString != null && startRowString.length == 1) { + startRow = Bytes.toBytes(startRowString[0]); + } + + String[] stopRowString = queryMap.get(RESTConstants.SCANNER_STOP_ROW); + if (stopRowString != null && stopRowString.length == 1) { + stopRow = Bytes.toBytes(stopRowString[0]); + } + + String[] filtersStrings = queryMap.get(RESTConstants.SCANNER_FILTER); + if (filtersStrings != null && filtersStrings.length > 0) { + filters = ""; + for (@SuppressWarnings("unused") + String filter : filtersStrings) { + // TODO filters are not hooked up yet... 
And the String should probably + // be changed to a set + } + } + return new ScannerDescriptor(this.getColumnsFromQueryMap(queryMap), + timestamp, startRow, stopRow, filters); + } + + protected ScannerIdentifier createScanner(ScannerModel innerModel, + byte[] tableName, ScannerDescriptor scannerDescriptor) + throws HBaseRestException { + + RowFilterInterface filterSet = null; + + // Might want to change this. I am doing this so that I can use + // a switch statement that is more efficient. + int switchInt = 0; + if (scannerDescriptor.getColumns() != null + && scannerDescriptor.getColumns().length > 0) { + switchInt += 1; + } + switchInt += (scannerDescriptor.getTimestamp() != 0L) ? (1 << 1) : 0; + switchInt += (scannerDescriptor.getStartRow().length > 0) ? (1 << 2) : 0; + switchInt += (scannerDescriptor.getStopRow().length > 0) ? (1 << 3) : 0; + if (scannerDescriptor.getFilters() != null + && !scannerDescriptor.getFilters().equals("")) { + switchInt += (scannerDescriptor.getFilters() != null) ? (1 << 4) : 0; + filterSet = unionFilters(scannerDescriptor.getFilters()); + } + + return scannerSwitch(switchInt, innerModel, tableName, scannerDescriptor + .getColumns(), scannerDescriptor.getTimestamp(), scannerDescriptor + .getStartRow(), scannerDescriptor.getStopRow(), filterSet); + } + + public ScannerIdentifier scannerSwitch(int switchInt, + ScannerModel innerModel, byte[] tableName, byte[][] columns, + long timestamp, byte[] startRow, byte[] stopRow, + RowFilterInterface filterSet) throws HBaseRestException { + switch (switchInt) { + case 0: + return innerModel.scannerOpen(tableName); + case 1: + return innerModel.scannerOpen(tableName, columns); + case 2: + return innerModel.scannerOpen(tableName, timestamp); + case 3: + return innerModel.scannerOpen(tableName, columns, timestamp); + case 4: + return innerModel.scannerOpen(tableName, startRow); + case 5: + return innerModel.scannerOpen(tableName, columns, startRow); + case 6: + return innerModel.scannerOpen(tableName, startRow, timestamp); + case 7: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp); + case 8: + return innerModel.scannerOpen(tableName, getStopRow(stopRow)); + case 9: + return innerModel.scannerOpen(tableName, columns, getStopRow(stopRow)); + case 10: + return innerModel.scannerOpen(tableName, timestamp, getStopRow(stopRow)); + case 11: + return innerModel.scannerOpen(tableName, columns, timestamp, + getStopRow(stopRow)); + case 12: + return innerModel.scannerOpen(tableName, startRow, getStopRow(stopRow)); + case 13: + return innerModel.scannerOpen(tableName, columns, startRow, + getStopRow(stopRow)); + case 14: + return innerModel.scannerOpen(tableName, startRow, timestamp, + getStopRow(stopRow)); + case 15: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp, + getStopRow(stopRow)); + case 16: + return innerModel.scannerOpen(tableName, filterSet); + case 17: + return innerModel.scannerOpen(tableName, columns, filterSet); + case 18: + return innerModel.scannerOpen(tableName, timestamp, filterSet); + case 19: + return innerModel.scannerOpen(tableName, columns, timestamp, filterSet); + case 20: + return innerModel.scannerOpen(tableName, startRow, filterSet); + case 21: + return innerModel.scannerOpen(tableName, columns, startRow, filterSet); + case 22: + return innerModel.scannerOpen(tableName, startRow, timestamp, filterSet); + case 23: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp, + filterSet); + case 24: + return innerModel.scannerOpen(tableName, 
getStopRowUnionFilter(stopRow, + filterSet)); + case 25: + return innerModel.scannerOpen(tableName, columns, getStopRowUnionFilter( + stopRow, filterSet)); + case 26: + return innerModel.scannerOpen(tableName, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + case 27: + return innerModel.scannerOpen(tableName, columns, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + case 28: + return innerModel.scannerOpen(tableName, startRow, getStopRowUnionFilter( + stopRow, filterSet)); + case 29: + return innerModel.scannerOpen(tableName, columns, startRow, + getStopRowUnionFilter(stopRow, filterSet)); + case 30: + return innerModel.scannerOpen(tableName, startRow, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + case 31: + return innerModel.scannerOpen(tableName, columns, startRow, timestamp, + getStopRowUnionFilter(stopRow, filterSet)); + default: + return null; + } + } + + protected RowFilterInterface getStopRow(byte[] stopRow) { + return new WhileMatchRowFilter(new StopRowFilter(stopRow)); + } + + protected RowFilterInterface getStopRowUnionFilter(byte[] stopRow, + RowFilterInterface filter) { + Set filterSet = new HashSet(); + filterSet.add(getStopRow(stopRow)); + filterSet.add(filter); + return new RowFilterSet(filterSet); + } + + /** + * Given a list of filters in JSON string form, returns a RowSetFilter that + * returns true if all input filters return true on a Row (aka an AND + * statement). + * + * @param filters + * array of input filters in a JSON String + * @return RowSetFilter with all input filters in an AND Statement + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + protected RowFilterInterface unionFilters(String filters) + throws HBaseRestException { + FilterFactory f = RESTConstants.filterFactories.get("RowFilterSet"); + return f.getFilterFromJSON(filters); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java b/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java index c1df133481ae..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java @@ -1,339 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
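The switchInt built in createScanner() above is a five-bit mask (columns, timestamp, start row, stop row, filters), and scannerSwitch() decodes it into one of the 32 scannerOpen() overload combinations. A self-contained illustration of that encoding; the class and method names here are illustrative only:

// Standalone sketch of the bitmask used by ScannerController.createScanner():
// each optional scanner argument sets one bit, and the resulting integer
// selects a case in scannerSwitch().
public class ScannerSwitchBits {
  static int encode(boolean hasColumns, boolean hasTimestamp,
      boolean hasStartRow, boolean hasStopRow, boolean hasFilters) {
    int switchInt = 0;
    if (hasColumns)   switchInt += 1;       // bit 0
    if (hasTimestamp) switchInt += 1 << 1;  // bit 1
    if (hasStartRow)  switchInt += 1 << 2;  // bit 2
    if (hasStopRow)   switchInt += 1 << 3;  // bit 3
    if (hasFilters)   switchInt += 1 << 4;  // bit 4
    return switchInt;
  }

  public static void main(String[] args) {
    // columns + start row + stop row -> 1 + 4 + 8 = 13, which maps to
    // scannerOpen(tableName, columns, startRow, getStopRow(stopRow)).
    System.out.println(encode(true, false, true, true, false)); // prints 13
  }
}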
- */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.net.URLDecoder; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; - -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Scanner; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JenkinsHash; -import org.apache.hadoop.hbase.io.Cell; -import org.apache.hadoop.hbase.io.RowResult; -import org.znerd.xmlenc.XMLOutputter; - -/** - * ScannderHandler fields all scanner related requests. - */ -public class ScannerHandler extends GenericHandler { - private static final String ROWS = "rows"; - - public ScannerHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException{ - super(conf, admin); - } - - private class ScannerRecord { - private final Scanner scanner; - private List nextRows; - - ScannerRecord(final Scanner s) { - this.scanner = s; - nextRows = new ArrayList(); - } - - public Scanner getScanner() { - return this.scanner; - } - - public boolean hasNext(int nbRows) throws IOException { - if (nextRows.size() < nbRows) { - RowResult[] results = scanner.next(nbRows - nextRows.size()); - for (RowResult result : results) { - nextRows.add(result); - } - return nextRows.size() > 0; - } else { - return true; - } - } - - /** - * Call next on the scanner. - * @return Null if finished, RowResult otherwise - * @throws IOException - */ - public RowResult[] next(int nbRows) throws IOException { - if (!hasNext(nbRows)) { - return null; - } - RowResult[] temp = nextRows.toArray(new RowResult[nextRows.size()]); - nextRows.clear(); - return temp; - } - } - - /* - * Map of outstanding scanners keyed by scannerid. - */ - private final Map scanners = - new HashMap(); - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doMethodNotAllowed(response, "GET to a scanner not supported."); - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - if (pathSegments.length == 2) { - // trying to create a scanner - openScanner(request, response, pathSegments); - } - else if (pathSegments.length == 3) { - // advancing a scanner - getScanner(request, response, pathSegments[2]); - } - else{ - doNotFound(response, "No handler for request"); - } - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - doPost(request, response, pathSegments); - } - - public void doDelete(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - deleteScanner(response, pathSegments[2]); - } - - /* - * Advance scanner and return current position. 
- * @param request - * @param response - * @param scannerid - * @throws IOException - */ - private void getScanner(final HttpServletRequest request, - final HttpServletResponse response, final String scannerid) - throws IOException { - ScannerRecord sr = this.scanners.get(scannerid); - if (sr == null) { - doNotFound(response, "No such scanner."); - return; - } - - String limitString = request.getParameter(LIMIT); - int limit = 1; - if (limitString != null && limitString.length() > 0) { - limit = Integer.valueOf(limitString); - } - if (sr.hasNext(limit)) { - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - outputScannerEntryXML(response, sr, limit); - break; - case MIME: -/* outputScannerEntryMime(response, sr);*/ - doNotAcceptable(response); - break; - default: - doNotAcceptable(response); - } - } - else{ - this.scanners.remove(scannerid); - doNotFound(response, "Scanner is expended"); - } - } - - private void outputScannerEntryXML(final HttpServletResponse response, - final ScannerRecord sr, int limit) - throws IOException { - // respond with a 200 and Content-type: text/xml - setResponseHeader(response, 200, ContentType.XML.toString()); - - // setup an xml outputter - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - - boolean rows = false; - - if (limit > 1) { - outputter.startTag(ROWS); - rows = true; - } - - RowResult[] rowResults = sr.next(limit); - - for (RowResult rowResult: rowResults) { - outputter.startTag(ROW); - - // write the row key - doElement(outputter, "name", - org.apache.hadoop.hbase.util.Base64.encodeBytes(rowResult.getRow())); - - outputColumnsXml(outputter, rowResult); - outputter.endTag(); - } - - if (rows) { - outputter.endTag(); - } - - outputter.endDocument(); - outputter.getWriter().close(); - } - - // private void outputScannerEntryMime(final HttpServletResponse response, - // final ScannerRecord sr) - // throws IOException { - // response.setStatus(200); - // // This code ties me to the jetty server. - // MultiPartResponse mpr = new MultiPartResponse(response); - // // Content type should look like this for multipart: - // // Content-type: multipart/related;start="";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml" - // String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" + - // mpr.getBoundary() + "\""; - // // Setting content type is broken. I'm unable to set parameters on the - // // content-type; They get stripped. Can't set boundary, etc. - // // response.addHeader("Content-Type", ct); - // response.setContentType(ct); - // // Write row, key-column and timestamp each in its own part. - // mpr.startPart("application/octet-stream", - // new String [] {"Content-Description: row", - // "Content-Transfer-Encoding: binary", - // "Content-Length: " + sr.getKey().getRow().getBytes().length}); - // mpr.getOut().write(sr.getKey().getRow().getBytes()); - // - // // Usually key-column is empty when scanning. - // if (sr.getKey().getColumn() != null && - // sr.getKey().getColumn().getLength() > 0) { - // mpr.startPart("application/octet-stream", - // new String [] {"Content-Description: key-column", - // "Content-Transfer-Encoding: binary", - // "Content-Length: " + sr.getKey().getColumn().getBytes().length}); - // } - // mpr.getOut().write(sr.getKey().getColumn().getBytes()); - // // TODO: Fix. Need to write out the timestamp in the ordained timestamp - // // format. 
- // byte [] timestampBytes = Long.toString(sr.getKey().getTimestamp()).getBytes(); - // mpr.startPart("application/octet-stream", - // new String [] {"Content-Description: timestamp", - // "Content-Transfer-Encoding: binary", - // "Content-Length: " + timestampBytes.length}); - // mpr.getOut().write(timestampBytes); - // // Write out columns - // outputColumnsMime(mpr, sr.getValue()); - // mpr.close(); - // } - - /* - * Create scanner - * @param request - * @param response - * @param pathSegments - * @throws IOException - */ - private void openScanner(final HttpServletRequest request, - final HttpServletResponse response, final String [] pathSegments) - throws IOException, ServletException { - // get the table - HTable table = getTable(getTableName(pathSegments)); - - // get the list of columns we're supposed to interact with - String[] raw_columns = request.getParameterValues(COLUMN); - byte [][] columns = null; - - if (raw_columns != null) { - columns = new byte [raw_columns.length][]; - for (int i = 0; i < raw_columns.length; i++) { - // I think this decoding is redundant. - columns[i] = - Bytes.toBytes(URLDecoder.decode(raw_columns[i], HConstants.UTF8_ENCODING)); - } - } else { - // TODO: Need to put into the scanner all of the table's column - // families. TODO: Verify this returns all rows. For now just fail. - doMethodNotAllowed(response, "Unspecified columns parameter currently not supported!"); - return; - } - - // TODO: Parse according to the timestamp format we agree on. - String raw_ts = request.getParameter(TIMESTAMP); - - // TODO: Are these decodings redundant? - byte [] startRow = request.getParameter(START_ROW) == null? - HConstants.EMPTY_START_ROW: - Bytes.toBytes(URLDecoder.decode(request.getParameter(START_ROW), - HConstants.UTF8_ENCODING)); - // Empty start row is same value as empty end row. - byte [] endRow = request.getParameter(END_ROW) == null? - HConstants.EMPTY_START_ROW: - Bytes.toBytes(URLDecoder.decode(request.getParameter(END_ROW), - HConstants.UTF8_ENCODING)); - - Scanner scanner = (request.getParameter(END_ROW) == null)? - table.getScanner(columns, startRow): - table.getScanner(columns, startRow, endRow); - - // Make a scanner id by hashing the object toString value (object name + - // an id). Will make identifier less burdensome and more url friendly. 
- String scannerid = - Integer.toHexString(JenkinsHash.getInstance().hash(scanner.toString().getBytes(), -1)); - ScannerRecord sr = new ScannerRecord(scanner); - - // store the scanner for subsequent requests - this.scanners.put(scannerid, sr); - - // set a 201 (Created) header and a Location pointing to the new - // scanner - response.setStatus(201); - response.addHeader("Location", request.getContextPath() + "/" + - pathSegments[0] + "/" + pathSegments[1] + "/" + scannerid); - response.getOutputStream().close(); - } - - /* - * Delete scanner - * @param response - * @param scannerid - * @throws IOException - */ - private void deleteScanner(final HttpServletResponse response, - final String scannerid) - throws IOException, ServletException { - ScannerRecord sr = this.scanners.remove(scannerid); - if (sr == null) { - doNotFound(response, "No such scanner"); - } else { - sr.getScanner().close(); - response.setStatus(200); - response.getOutputStream().close(); - } - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java b/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java new file mode 100644 index 000000000000..a529eb318c80 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/ScannerModel.java @@ -0,0 +1,282 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Scanner; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + */ +public class ScannerModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TableModel.class); + + public ScannerModel(HBaseConfiguration config, HBaseAdmin admin) { + super.initialize(config, admin); + } + + // + // Normal Scanner + // + protected static class ScannerMaster { + + protected static Map scannerMap = new ConcurrentHashMap(); + protected static AtomicInteger nextScannerId = new AtomicInteger(1); + + public Integer addScanner(Scanner scanner) { + Integer i = new Integer(nextScannerId.getAndIncrement()); + scannerMap.put(i, scanner); + return i; + } + + public Scanner getScanner(Integer id) { + return scannerMap.get(id); + } + + public Scanner removeScanner(Integer id) { + return scannerMap.remove(id); + } + + /** + * @param id + * id of scanner to close + */ + public void scannerClose(Integer id) { + Scanner s = scannerMap.remove(id); + s.close(); + } + } + + protected static ScannerMaster scannerMaster = new ScannerMaster(); + + /** + * returns the next numResults RowResults from the Scaner mapped to Integer + * id. If the end of the table is reached, the scanner is closed and all + * succesfully retrieved rows are returned. + * + * @param id + * id target scanner is mapped to. + * @param numRows + * number of results to return. + * @return all successfully retrieved rows. + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public RowResult[] scannerGet(Integer id, Long numRows) + throws HBaseRestException { + try { + ArrayList a; + Scanner s; + RowResult r; + + a = new ArrayList(); + s = scannerMaster.getScanner(id); + + if (s == null) { + throw new HBaseRestException("ScannerId: " + id + + " is unavailable. Please create a new scanner"); + } + + for (int i = 0; i < numRows; i++) { + if ((r = s.next()) != null) { + a.add(r); + } else { + scannerMaster.scannerClose(id); + break; + } + } + + return a.toArray(new RowResult[0]); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + /** + * Returns all rows inbetween the scanners current position and the end of the + * table. 
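ScannerMaster above is essentially an id-to-resource registry: an AtomicInteger hands out scanner ids and a ConcurrentHashMap keeps the open scanners so later REST calls can address them by id. A self-contained sketch of that pattern, with a plain String standing in for the HBase Scanner:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative registry only; names are not part of the patch.
public class IdRegistryExample {
  private final ConcurrentHashMap<Integer, String> resources =
      new ConcurrentHashMap<Integer, String>();
  private final AtomicInteger nextId = new AtomicInteger(1);

  public Integer add(String resource) {
    Integer id = Integer.valueOf(nextId.getAndIncrement());
    resources.put(id, resource);
    return id;
  }

  public String get(Integer id) { return resources.get(id); }

  public String remove(Integer id) { return resources.remove(id); }

  public static void main(String[] args) {
    IdRegistryExample registry = new IdRegistryExample();
    Integer id = registry.add("scanner over table 'mytable'");
    System.out.println(id + " -> " + registry.get(id)); // 1 -> scanner over table 'mytable'
    registry.remove(id);                                // analogous to scannerClose(id)
  }
}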
+ * + * @param id + * id of scanner to use + * @return all rows till end of table + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public RowResult[] scannerGet(Integer id) throws HBaseRestException { + try { + ArrayList a; + Scanner s; + RowResult r; + + a = new ArrayList(); + s = scannerMaster.getScanner(id); + + while ((r = s.next()) != null) { + a.add(r); + } + + scannerMaster.scannerClose(id); + + return a.toArray(new RowResult[0]); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public boolean scannerClose(Integer id) throws HBaseRestException { + Scanner s = scannerMaster.removeScanner(id); + + if (s == null) { + throw new HBaseRestException("Scanner id: " + id + " does not exist"); + } + return true; + } + + // Scanner Open Methods + // No Columns + public ScannerIdentifier scannerOpen(byte[] tableName) + throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName)); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, long timestamp) + throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), timestamp); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow) + throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow, + long timestamp) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow, timestamp); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, long timestamp, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), timestamp, filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow, filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[] startRow, + long timestamp, RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, getColumns(tableName), startRow, timestamp, + filter); + } + + // With Columns + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, HConstants.EMPTY_START_ROW, timestamp))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns) + throws HBaseRestException { + return scannerOpen(tableName, columns, HConstants.LATEST_TIMESTAMP); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow, long timestamp) throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, startRow, timestamp))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow) throws HBaseRestException { + return scannerOpen(tableName, columns, startRow, + HConstants.LATEST_TIMESTAMP); + } + + public 
ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + long timestamp, RowFilterInterface filter) throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, HConstants.EMPTY_START_ROW, timestamp, filter))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, columns, HConstants.LATEST_TIMESTAMP, filter); + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow, long timestamp, RowFilterInterface filter) + throws HBaseRestException { + try { + HTable table; + table = new HTable(tableName); + return new ScannerIdentifier(scannerMaster.addScanner(table.getScanner( + columns, startRow, timestamp, filter))); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public ScannerIdentifier scannerOpen(byte[] tableName, byte[][] columns, + byte[] startRow, RowFilterInterface filter) throws HBaseRestException { + return scannerOpen(tableName, columns, startRow, + HConstants.LATEST_TIMESTAMP, filter); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/Status.java b/src/java/org/apache/hadoop/hbase/rest/Status.java new file mode 100644 index 000000000000..9cc5e8572b40 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/Status.java @@ -0,0 +1,256 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.HashMap; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; +import org.apache.hadoop.hbase.util.Bytes; + +import agilejson.TOJSON; + +public class Status { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(Status.class); + + public static final HashMap statNames = new HashMap(); + + static { + statNames.put(HttpServletResponse.SC_CONTINUE, "continue"); + statNames.put(HttpServletResponse.SC_SWITCHING_PROTOCOLS, + "switching protocols"); + statNames.put(HttpServletResponse.SC_OK, "ok"); + statNames.put(HttpServletResponse.SC_CREATED, "created"); + statNames.put(HttpServletResponse.SC_ACCEPTED, "accepted"); + statNames.put(HttpServletResponse.SC_NON_AUTHORITATIVE_INFORMATION, + "non-authoritative information"); + statNames.put(HttpServletResponse.SC_NO_CONTENT, "no content"); + statNames.put(HttpServletResponse.SC_RESET_CONTENT, "reset content"); + statNames.put(HttpServletResponse.SC_PARTIAL_CONTENT, "partial content"); + statNames.put(HttpServletResponse.SC_MULTIPLE_CHOICES, "multiple choices"); + statNames + .put(HttpServletResponse.SC_MOVED_PERMANENTLY, "moved permanently"); + statNames + .put(HttpServletResponse.SC_MOVED_TEMPORARILY, "moved temporarily"); + statNames.put(HttpServletResponse.SC_FOUND, "found"); + statNames.put(HttpServletResponse.SC_SEE_OTHER, "see other"); + statNames.put(HttpServletResponse.SC_NOT_MODIFIED, "not modified"); + statNames.put(HttpServletResponse.SC_USE_PROXY, "use proxy"); + statNames.put(HttpServletResponse.SC_TEMPORARY_REDIRECT, + "temporary redirect"); + statNames.put(HttpServletResponse.SC_BAD_REQUEST, "bad request"); + statNames.put(HttpServletResponse.SC_UNAUTHORIZED, "unauthorized"); + statNames.put(HttpServletResponse.SC_FORBIDDEN, "forbidden"); + statNames.put(HttpServletResponse.SC_NOT_FOUND, "not found"); + statNames.put(HttpServletResponse.SC_METHOD_NOT_ALLOWED, + "method not allowed"); + statNames.put(HttpServletResponse.SC_NOT_ACCEPTABLE, "not acceptable"); + statNames.put(HttpServletResponse.SC_PROXY_AUTHENTICATION_REQUIRED, + "proxy authentication required"); + statNames.put(HttpServletResponse.SC_REQUEST_TIMEOUT, "request timeout"); + statNames.put(HttpServletResponse.SC_CONFLICT, "conflict"); + statNames.put(HttpServletResponse.SC_GONE, "gone"); + statNames.put(HttpServletResponse.SC_LENGTH_REQUIRED, "length required"); + statNames.put(HttpServletResponse.SC_PRECONDITION_FAILED, + "precondition failed"); + statNames.put(HttpServletResponse.SC_REQUEST_ENTITY_TOO_LARGE, + "request entity too large"); + statNames.put(HttpServletResponse.SC_REQUEST_URI_TOO_LONG, + "request uri too long"); + statNames.put(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, + "unsupported media type"); + statNames.put(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE, + "requested range not satisfiable"); + statNames.put(HttpServletResponse.SC_EXPECTATION_FAILED, + "expectation failed"); + statNames.put(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, + "internal server error"); + statNames.put(HttpServletResponse.SC_NOT_IMPLEMENTED, "not implemented"); + statNames.put(HttpServletResponse.SC_BAD_GATEWAY, "bad gateway"); + 
statNames.put(HttpServletResponse.SC_SERVICE_UNAVAILABLE, + "service unavailable"); + statNames.put(HttpServletResponse.SC_GATEWAY_TIMEOUT, "gateway timeout"); + statNames.put(HttpServletResponse.SC_HTTP_VERSION_NOT_SUPPORTED, + "http version not supported"); + } + protected int statusCode; + protected HttpServletResponse response; + protected Object message; + protected IRestSerializer serializer; + protected byte[][] pathSegments; + + public int getStatusCode() { + return statusCode; + } + + @TOJSON + public Object getMessage() { + return message; + } + + public static class StatusMessage implements ISerializable { + int statusCode; + boolean error; + Object reason; + + public StatusMessage(int statusCode, boolean error, Object o) { + this.statusCode = statusCode; + this.error = error; + reason = o; + } + + @TOJSON + public int getStatusCode() { + return statusCode; + } + + @TOJSON + public boolean getError() { + return error; + } + + @TOJSON + public Object getMessage() { + return reason; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML(org.apache.hadoop.hbase + * .rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeStatusMessage(this); + } + } + + public Status(HttpServletResponse r, IRestSerializer serializer, byte[][] bs) { + this.setOK(); + this.response = r; + this.serializer = serializer; + this.pathSegments = bs; + } + + // Good Messages + public void setOK() { + this.statusCode = HttpServletResponse.SC_OK; + this.message = new StatusMessage(HttpServletResponse.SC_OK, false, "success"); + } + + public void setOK(Object message) { + this.statusCode = HttpServletResponse.SC_OK; + this.message = message; + } + + public void setAccepted() { + this.statusCode = HttpServletResponse.SC_ACCEPTED; + this.message = new StatusMessage(HttpServletResponse.SC_ACCEPTED, false, "success"); + } + + public void setExists(boolean error) { + this.statusCode = HttpServletResponse.SC_CONFLICT; + this.message = new StatusMessage(statusCode, error, "table already exists"); + } + + public void setCreated() { + this.statusCode = HttpServletResponse.SC_CREATED; + this.setOK(); + } + + public void setScannerCreated(ScannerIdentifier scannerIdentifier) { + this.statusCode = HttpServletResponse.SC_OK; + this.message = scannerIdentifier; + response.addHeader("Location", "/" + Bytes.toString(pathSegments[0]) + + "/scanner/" + scannerIdentifier.getId()); + } + // Bad Messages + + public void setInternalError(Exception e) { + this.statusCode = HttpServletResponse.SC_INTERNAL_SERVER_ERROR; + this.message = new StatusMessage(statusCode, true, e); + } + + public void setNoQueryResults() { + this.statusCode = HttpServletResponse.SC_NOT_FOUND; + this.message = new StatusMessage(statusCode, true, "no query results"); + } + + public void setConflict(Object message) { + this.statusCode = HttpServletResponse.SC_CONFLICT; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setNotFound(Object message) { + this.statusCode = HttpServletResponse.SC_NOT_FOUND; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setBadRequest(Object message) { + this.statusCode = HttpServletResponse.SC_BAD_REQUEST; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setNotFound() { + setNotFound("Unable to find requested URI"); + } + + public void setMethodNotImplemented() { + this.statusCode = 
HttpServletResponse.SC_METHOD_NOT_ALLOWED; + this.message = new StatusMessage(statusCode, true, "method not implemented"); + } + + public void setInvalidURI() { + setInvalidURI("Invalid URI"); + } + + public void setInvalidURI(Object message) { + this.statusCode = HttpServletResponse.SC_BAD_REQUEST; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setUnsupportedMediaType(Object message) { + this.statusCode = HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE; + this.message = new StatusMessage(statusCode, true, message); + } + + public void setGone() { + this.statusCode = HttpServletResponse.SC_GONE; + this.message = new StatusMessage(statusCode, true, "item no longer available"); + } + + + // Utility + public void respond() throws HBaseRestException { + response.setStatus(this.statusCode); + this.serializer.writeOutput(this.message); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/TableController.java b/src/java/org/apache/hadoop/hbase/rest/TableController.java new file mode 100644 index 000000000000..54866f21d706 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TableController.java @@ -0,0 +1,170 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.ArrayList; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public class TableController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TableController.class); + + protected TableModel getModel() { + return (TableModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) { + return new TableModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TableModel innerModel = getModel(); + + byte[] tableName; + + tableName = pathSegments[0]; + if (pathSegments.length < 2) { + s.setOK(innerModel.getTableMetadata(Bytes.toString(tableName))); + } else { + if (Bytes.toString(pathSegments[1]).toLowerCase().equals(REGIONS)) { + s.setOK(innerModel.getTableRegions(Bytes.toString(tableName))); + } else { + s.setBadRequest("unknown query."); + } + } + s.respond(); + } + + /* + * (non-Javadoc) + * + * @param input column descriptor JSON. Should be of the form:
+   * {"column_families":[ { "name":STRING, "bloomfilter":BOOLEAN,
+   * "max_versions":INTEGER, "compression_type":STRING, "in_memory":BOOLEAN,
+   * "block_cache_enabled":BOOLEAN, "max_value_length":INTEGER,
+   * "time_to_live":INTEGER ]} 
If any of the json object fields (except + * name) are not included the default values will be included instead. The + * default values are:
 bloomfilter => false max_versions => 3
+   * compression_type => NONE in_memory => false block_cache_enabled => false
+   * max_value_length => 2147483647 time_to_live => Integer.MAX_VALUE 
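+   * For illustration only (assumed values, not part of this patch), a minimal
+   * column_families fragment in this format could be:
+   * {"column_families":[ { "name":"info:", "max_versions":5, "in_memory":true } ]}
+   * Omitted fields fall back to the defaults listed above.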
+ * + * @see + * org.apache.hadoop.hbase.rest.AbstractController#post(org.apache.hadoop. + * hbase.rest.Status, byte[][], java.util.Map, byte[], + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser) + */ + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + TableModel innerModel = getModel(); + + byte[] tableName; + + if (pathSegments.length == 0) { + // If no input, we don't know columnfamily schema, so send + // no data + if (input.length == 0) { + s.setBadRequest("no data send with post request"); + } else { + HTableDescriptor htd = parser.getTableDescriptor(input); + // Send to innerModel. If iM returns false, means the + // table already exists so return conflict. + if (!innerModel.post(htd.getName(), htd)) { + s.setConflict("table already exists"); + } else { + // Otherwise successfully created table. Return "created":true + s.setCreated(); + } + } + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.ENABLE)) { + tableName = pathSegments[0]; + innerModel.enableTable(tableName); + s.setAccepted(); + } else if (Bytes.toString(pathSegments[1]).toLowerCase().equals( + RESTConstants.DISABLE)) { + tableName = pathSegments[0]; + innerModel.disableTable(tableName); + s.setAccepted(); + } else { + s.setBadRequest("Unknown Query."); + } + s.respond(); + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + if (pathSegments.length != 1) { + s.setBadRequest("must specifify the name of the table"); + s.respond(); + } else if (queryMap.size() > 0) { + s + .setBadRequest("no query string should be specified when updating a table"); + s.respond(); + } else { + ArrayList newColumns = parser + .getColumnDescriptors(input); + byte[] tableName = pathSegments[0]; + getModel().updateTable(Bytes.toString(tableName), newColumns); + s.setOK(); + s.respond(); + } + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TableModel innerModel = getModel(); + + byte[] tableName; + + tableName = pathSegments[0]; + + if (pathSegments.length == 1) { + if (!innerModel.delete(tableName)) { + s.setBadRequest("table does not exist"); + } else { + s.setAccepted(); + } + s.respond(); + } else { + + } + } + +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/rest/TableHandler.java b/src/java/org/apache/hadoop/hbase/rest/TableHandler.java index f5a111fcf281..e69de29bb2d1 100644 --- a/src/java/org/apache/hadoop/hbase/rest/TableHandler.java +++ b/src/java/org/apache/hadoop/hbase/rest/TableHandler.java @@ -1,416 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.rest; - -import java.io.IOException; -import java.io.PrintWriter; - -import javax.servlet.ServletException; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; - -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.util.Bytes; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import org.w3c.dom.Node; -import org.w3c.dom.NodeList; -import org.znerd.xmlenc.XMLOutputter; - - -/** - * TableHandler fields all requests that deal with an individual table. - * That means all requests that start with /api/[table_name]/... go to - * this handler. - */ -public class TableHandler extends GenericHandler { - public static final String DISABLE = "disable"; - public static final String ENABLE = "enable"; - - public TableHandler(HBaseConfiguration conf, HBaseAdmin admin) - throws ServletException{ - super(conf, admin); - } - - public void doGet(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - // if it's just table name, return the metadata - if (pathSegments.length == 1) { - getTableMetadata(request, response, pathSegments[0]); - } - else{ - HTable table = getTable(pathSegments[0]); - if (pathSegments[1].toLowerCase().equals(REGIONS)) { - // get a region list - getTableRegions(table, request, response); - } - else{ - doNotFound(response, "Not handled in TableHandler"); - } - } - } - - public void doPost(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - if (pathSegments.length == 0 || pathSegments[0].length() <= 0) { - // if it's a creation operation - putTable(request, response, pathSegments); - } else { - // if it's a disable operation or enable operation - String tableName = pathSegments[0]; - if (pathSegments[1].toLowerCase().equals(DISABLE)) { - admin.disableTable(tableName); - } else if (pathSegments[1].toLowerCase().equals(ENABLE)) { - admin.enableTable(tableName); - } - response.setStatus(202); - } - } - - public void doPut(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - updateTable(request, response, pathSegments); - } - - public void doDelete(HttpServletRequest request, HttpServletResponse response, - String[] pathSegments) - throws ServletException, IOException { - deleteTable(request, response, pathSegments); - } - - /* - * Return region offsets. - * @param request - * @param response - */ - private void getTableRegions(HTable table, final HttpServletRequest request, - final HttpServletResponse response) - throws IOException { - // Presumption is that this.table has already been focused on target table. - byte [][] startKeys = table.getStartKeys(); - // Presumption is that this.table has already been set against target table - switch (ContentType.getContentType(request.getHeader(ACCEPT))) { - case XML: - setResponseHeader(response, startKeys.length > 0? 
200: 204, - ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag("regions"); - for (int i = 0; i < startKeys.length; i++) { - doElement(outputter, "region", Bytes.toString(startKeys[i])); - } - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - break; - case PLAIN: - setResponseHeader(response, startKeys.length > 0? 200: 204, - ContentType.PLAIN.toString()); - PrintWriter out = response.getWriter(); - for (int i = 0; i < startKeys.length; i++) { - // TODO: Add in the server location. Is it needed? - out.print(Bytes.toString(startKeys[i])); - } - out.close(); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - /* - * Get table metadata. - * @param request - * @param response - * @param tableName - * @throws IOException - */ - private void getTableMetadata(final HttpServletRequest request, - final HttpServletResponse response, final String tableName) - throws IOException { - HTableDescriptor [] tables = this.admin.listTables(); - HTableDescriptor descriptor = null; - for (int i = 0; i < tables.length; i++) { - if (Bytes.toString(tables[i].getName()).equals(tableName)) { - descriptor = tables[i]; - break; - } - } - if (descriptor == null) { - doNotFound(response, "Table not found!"); - } else { - // Presumption is that this.table has already been set against target table - ContentType type = ContentType.getContentType(request.getHeader(ACCEPT)); - switch (type) { - case XML: - setResponseHeader(response, 200, ContentType.XML.toString()); - XMLOutputter outputter = getXMLOutputter(response.getWriter()); - outputter.startTag("table"); - doElement(outputter, "name", Bytes.toString(descriptor.getName())); - outputter.startTag("columnfamilies"); - for (HColumnDescriptor e: descriptor.getFamilies()) { - outputter.startTag("columnfamily"); - doElement(outputter, "name", Bytes.toString(e.getName())); - doElement(outputter, "compression", e.getCompression().toString()); - doElement(outputter, "bloomfilter", - Boolean.toString(e.isBloomfilter())); - doElement(outputter, "max-versions", - Integer.toString(e.getMaxVersions())); - doElement(outputter, "maximum-cell-size", - Integer.toString(e.getMaxValueLength())); - outputter.endTag(); - } - outputter.endTag(); - outputter.endTag(); - outputter.endDocument(); - outputter.getWriter().close(); - break; - case PLAIN: - setResponseHeader(response, 200, ContentType.PLAIN.toString()); - PrintWriter out = response.getWriter(); - out.print(descriptor.toString()); - out.close(); - break; - case MIME: - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - } - - private void putTable(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws IOException, ServletException { - switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) { - case XML: - putTableXml(request, response, pathSegments); - break; - case MIME: - doNotAcceptable(response, "Don't support multipart/related yet..."); - break; - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - - private void updateTable(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws IOException, ServletException { - switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) { - case XML: - 
updateTableXml(request, response, pathSegments); - break; - case MIME: - doNotAcceptable(response, "Don't support multipart/related yet..."); - break; - default: - doNotAcceptable(response, "Unsupported Accept Header Content: " + - request.getHeader(CONTENT_TYPE)); - } - } - - private void deleteTable(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) - throws ServletException { - try { - String tableName = pathSegments[0]; - String[] column_params = request.getParameterValues(COLUMN); - if (column_params != null && column_params.length > 0) { - for (String column : column_params) { - admin.deleteColumn(tableName, makeColumnName(column)); - } - } else { - admin.deleteTable(tableName); - } - response.setStatus(202); - } catch (Exception e) { - throw new ServletException(e); - } - } - - private void putTableXml(HttpServletRequest - request, HttpServletResponse response, String[] pathSegments) - throws IOException, ServletException { - DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory - .newInstance(); - // ignore all comments inside the xml file - docBuilderFactory.setIgnoringComments(true); - - DocumentBuilder builder = null; - Document doc = null; - - try { - builder = docBuilderFactory.newDocumentBuilder(); - doc = builder.parse(request.getInputStream()); - } catch (javax.xml.parsers.ParserConfigurationException e) { - throw new ServletException(e); - } catch (org.xml.sax.SAXException e) { - throw new ServletException(e); - } - - try { - Node name_node = doc.getElementsByTagName("name").item(0); - String table_name = name_node.getFirstChild().getNodeValue(); - - HTableDescriptor htd = new HTableDescriptor(table_name); - NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); - for (int i = 0; i < columnfamily_nodes.getLength(); i++) { - Element columnfamily = (Element)columnfamily_nodes.item(i); - htd.addFamily(putColumnFamilyXml(columnfamily)); - } - admin.createTable(htd); - } catch (Exception e) { - throw new ServletException(e); - } - } - - private void updateTableXml(HttpServletRequest request, - HttpServletResponse response, String[] pathSegments) throws IOException, - ServletException { - DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory - .newInstance(); - // ignore all comments inside the xml file - docBuilderFactory.setIgnoringComments(true); - - DocumentBuilder builder = null; - Document doc = null; - - try { - builder = docBuilderFactory.newDocumentBuilder(); - doc = builder.parse(request.getInputStream()); - } catch (javax.xml.parsers.ParserConfigurationException e) { - throw new ServletException(e); - } catch (org.xml.sax.SAXException e) { - throw new ServletException(e); - } - - try { - String tableName = pathSegments[0]; - HTableDescriptor htd = admin.getTableDescriptor(tableName); - - NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); - - for (int i = 0; i < columnfamily_nodes.getLength(); i++) { - Element columnfamily = (Element) columnfamily_nodes.item(i); - HColumnDescriptor hcd = putColumnFamilyXml(columnfamily, htd); - if (htd.hasFamily(Bytes.toBytes(hcd.getNameAsString()))) { - admin.modifyColumn(tableName, hcd.getNameAsString(), hcd); - } else { - admin.addColumn(tableName, hcd); - } - } - } catch (Exception e) { - throw new ServletException(e); - } - } - - private HColumnDescriptor putColumnFamilyXml(Element columnfamily) { - return putColumnFamilyXml(columnfamily, null); - } - - private HColumnDescriptor putColumnFamilyXml(Element columnfamily, HTableDescriptor 
currentTDesp) { - Node name_node = columnfamily.getElementsByTagName("name").item(0); - String colname = makeColumnName(name_node.getFirstChild().getNodeValue()); - - int max_versions = HColumnDescriptor.DEFAULT_VERSIONS; - CompressionType compression = HColumnDescriptor.DEFAULT_COMPRESSION; - boolean in_memory = HColumnDescriptor.DEFAULT_IN_MEMORY; - boolean block_cache = HColumnDescriptor.DEFAULT_BLOCKCACHE; - int max_cell_size = HColumnDescriptor.DEFAULT_LENGTH; - int ttl = HColumnDescriptor.DEFAULT_TTL; - boolean bloomfilter = HColumnDescriptor.DEFAULT_BLOOMFILTER; - - if (currentTDesp != null) { - HColumnDescriptor currentCDesp = currentTDesp.getFamily(Bytes.toBytes(colname)); - if (currentCDesp != null) { - max_versions = currentCDesp.getMaxVersions(); - compression = currentCDesp.getCompression(); - in_memory = currentCDesp.isInMemory(); - block_cache = currentCDesp.isBlockCacheEnabled(); - max_cell_size = currentCDesp.getMaxValueLength(); - ttl = currentCDesp.getTimeToLive(); - bloomfilter = currentCDesp.isBloomfilter(); - } - } - - NodeList max_versions_list = columnfamily.getElementsByTagName("max-versions"); - if (max_versions_list.getLength() > 0) { - max_versions = Integer.parseInt(max_versions_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList compression_list = columnfamily.getElementsByTagName("compression"); - if (compression_list.getLength() > 0) { - compression = CompressionType.valueOf(compression_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList in_memory_list = columnfamily.getElementsByTagName("in-memory"); - if (in_memory_list.getLength() > 0) { - in_memory = Boolean.valueOf(in_memory_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList block_cache_list = columnfamily.getElementsByTagName("block-cache"); - if (block_cache_list.getLength() > 0) { - block_cache = Boolean.valueOf(block_cache_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList max_cell_size_list = columnfamily.getElementsByTagName("max-cell-size"); - if (max_cell_size_list.getLength() > 0) { - max_cell_size = Integer.valueOf(max_cell_size_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList ttl_list = columnfamily.getElementsByTagName("time-to-live"); - if (ttl_list.getLength() > 0) { - ttl = Integer.valueOf(ttl_list.item(0).getFirstChild().getNodeValue()); - } - - NodeList bloomfilter_list = columnfamily.getElementsByTagName("bloomfilter"); - if (bloomfilter_list.getLength() > 0) { - bloomfilter = Boolean.valueOf(bloomfilter_list.item(0).getFirstChild().getNodeValue()); - } - - HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(colname), max_versions, - compression, in_memory, block_cache, max_cell_size, ttl, bloomfilter); - - NodeList metadataList = columnfamily.getElementsByTagName("metadata"); - for (int i = 0; i < metadataList.getLength(); i++) { - Element metadataColumn = (Element)metadataList.item(i); - // extract the name and value children - Node mname_node = metadataColumn.getElementsByTagName("name").item(0); - String mname = mname_node.getFirstChild().getNodeValue(); - Node mvalue_node = metadataColumn.getElementsByTagName("value").item(0); - String mvalue = mvalue_node.getFirstChild().getNodeValue(); - hcd.setValue(mname, mvalue); - } - - return hcd; - } -} diff --git a/src/java/org/apache/hadoop/hbase/rest/TableModel.java b/src/java/org/apache/hadoop/hbase/rest/TableModel.java new file mode 100644 index 000000000000..2202474fd199 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TableModel.java @@ -0,0 +1,280 @@ +/** 
+ * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Scanner; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; +import org.apache.hadoop.hbase.util.Bytes; + +import agilejson.TOJSON; + +public class TableModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TableModel.class); + + public TableModel(HBaseConfiguration config, HBaseAdmin admin) { + super.initialize(config, admin); + } + + // Get Methods + public RowResult[] get(byte[] tableName) throws HBaseRestException { + return get(tableName, getColumns(tableName)); + } + + /** + * Returns all cells from all rows from the given table in the given columns. + * The output is in the order that the columns are given. 
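+   * A hedged example call (table and family names are illustrative only,
+   * not defined by this patch):
+   *   RowResult[] rows = tableModel.get(Bytes.toBytes("mytable"),
+   *       new byte[][] { Bytes.toBytes("info:"), Bytes.toBytes("stats:") });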
+ * + * @param tableName + * table name + * @param columnNames + * column names + * @return resultant rows + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public RowResult[] get(byte[] tableName, byte[][] columnNames) + throws HBaseRestException { + try { + ArrayList a = new ArrayList(); + HTable table = new HTable(tableName); + + Scanner s = table.getScanner(columnNames); + RowResult r; + + while ((r = s.next()) != null) { + a.add(r); + } + + return a.toArray(new RowResult[0]); + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + protected boolean doesTableExist(byte[] tableName) throws HBaseRestException { + try { + return this.admin.tableExists(tableName); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + protected void disableTable(byte[] tableName) throws HBaseRestException { + try { + this.admin.disableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("IOException disabling table", e); + } + } + + protected void enableTable(byte[] tableName) throws HBaseRestException { + try { + this.admin.enableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("IOException enabiling table", e); + } + } + + public boolean updateTable(String tableName, + ArrayList columns) throws HBaseRestException { + HTableDescriptor htc = null; + try { + htc = this.admin.getTableDescriptor(tableName); + } catch (IOException e) { + throw new HBaseRestException("Table does not exist"); + } + + for (HColumnDescriptor column : columns) { + if (htc.hasFamily(Bytes.toBytes(column.getNameAsString()))) { + try { + this.admin.disableTable(tableName); + this.admin.modifyColumn(tableName, column.getNameAsString(), column); + this.admin.enableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("unable to modify column " + + column.getNameAsString(), e); + } + } else { + try { + this.admin.disableTable(tableName); + this.admin.addColumn(tableName, column); + this.admin.enableTable(tableName); + } catch (IOException e) { + throw new HBaseRestException("unable to add column " + + column.getNameAsString(), e); + } + } + } + + return true; + + } + + /** + * Get table metadata. + * + * @param request + * @param response + * @param tableName + * @throws IOException + */ + public HTableDescriptor getTableMetadata(final String tableName) + throws HBaseRestException { + HTableDescriptor descriptor = null; + try { + HTableDescriptor[] tables = this.admin.listTables(); + for (int i = 0; i < tables.length; i++) { + if (Bytes.toString(tables[i].getName()).equals(tableName)) { + descriptor = tables[i]; + break; + } + } + if (descriptor == null) { + + } else { + return descriptor; + } + } catch (IOException e) { + throw new HBaseRestException("error processing request."); + } + return descriptor; + } + + /** + * Return region offsets. + * + * @param request + * @param response + */ + public Regions getTableRegions(final String tableName) + throws HBaseRestException { + try { + HTable table = new HTable(this.conf, tableName); + // Presumption is that this.table has already been focused on target + // table. + Regions regions = new Regions(table.getStartKeys()); + // Presumption is that this.table has already been set against target + // table + return regions; + } catch (IOException e) { + throw new HBaseRestException("Unable to get regions from table"); + } + } + + // Post Methods + /** + * Creates table tableName described by the json in input. 
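+   * A hedged sketch of a direct caller (the table and family names are
+   * illustrative; the REST controller normally builds the descriptor from
+   * the parsed JSON):
+   *   HTableDescriptor htd = new HTableDescriptor("mytable");
+   *   htd.addFamily(new HColumnDescriptor("info:"));
+   *   boolean created = tableModel.post(htd.getName(), htd);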
+ * + * @param tableName + * table name + * @param htd + * HBaseTableDescriptor for the table to be created + * + * @return true if operation does not fail due to a table with the given + * tableName not existing. + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public boolean post(byte[] tableName, HTableDescriptor htd) + throws HBaseRestException { + try { + if (!this.admin.tableExists(tableName)) { + this.admin.createTable(htd); + return true; + } + } catch (IOException e) { + throw new HBaseRestException(e); + } + return false; + } + + /** + * Deletes table tableName + * + * @param tableName + * name of the table. + * @return true if table exists and deleted, false if table does not exist. + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + public boolean delete(byte[] tableName) throws HBaseRestException { + try { + if (this.admin.tableExists(tableName)) { + this.admin.disableTable(tableName); + this.admin.deleteTable(tableName); + return true; + } + return false; + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + public class Regions implements ISerializable { + byte[][] regionKey; + + public Regions(byte[][] bs) { + super(); + this.regionKey = bs; + } + + @SuppressWarnings("unused") + private Regions() { + } + + /** + * @return the regionKey + */ + @TOJSON(fieldName = "region") + public byte[][] getRegionKey() { + return regionKey; + } + + /** + * @param regionKey + * the regionKey to set + */ + public void setRegionKey(byte[][] regionKey) { + this.regionKey = regionKey; + } + + /* + * (non-Javadoc) + * + * @see org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML() + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeRegionData(this); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/TimestampController.java b/src/java/org/apache/hadoop/hbase/rest/TimestampController.java new file mode 100644 index 000000000000..d43b1bb6eb49 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TimestampController.java @@ -0,0 +1,139 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser; +import org.apache.hadoop.hbase.util.Bytes; + +public class TimestampController extends AbstractController { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TimestampController.class); + + protected TimestampModel getModel() { + return (TimestampModel) model; + } + + @Override + protected AbstractModel generateModel( + @SuppressWarnings("hiding") HBaseConfiguration conf, HBaseAdmin admin) { + return new TimestampModel(conf, admin); + } + + @Override + public void get(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TimestampModel innerModel = getModel(); + + byte[] tableName; + byte[] rowName; + long timestamp; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + timestamp = Bytes.toLong(pathSegments[3]); + + if (queryMap.size() == 0) { + s.setOK(innerModel.get(tableName, rowName, timestamp)); + } else { + // get the column names if any were passed in + String[] column_params = queryMap.get(RESTConstants.COLUMN); + byte[][] columns = null; + + if (column_params != null && column_params.length > 0) { + List available_columns = new ArrayList(); + for (String column_param : column_params) { + available_columns.add(column_param); + } + columns = Bytes.toByteArrays(available_columns.toArray(new String[0])); + } + s.setOK(innerModel.get(tableName, rowName, columns, timestamp)); + } + s.respond(); + } + + @Override + public void post(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + TimestampModel innerModel = getModel(); + + byte[] tableName; + byte[] rowName; + byte[] columnName; + long timestamp; + + tableName = pathSegments[0]; + rowName = pathSegments[1]; + columnName = pathSegments[2]; + timestamp = Bytes.toLong(pathSegments[3]); + + try { + if (queryMap.size() == 0) { + innerModel.post(tableName, rowName, columnName, timestamp, input); + s.setOK(); + } else { + s.setUnsupportedMediaType("Unknown Query."); + } + } catch (HBaseRestException e) { + s.setUnsupportedMediaType(e.getMessage()); + } + s.respond(); + } + + @Override + public void put(Status s, byte[][] pathSegments, + Map queryMap, byte[] input, IHBaseRestParser parser) + throws HBaseRestException { + throw new UnsupportedOperationException("Not supported yet."); + } + + @Override + public void delete(Status s, byte[][] pathSegments, + Map queryMap) throws HBaseRestException { + TimestampModel innerModel = getModel(); + + byte[] tableName; + byte[] rowName; + long timestamp; + + tableName = pathSegments[0]; + rowName = pathSegments[2]; + timestamp = Bytes.toLong(pathSegments[3]); + + if (queryMap.size() == 0) { + innerModel.delete(tableName, rowName, timestamp); + } else { + innerModel.delete(tableName, rowName, this + .getColumnsFromQueryMap(queryMap), timestamp); + } + s.setAccepted(); + s.respond(); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java b/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java new file mode 100644 index 000000000000..65dff21752dd --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/TimestampModel.java 
@@ -0,0 +1,126 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.io.BatchUpdate; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +public class TimestampModel extends AbstractModel { + + @SuppressWarnings("unused") + private Log LOG = LogFactory.getLog(TimestampModel.class); + + public TimestampModel(HBaseConfiguration conf, HBaseAdmin admin) { + super.initialize(conf, admin); + } + + public void delete(byte[] tableName, byte[] rowName, long timestamp) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + table.deleteAll(rowName, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void delete(byte[] tableName, byte[] rowName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + for (byte[] column : columns) { + table.deleteAll(rowName, column, timestamp); + } + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public Cell get(byte[] tableName, byte[] rowName, byte[] columnName, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.get(rowName, columnName, timestamp, 1)[0]; + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public Cell[] get(byte[] tableName, byte[] rowName, byte[] columnName, + long timestamp, int numVersions) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.get(rowName, columnName, timestamp, numVersions); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public RowResult get(byte[] tableName, byte[] rowName, byte[][] columns, + long timestamp) throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, columns, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + /** + * @param tableName + * @param rowName + * @param timestamp + * @return + * @throws HBaseRestException + */ + public RowResult get(byte[] tableName, byte[] rowName, long timestamp) + throws HBaseRestException { + try { + HTable table = new HTable(tableName); + return table.getRow(rowName, timestamp); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } + + public void 
post(byte[] tableName, byte[] rowName, byte[] columnName, + long timestamp, byte[] value) throws HBaseRestException { + try { + HTable table; + BatchUpdate b; + + table = new HTable(tableName); + b = new BatchUpdate(rowName, timestamp); + + b.put(columnName, value); + table.commit(b); + } catch (IOException e) { + throw new HBaseRestException(e); + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java new file mode 100644 index 000000000000..1430396cbc5a --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/RestCell.java @@ -0,0 +1,103 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.descriptors; + +import org.apache.hadoop.hbase.io.Cell; + +import agilejson.TOJSON; + +/** + * + */ +public class RestCell extends Cell { + + byte[] name; + + + + /** + * + */ + public RestCell() { + super(); + // TODO Auto-generated constructor stub + } + + /** + * + */ + public RestCell(byte[] name, Cell cell) { + super(cell.getValue(), cell.getTimestamp()); + this.name = name; + } + + /** + * @param value + * @param timestamp + */ + public RestCell(byte[] value, long timestamp) { + super(value, timestamp); + // TODO Auto-generated constructor stub + } + + /** + * @param vals + * @param ts + */ + public RestCell(byte[][] vals, long[] ts) { + super(vals, ts); + // TODO Auto-generated constructor stub + } + + /** + * @param value + * @param timestamp + */ + public RestCell(String value, long timestamp) { + super(value, timestamp); + // TODO Auto-generated constructor stub + } + + /** + * @param vals + * @param ts + */ + public RestCell(String[] vals, long[] ts) { + super(vals, ts); + // TODO Auto-generated constructor stub + } + + /** + * @return the name + */ + @TOJSON(base64=true) + public byte[] getName() { + return name; + } + + /** + * @param name the name to set + */ + public void setName(byte[] name) { + this.name = name; + } + + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java new file mode 100644 index 000000000000..44010555fd7d --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/RowUpdateDescriptor.java @@ -0,0 +1,74 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.descriptors; + +import java.util.HashMap; +import java.util.Map; + +/** + * + */ +public class RowUpdateDescriptor { + private String tableName; + private String rowName; + private Map colvals = new HashMap(); + + public RowUpdateDescriptor(String tableName, String rowName) { + this.tableName = tableName; + this.rowName = rowName; + } + + public RowUpdateDescriptor() {} + + /** + * @return the tableName + */ + public String getTableName() { + return tableName; + } + + /** + * @param tableName the tableName to set + */ + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * @return the rowName + */ + public String getRowName() { + return rowName; + } + + /** + * @param rowName the rowName to set + */ + public void setRowName(String rowName) { + this.rowName = rowName; + } + + /** + * @return the test + */ + public Map getColVals() { + return colvals; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java new file mode 100644 index 000000000000..2cddabe44a1a --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerDescriptor.java @@ -0,0 +1,130 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.descriptors; + +/** + * + */ +public class ScannerDescriptor { + byte[][] columns; + long timestamp; + byte[] startRow; + byte[] stopRow; + String filters; + + /** + * @param columns + * @param timestamp + * @param startRow + * @param stopRow + * @param filters + */ + public ScannerDescriptor(byte[][] columns, long timestamp, byte[] startRow, + byte[] stopRow, String filters) { + super(); + this.columns = columns; + this.timestamp = timestamp; + this.startRow = startRow; + this.stopRow = stopRow; + this.filters = filters; + + if(this.startRow == null) { + this.startRow = new byte[0]; + } + if(this.stopRow == null) { + this.stopRow = new byte[0]; + } + } + + /** + * @return the columns + */ + public byte[][] getColumns() { + return columns; + } + + /** + * @param columns + * the columns to set + */ + public void setColumns(byte[][] columns) { + this.columns = columns; + } + + /** + * @return the timestamp + */ + public long getTimestamp() { + return timestamp; + } + + /** + * @param timestamp + * the timestamp to set + */ + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + /** + * @return the startRow + */ + public byte[] getStartRow() { + return startRow; + } + + /** + * @param startRow + * the startRow to set + */ + public void setStartRow(byte[] startRow) { + this.startRow = startRow; + } + + /** + * @return the stopRow + */ + public byte[] getStopRow() { + return stopRow; + } + + /** + * @param stopRow + * the stopRow to set + */ + public void setStopRow(byte[] stopRow) { + this.stopRow = stopRow; + } + + /** + * @return the filters + */ + public String getFilters() { + return filters; + } + + /** + * @param filters + * the filters to set + */ + public void setFilters(String filters) { + this.filters = filters; + } +} \ No newline at end of file diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java new file mode 100644 index 000000000000..168472afa7b1 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/ScannerIdentifier.java @@ -0,0 +1,96 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.descriptors; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; + +import agilejson.TOJSON; + +/** + * + */ +public class ScannerIdentifier implements ISerializable { + Integer id; + Long numRows; + + /** + * @param id + */ + public ScannerIdentifier(Integer id) { + super(); + this.id = id; + } + + /** + * @param id + * @param numRows + */ + public ScannerIdentifier(Integer id, Long numRows) { + super(); + this.id = id; + this.numRows = numRows; + } + + /** + * @return the id + */ + @TOJSON + public Integer getId() { + return id; + } + + /** + * @param id + * the id to set + */ + public void setId(Integer id) { + this.id = id; + } + + /** + * @return the numRows + */ + public Long getNumRows() { + return numRows; + } + + /** + * @param numRows + * the numRows to set + */ + public void setNumRows(Long numRows) { + this.numRows = numRows; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.xml.IOutputXML#toXML(org.apache.hadoop.hbase + * .rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeScannerIdentifier(this); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java b/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java new file mode 100644 index 000000000000..9125c807a85a --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/descriptors/TimestampsDescriptor.java @@ -0,0 +1,67 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.descriptors; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.serializer.IRestSerializer; +import org.apache.hadoop.hbase.rest.serializer.ISerializable; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * + */ +public class TimestampsDescriptor implements ISerializable { + Map timestamps = new HashMap(); + + public void add(long timestamp, byte[] tableName, byte[] rowName) { + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(tableName)); + sb.append("/row/"); + sb.append(Bytes.toString(rowName)); + sb.append('/'); + sb.append(timestamp); + + timestamps.put(timestamp, sb.toString()); + } + + /** + * @return the timestamps + */ + public Map getTimestamps() { + return timestamps; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.ISerializable#restSerialize(org + * .apache.hadoop.hbase.rest.serializer.IRestSerializer) + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException { + serializer.serializeTimestamps(this); + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java b/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java new file mode 100644 index 000000000000..a938534fd99c --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/exception/HBaseRestException.java @@ -0,0 +1,86 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
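A short sketch of the row/timestamp paths that TimestampsDescriptor.add builds; the table name, row key and timestamp are invented for illustration:

import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class TimestampsDescriptorExample {
  public static void main(String[] args) {
    TimestampsDescriptor timestamps = new TimestampsDescriptor();

    // add() keys the map by timestamp and stores a REST-style path of the
    // form /<table>/row/<row>/<timestamp>.
    timestamps.add(1232931465000L, Bytes.toBytes("mytable"), Bytes.toBytes("row1"));

    // Prints: /mytable/row/row1/1232931465000
    System.out.println(timestamps.getTimestamps().get(1232931465000L));
  }
}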
+ */ +package org.apache.hadoop.hbase.rest.exception; + +import agilejson.TOJSON; + +public class HBaseRestException extends Exception { + + /** + * + */ + private static final long serialVersionUID = 8481585437124298646L; + private Exception innerException; + private String innerClass; + private String innerMessage; + + public HBaseRestException() { + + } + + public HBaseRestException(Exception e) throws HBaseRestException { + if (HBaseRestException.class.isAssignableFrom(e.getClass())) { + throw ((HBaseRestException) e); + } + setInnerException(e); + innerClass = e.getClass().toString(); + innerMessage = e.getMessage(); + } + + /** + * @param message + */ + public HBaseRestException(String message) { + super(message); + innerMessage = message; + } + + public HBaseRestException(String message, Exception exception) { + super(message, exception); + setInnerException(exception); + innerClass = exception.getClass().toString(); + innerMessage = message; + } + + @TOJSON + public String getInnerClass() { + return this.innerClass; + } + + @TOJSON + public String getInnerMessage() { + return this.innerMessage; + } + + /** + * @param innerException + * the innerException to set + */ + public void setInnerException(Exception innerException) { + this.innerException = innerException; + } + + /** + * @return the innerException + */ + public Exception getInnerException() { + return innerException; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java new file mode 100644 index 000000000000..7af652d4cf27 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/ColumnValueFilterFactory.java @@ -0,0 +1,66 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.ColumnValueFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * FilterFactory that constructs a ColumnValueFilter from a JSON arg String. 
+ * Expects a Stringified JSON argument with the following form: + * + * { "column_name" : "MY_COLUMN_NAME", "compare_op" : "INSERT_COMPARE_OP_HERE", + * "value" : "MY_COMPARE_VALUE" } + * + * The current valid compare ops are: equal, greater, greater_or_equal, less, + * less_or_equal, not_equal + */ +public class ColumnValueFilterFactory implements FilterFactory { + + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException { + JSONObject innerJSON; + String columnName; + String compareOp; + String value; + + try { + innerJSON = new JSONObject(args); + } catch (JSONException e) { + throw new HBaseRestException(e); + } + + if ((columnName = innerJSON.optString(COLUMN_NAME)) == null) { + throw new MalformedFilterException(); + } + if ((compareOp = innerJSON.optString(COMPARE_OP)) == null) { + throw new MalformedFilterException(); + } + if ((value = innerJSON.optString(VALUE)) == null) { + throw new MalformedFilterException(); + } + + return new ColumnValueFilter(columnName.getBytes(), + ColumnValueFilter.CompareOp.valueOf(compareOp), value.getBytes()); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java new file mode 100644 index 000000000000..00803c12a337 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactory.java @@ -0,0 +1,71 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * Constructs Filters from JSON. Filters are defined + * as JSON Objects of the form: + * { + * "type" : "FILTER_CLASS_NAME", + * "args" : "FILTER_ARGUMENTS" + * } + * + * For Filters like WhileMatchRowFilter, + * nested Filters are supported. Just serialize a different + * filter in the form (for instance if you wanted to use WhileMatchRowFilter + * with a StopRowFilter: + * + * { + * "type" : "WhileMatchRowFilter", + * "args" : { + * "type" : "StopRowFilter", + * "args" : "ROW_KEY_TO_STOP_ON" + * } + * } + * + * For filters like RowSetFilter, nested Filters AND Filter arrays + * are supported. 
So for instance If one wanted to do a RegExp + * RowFilter UNIONed with a WhileMatchRowFilter(StopRowFilter), + * you would look like this: + * + * { + * "type" : "RowFilterSet", + * "args" : [ + * { + * "type" : "RegExpRowFilter", + * "args" : "MY_REGULAR_EXPRESSION" + * }, + * { + * "type" : "WhileMatchRowFilter" + * "args" : { + * "type" : "StopRowFilter" + * "args" : "MY_STOP_ROW_EXPRESSION" + * } + * } + * ] + * } + */ +public interface FilterFactory extends FilterFactoryConstants { + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactoryConstants.java b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactoryConstants.java new file mode 100644 index 000000000000..e41d0d77dc57 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/FilterFactoryConstants.java @@ -0,0 +1,41 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +public interface FilterFactoryConstants { + static String TYPE = "type"; + static String ARGUMENTS = "args"; + static String COLUMN_NAME = "column_name"; + static String COMPARE_OP = "compare_op"; + static String VALUE = "value"; + + static class MalformedFilterException extends HBaseRestException { + + public MalformedFilterException() { + } + + @Override + public String toString() { + return "malformed filter expression"; + } + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java new file mode 100644 index 000000000000..65392135c251 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/InclusiveStopRowFilterFactory.java @@ -0,0 +1,37 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
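As a concrete illustration of the grammar above, the sketch below builds a ColumnValueFilter from its JSON argument form and a WhileMatchRowFilter wrapping a StopRowFilter from the nested form. The column name, value and row key are invented; the nested case assumes RESTConstants.filterFactories has been populated with the factories in this patch, since WhileMatchRowFilterFactory resolves the inner filter through that map. The compare op is passed straight to CompareOp.valueOf, so the enum constant name (e.g. EQUAL) is used here.

import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
import org.apache.hadoop.hbase.rest.filter.ColumnValueFilterFactory;
import org.apache.hadoop.hbase.rest.filter.WhileMatchRowFilterFactory;

public class FilterJsonExample {
  public static void main(String[] args) throws HBaseRestException {
    // "args" for a ColumnValueFilter: column name, compare op and value.
    String columnValueArgs = "{ \"column_name\" : \"info:status\", "
        + "\"compare_op\" : \"EQUAL\", \"value\" : \"active\" }";
    RowFilterInterface columnValueFilter =
        new ColumnValueFilterFactory().getFilterFromJSON(columnValueArgs);

    // Nested form: the argument to WhileMatchRowFilterFactory is itself a
    // serialized filter, resolved through RESTConstants.filterFactories.
    String whileMatchArgs =
        "{ \"type\" : \"StopRowFilter\", \"args\" : \"row-0099\" }";
    RowFilterInterface whileMatchFilter =
        new WhileMatchRowFilterFactory().getFilterFromJSON(whileMatchArgs);

    System.out.println(columnValueFilter.getClass().getSimpleName());
    System.out.println(whileMatchFilter.getClass().getSimpleName());
  }
}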
+ */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.InclusiveStopRowFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * FilterFactory that construct a InclusiveStopRowFilter + * from a JSON argument String. + * + * It expects that the whole input string consists of only + * the rowKey that you wish to stop on. + */ +public class InclusiveStopRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new InclusiveStopRowFilter(Bytes.toBytes(args)); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/PageRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/PageRowFilterFactory.java new file mode 100644 index 000000000000..35b8a4d63f8e --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/PageRowFilterFactory.java @@ -0,0 +1,34 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.PageRowFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; + +/** + * Constructs a PageRowFilter from a JSON argument String. + * Expects the entire JSON argument string to consist + * of the long that is the length of the pages that you want. + */ +public class PageRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new PageRowFilter(Long.parseLong(args)); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/RegExpRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/RegExpRowFilterFactory.java new file mode 100644 index 000000000000..df72f30acdc1 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/RegExpRowFilterFactory.java @@ -0,0 +1,34 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
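The simpler factories above do not parse JSON at all: the whole argument string is the single value the filter needs. A brief sketch, with an invented row key and page size:

import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.rest.filter.InclusiveStopRowFilterFactory;
import org.apache.hadoop.hbase.rest.filter.PageRowFilterFactory;

public class SimpleFilterFactoryExample {
  public static void main(String[] args) {
    // The entire argument is the (inclusive) row key to stop on.
    RowFilterInterface stop =
        new InclusiveStopRowFilterFactory().getFilterFromJSON("row-0500");

    // The entire argument is the page size, parsed with Long.parseLong.
    RowFilterInterface page =
        new PageRowFilterFactory().getFilterFromJSON("25");

    System.out.println(stop.getClass().getSimpleName());
    System.out.println(page.getClass().getSimpleName());
  }
}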
+ */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RegExpRowFilter; +import org.apache.hadoop.hbase.filter.RowFilterInterface; + +/** + * Constructs a RegExpRowFilter from a JSON argument string. + * Expects the entire JSON arg string to consist of the + * entire regular expression to be used. + */ +public class RegExpRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new RegExpRowFilter(args); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java new file mode 100644 index 000000000000..603ad64b2d49 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/RowFilterSetFactory.java @@ -0,0 +1,115 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import java.util.HashSet; +import java.util.Set; +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.RowFilterSet; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.rest.filter.FilterFactoryConstants.MalformedFilterException; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * Constructs a RowFilterSet from a JSON argument String. + * + * Assumes that the input is a JSONArray consisting of JSON Object version of + * the filters that you wish to mash together in an AND statement. + * + * The Syntax for the individual inner filters are defined by their respective + * FilterFactory. If a filter factory for said Factory does not exist, a + * MalformedFilterJSONException will be thrown. + * + * Currently OR Statements are not supported even though at a later iteration + * they could be supported easily. + */ +public class RowFilterSetFactory implements FilterFactory { + + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException { + JSONArray filterArray; + Set set; + JSONObject filter; + + try { + filterArray = new JSONArray(args); + } catch (JSONException e) { + throw new HBaseRestException(e); + } + + // If only 1 Row, just return the row. 
+ if (filterArray.length() == 1) { + return getRowFilter(filterArray.optJSONObject(0)); + } + + // Otherwise continue + set = new HashSet(); + + for (int i = 0; i < filterArray.length(); i++) { + + // Get FIlter Object + if ((filter = filterArray.optJSONObject(i)) == null) { + throw new MalformedFilterException(); + } + + // Add newly constructed filter to the filter set; + set.add(getRowFilter(filter)); + } + + // Put set into a RowFilterSet and return. + return new RowFilterSet(set); + } + + /** + * A refactored method that encapsulates the creation of a RowFilter given a + * JSONObject with a correct form of: { "type" : "MY_TYPE", "args" : MY_ARGS, + * } + * + * @param filter + * @return + * @throws org.apache.hadoop.hbase.rest.exception.HBaseRestException + */ + protected RowFilterInterface getRowFilter(JSONObject filter) + throws HBaseRestException { + FilterFactory f; + String filterType; + String filterArgs; + + // Get Filter's Type + if ((filterType = filter.optString(FilterFactoryConstants.TYPE)) == null) { + throw new MalformedFilterException(); + } + + // Get Filter Args + if ((filterArgs = filter.optString(FilterFactoryConstants.ARGUMENTS)) == null) { + throw new MalformedFilterException(); + } + + // Get Filter Factory for given Filter Type + if ((f = RESTConstants.filterFactories.get(filterType)) == null) { + throw new MalformedFilterException(); + } + + return f.getFilterFromJSON(filterArgs); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java new file mode 100644 index 000000000000..28caaf62f4b3 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/StopRowFilterFactory.java @@ -0,0 +1,37 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.StopRowFilter; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * FilterFactory that construct a StopRowFilter + * from an Argument String. + * + * It expects that the whole input string consists of only + * the rowKey that you wish to stop on. 
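To see the array form end to end, the sketch below ANDs a RegExpRowFilter and a PageRowFilter through RowFilterSetFactory. The regular expression and page size are invented, and as above it assumes RESTConstants.filterFactories maps the type names used here to the corresponding factories:

import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
import org.apache.hadoop.hbase.rest.filter.RowFilterSetFactory;

public class RowFilterSetExample {
  public static void main(String[] args) throws HBaseRestException {
    // Two wrapped filters; a single-element array would return that filter
    // directly instead of a RowFilterSet.
    String filterSetArgs = "[ "
        + "{ \"type\" : \"RegExpRowFilter\", \"args\" : \"row-.*\" }, "
        + "{ \"type\" : \"PageRowFilter\", \"args\" : \"100\" } ]";

    RowFilterInterface set =
        new RowFilterSetFactory().getFilterFromJSON(filterSetArgs);

    // Expected to be a RowFilterSet combining the two filters with AND.
    System.out.println(set.getClass().getSimpleName());
  }
}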
+ */ +public class StopRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) { + return new StopRowFilter(Bytes.toBytes(args)); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java b/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java new file mode 100644 index 000000000000..bdb2a255916c --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/filter/WhileMatchRowFilterFactory.java @@ -0,0 +1,61 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.hadoop.hbase.filter.RowFilterInterface; +import org.apache.hadoop.hbase.filter.WhileMatchRowFilter; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * Factory to produce WhileMatchRowFilters from JSON + * Expects as an arguement a valid JSON Object in + * String form of another RowFilterInterface. + */ +public class WhileMatchRowFilterFactory implements FilterFactory { + public RowFilterInterface getFilterFromJSON(String args) + throws HBaseRestException { + JSONObject innerFilterJSON; + FilterFactory f; + String innerFilterType; + String innerFilterArgs; + + try { + innerFilterJSON = new JSONObject(args); + } catch (JSONException e) { + throw new HBaseRestException(e); + } + + // Check if filter is correct + if ((innerFilterType = innerFilterJSON.optString(TYPE)) == null) + throw new MalformedFilterException(); + if ((innerFilterArgs = innerFilterJSON.optString(ARGUMENTS)) == null) + throw new MalformedFilterException(); + + if ((f = RESTConstants.filterFactories.get(innerFilterType)) == null) + throw new MalformedFilterException(); + + RowFilterInterface innerFilter = f.getFilterFromJSON(innerFilterArgs); + + return new WhileMatchRowFilter(innerFilter); + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java b/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java new file mode 100644 index 000000000000..8247127410ff --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/HBaseRestParserFactory.java @@ -0,0 +1,56 @@ +/** + * Copyright 2008 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.hbase.rest.Dispatcher.ContentType; + +/** + * + */ +public class HBaseRestParserFactory { + + private static final Map<ContentType, Class<?>> parserMap = + new HashMap<ContentType, Class<?>>(); + + static { + parserMap.put(ContentType.XML, XMLRestParser.class); + parserMap.put(ContentType.JSON, JsonRestParser.class); + } + + public static IHBaseRestParser getParser(ContentType ct) { + IHBaseRestParser parser = null; + + Class<?> clazz = parserMap.get(ct); + try { + parser = (IHBaseRestParser) clazz.newInstance(); + } catch (InstantiationException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } catch (IllegalAccessException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + return parser; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java new file mode 100644 index 000000000000..5663a15a21c8 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/IHBaseRestParser.java @@ -0,0 +1,52 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.util.ArrayList; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + */ +public interface IHBaseRestParser { + /** + * Parses an HTableDescriptor given the input array.
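A hedged usage sketch for the factory: pick a parser by content type and hand it the raw request body. The JSON shape ("name" plus a "column_families" array) matches what JsonRestParser below reads; the table and family names are invented, and ContentType is the Dispatcher enum imported above.

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.rest.Dispatcher.ContentType;
import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
import org.apache.hadoop.hbase.rest.parser.HBaseRestParserFactory;
import org.apache.hadoop.hbase.rest.parser.IHBaseRestParser;
import org.apache.hadoop.hbase.util.Bytes;

public class RestParserExample {
  public static void main(String[] args) throws HBaseRestException {
    String body = "{ \"name\" : \"mytable\", \"column_families\" : "
        + "[ { \"name\" : \"info\", \"max_versions\" : 5 } ] }";

    // The factory returns the JSON or XML parser registered for the type.
    IHBaseRestParser parser = HBaseRestParserFactory.getParser(ContentType.JSON);
    HTableDescriptor table = parser.getTableDescriptor(Bytes.toBytes(body));

    // Unspecified column-family settings fall back to the parser's defaults.
    System.out.println(table);
  }
}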
+ * + * @param input + * @return + * @throws HBaseRestException + */ + public HTableDescriptor getTableDescriptor(byte[] input) + throws HBaseRestException; + + public ArrayList getColumnDescriptors(byte[] input) + throws HBaseRestException; + + public ScannerDescriptor getScannerDescriptor(byte[] input) + throws HBaseRestException; + + public RowUpdateDescriptor getRowUpdateDescriptor(byte[] input, + byte[][] pathSegments) throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java new file mode 100644 index 000000000000..e1f1180dee58 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/JsonRestParser.java @@ -0,0 +1,235 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.util.ArrayList; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.util.Bytes; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +/** + * + */ +public class JsonRestParser implements IHBaseRestParser { + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getTableDescriptor + * (byte[]) + */ + public HTableDescriptor getTableDescriptor(byte[] input) + throws HBaseRestException { + try { + JSONObject o; + HTableDescriptor h; + JSONArray columnDescriptorArray; + o = new JSONObject(new String(input)); + columnDescriptorArray = o.getJSONArray("column_families"); + h = new HTableDescriptor(o.getString("name")); + + for (int i = 0; i < columnDescriptorArray.length(); i++) { + JSONObject json_columnDescriptor = columnDescriptorArray + .getJSONObject(i); + h.addFamily(this.getColumnDescriptor(json_columnDescriptor)); + } + return h; + } catch (Exception e) { + throw new HBaseRestException(e); + } + } + + private HColumnDescriptor getColumnDescriptor(JSONObject jsonObject) + throws JSONException { + String strTemp; + strTemp = jsonObject.getString("name"); + if (strTemp.charAt(strTemp.length() - 1) != ':') { + strTemp += ":"; + } + + byte[] name = Bytes.toBytes(strTemp); + + int maxVersions; + HColumnDescriptor.CompressionType cType; + boolean inMemory; + boolean blockCacheEnabled; + int maxValueLength; + int timeToLive; + boolean bloomfilter; + + try { + bloomfilter = jsonObject.getBoolean("bloomfilter"); + } catch 
(JSONException e) { + bloomfilter = false; + } + + try { + maxVersions = jsonObject.getInt("max_versions"); + } catch (JSONException e) { + maxVersions = 3; + } + + try { + cType = HColumnDescriptor.CompressionType.valueOf(jsonObject + .getString("compression_type")); + } catch (JSONException e) { + cType = HColumnDescriptor.CompressionType.NONE; + } + + try { + inMemory = jsonObject.getBoolean("in_memory"); + } catch (JSONException e) { + inMemory = false; + } + + try { + blockCacheEnabled = jsonObject.getBoolean("block_cache_enabled"); + } catch (JSONException e) { + blockCacheEnabled = false; + } + + try { + maxValueLength = jsonObject.getInt("max_value_length"); + } catch (JSONException e) { + maxValueLength = 2147483647; + } + + try { + timeToLive = jsonObject.getInt("time_to_live"); + } catch (JSONException e) { + timeToLive = Integer.MAX_VALUE; + } + + return new HColumnDescriptor(name, maxVersions, cType, inMemory, + blockCacheEnabled, maxValueLength, timeToLive, bloomfilter); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getColumnDescriptors + * (byte[]) + */ + public ArrayList getColumnDescriptors(byte[] input) + throws HBaseRestException { + ArrayList columns = new ArrayList(); + try { + JSONObject o; + JSONArray columnDescriptorArray; + o = new JSONObject(new String(input)); + columnDescriptorArray = o.getJSONArray("column_families"); + + for (int i = 0; i < columnDescriptorArray.length(); i++) { + JSONObject json_columnDescriptor = columnDescriptorArray + .getJSONObject(i); + columns.add(this.getColumnDescriptor(json_columnDescriptor)); + } + } catch (JSONException e) { + throw new HBaseRestException("Error Parsing json input", e); + } + + return columns; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getScannerDescriptor + * (byte[]) + */ + public ScannerDescriptor getScannerDescriptor(byte[] input) + throws HBaseRestException { + JSONObject scannerDescriptor; + JSONArray columnArray; + + byte[][] columns = null; + long timestamp; + byte[] startRow; + byte[] stopRow; + String filters; + + try { + scannerDescriptor = new JSONObject(new String(input)); + + columnArray = scannerDescriptor.optJSONArray(RESTConstants.COLUMNS); + timestamp = scannerDescriptor.optLong(RESTConstants.SCANNER_TIMESTAMP); + startRow = Bytes.toBytes(scannerDescriptor.optString( + RESTConstants.SCANNER_START_ROW, "")); + stopRow = Bytes.toBytes(scannerDescriptor.optString( + RESTConstants.SCANNER_STOP_ROW, "")); + filters = scannerDescriptor.optString(RESTConstants.SCANNER_FILTER); + + if (columnArray != null) { + columns = new byte[columnArray.length()][]; + for (int i = 0; i < columnArray.length(); i++) { + columns[i] = Bytes.toBytes(columnArray.optString(i)); + } + } + + return new ScannerDescriptor(columns, timestamp, startRow, stopRow, + filters); + } catch (JSONException e) { + throw new HBaseRestException("error parsing json string", e); + } + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getRowUpdateDescriptor + * (byte[], byte[][]) + */ + public RowUpdateDescriptor getRowUpdateDescriptor(byte[] input, + byte[][] pathSegments) throws HBaseRestException { + + RowUpdateDescriptor rud = new RowUpdateDescriptor(); + JSONArray a; + + rud.setTableName(Bytes.toString(pathSegments[0])); + rud.setRowName(Bytes.toString(pathSegments[2])); + + try { + JSONObject updateObject = new JSONObject(new String(input)); + a = 
updateObject.getJSONArray(RESTConstants.COLUMNS); + for (int i = 0; i < a.length(); i++) { + rud.getColVals().put( + Bytes.toBytes(a.getJSONObject(i).getString(RESTConstants.NAME)), + org.apache.hadoop.hbase.util.Base64.decode(a.getJSONObject(i) + .getString(RESTConstants.VALUE))); + } + } catch (JSONException e) { + throw new HBaseRestException("Error parsing row update json", e); + } + return rud; + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java b/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java new file mode 100644 index 000000000000..a8037e85b5fd --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/parser/XMLRestParser.java @@ -0,0 +1,291 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.parser; + +import java.io.ByteArrayInputStream; +import java.util.ArrayList; + +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType; +import org.apache.hadoop.hbase.rest.RESTConstants; +import org.apache.hadoop.hbase.rest.descriptors.RowUpdateDescriptor; +import org.apache.hadoop.hbase.rest.descriptors.ScannerDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.thrift.generated.Hbase; +import org.apache.hadoop.hbase.util.Bytes; +import org.w3c.dom.Document; +import org.w3c.dom.Element; +import org.w3c.dom.Node; +import org.w3c.dom.NodeList; + +/** + * + */ +public class XMLRestParser implements IHBaseRestParser { + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getTableDescriptor + * (byte[]) + */ + public HTableDescriptor getTableDescriptor(byte[] input) + throws HBaseRestException { + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory + .newInstance(); + docBuilderFactory.setIgnoringComments(true); + + DocumentBuilder builder = null; + Document doc = null; + HTableDescriptor htd = null; + + try { + builder = docBuilderFactory.newDocumentBuilder(); + ByteArrayInputStream is = new ByteArrayInputStream(input); + doc = builder.parse(is); + } catch (Exception e) { + throw new HBaseRestException(e); + } + + try { + Node name_node = doc.getElementsByTagName("name").item(0); + String table_name = name_node.getFirstChild().getNodeValue(); + + htd = new HTableDescriptor(table_name); + NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); + for (int i = 0; i < columnfamily_nodes.getLength(); i++) { + Element columnfamily = (Element) columnfamily_nodes.item(i); + 
htd.addFamily(this.getColumnDescriptor(columnfamily)); + } + } catch (Exception e) { + throw new HBaseRestException(e); + } + return htd; + } + + public HColumnDescriptor getColumnDescriptor(Element columnfamily) { + return this.getColumnDescriptor(columnfamily, null); + } + + private HColumnDescriptor getColumnDescriptor(Element columnfamily, + HTableDescriptor currentTDesp) { + Node name_node = columnfamily.getElementsByTagName("name").item(0); + String colname = makeColumnName(name_node.getFirstChild().getNodeValue()); + + int max_versions = HColumnDescriptor.DEFAULT_VERSIONS; + CompressionType compression = HColumnDescriptor.DEFAULT_COMPRESSION; + boolean in_memory = HColumnDescriptor.DEFAULT_IN_MEMORY; + boolean block_cache = HColumnDescriptor.DEFAULT_BLOCKCACHE; + int max_cell_size = HColumnDescriptor.DEFAULT_LENGTH; + int ttl = HColumnDescriptor.DEFAULT_TTL; + boolean bloomfilter = HColumnDescriptor.DEFAULT_BLOOMFILTER; + + if (currentTDesp != null) { + HColumnDescriptor currentCDesp = currentTDesp.getFamily(Bytes + .toBytes(colname)); + if (currentCDesp != null) { + max_versions = currentCDesp.getMaxVersions(); + // compression = currentCDesp.getCompression(); + in_memory = currentCDesp.isInMemory(); + block_cache = currentCDesp.isBlockCacheEnabled(); + max_cell_size = currentCDesp.getMaxValueLength(); + ttl = currentCDesp.getTimeToLive(); + bloomfilter = currentCDesp.isBloomfilter(); + } + } + + NodeList max_versions_list = columnfamily + .getElementsByTagName("max-versions"); + if (max_versions_list.getLength() > 0) { + max_versions = Integer.parseInt(max_versions_list.item(0).getFirstChild() + .getNodeValue()); + } + + NodeList compression_list = columnfamily + .getElementsByTagName("compression"); + if (compression_list.getLength() > 0) { + compression = CompressionType.valueOf(compression_list.item(0) + .getFirstChild().getNodeValue()); + } + + NodeList in_memory_list = columnfamily.getElementsByTagName("in-memory"); + if (in_memory_list.getLength() > 0) { + in_memory = Boolean.valueOf(in_memory_list.item(0).getFirstChild() + .getNodeValue()); + } + + NodeList block_cache_list = columnfamily + .getElementsByTagName("block-cache"); + if (block_cache_list.getLength() > 0) { + block_cache = Boolean.valueOf(block_cache_list.item(0).getFirstChild() + .getNodeValue()); + } + + NodeList max_cell_size_list = columnfamily + .getElementsByTagName("max-cell-size"); + if (max_cell_size_list.getLength() > 0) { + max_cell_size = Integer.valueOf(max_cell_size_list.item(0) + .getFirstChild().getNodeValue()); + } + + NodeList ttl_list = columnfamily.getElementsByTagName("time-to-live"); + if (ttl_list.getLength() > 0) { + ttl = Integer.valueOf(ttl_list.item(0).getFirstChild().getNodeValue()); + } + + NodeList bloomfilter_list = columnfamily + .getElementsByTagName("bloomfilter"); + if (bloomfilter_list.getLength() > 0) { + bloomfilter = Boolean.valueOf(bloomfilter_list.item(0).getFirstChild() + .getNodeValue()); + } + + HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(colname), + max_versions, compression, in_memory, block_cache, max_cell_size, ttl, + bloomfilter); + + NodeList metadataList = columnfamily.getElementsByTagName("metadata"); + for (int i = 0; i < metadataList.getLength(); i++) { + Element metadataColumn = (Element) metadataList.item(i); + // extract the name and value children + Node mname_node = metadataColumn.getElementsByTagName("name").item(0); + String mname = mname_node.getFirstChild().getNodeValue(); + Node mvalue_node = 
metadataColumn.getElementsByTagName("value").item(0); + String mvalue = mvalue_node.getFirstChild().getNodeValue(); + hcd.setValue(mname, mvalue); + } + + return hcd; + } + + protected String makeColumnName(String column) { + String returnColumn = column; + if (column.indexOf(':') == -1) + returnColumn += ':'; + return returnColumn; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getColumnDescriptors + * (byte[]) + */ + public ArrayList getColumnDescriptors(byte[] input) + throws HBaseRestException { + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory + .newInstance(); + docBuilderFactory.setIgnoringComments(true); + + DocumentBuilder builder = null; + Document doc = null; + ArrayList columns = new ArrayList(); + + try { + builder = docBuilderFactory.newDocumentBuilder(); + ByteArrayInputStream is = new ByteArrayInputStream(input); + doc = builder.parse(is); + } catch (Exception e) { + throw new HBaseRestException(e); + } + + NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily"); + for (int i = 0; i < columnfamily_nodes.getLength(); i++) { + Element columnfamily = (Element) columnfamily_nodes.item(i); + columns.add(this.getColumnDescriptor(columnfamily)); + } + + return columns; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getScannerDescriptor + * (byte[]) + */ + public ScannerDescriptor getScannerDescriptor(byte[] input) + throws HBaseRestException { + // TODO Auto-generated method stub + return null; + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.parser.IHBaseRestParser#getRowUpdateDescriptor + * (byte[], byte[][]) + */ + public RowUpdateDescriptor getRowUpdateDescriptor(byte[] input, + byte[][] pathSegments) throws HBaseRestException { + RowUpdateDescriptor rud = new RowUpdateDescriptor(); + + rud.setTableName(Bytes.toString(pathSegments[0])); + rud.setRowName(Bytes.toString(pathSegments[2])); + + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory + .newInstance(); + docBuilderFactory.setIgnoringComments(true); + + DocumentBuilder builder = null; + Document doc = null; + + try { + builder = docBuilderFactory.newDocumentBuilder(); + ByteArrayInputStream is = new ByteArrayInputStream(input); + doc = builder.parse(is); + } catch (Exception e) { + throw new HBaseRestException(e.getMessage(), e); + } + + NodeList cell_nodes = doc.getElementsByTagName(RESTConstants.COLUMN); + System.out.println("cell_nodes.length: " + cell_nodes.getLength()); + for (int i = 0; i < cell_nodes.getLength(); i++) { + String columnName = null; + byte[] value = null; + + Element cell = (Element) cell_nodes.item(i); + + NodeList item = cell.getElementsByTagName(RESTConstants.NAME); + if (item.getLength() > 0) { + columnName = item.item(0).getFirstChild().getNodeValue(); + } + + NodeList item1 = cell.getElementsByTagName(RESTConstants.VALUE); + if (item1.getLength() > 0) { + value = org.apache.hadoop.hbase.util.Base64.decode(item1 + .item(0).getFirstChild().getNodeValue()); + } + + if (columnName != null && value != null) { + rud.getColVals().put(columnName.getBytes(), value); + } + } + return rud; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java new file mode 100644 index 000000000000..c055e7fe2c00 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/AbstractRestSerializer.java @@ -0,0 +1,58 @@ 
+/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import javax.servlet.http.HttpServletResponse; + +/** + * + * Abstract object that is used as the base of all serializers in the + * REST based interface. + */ +public abstract class AbstractRestSerializer implements IRestSerializer { + + // keep the response object to write back to the stream + protected final HttpServletResponse response; + // Used to denote if pretty printing of the output should be used + protected final boolean prettyPrint; + + /** + * marking the default constructor as private so it will never be used. + */ + @SuppressWarnings("unused") + private AbstractRestSerializer() { + response = null; + prettyPrint = false; + } + + /** + * Public constructor for AbstractRestSerializer. This is the constructor that + * should be called whenever creating a RestSerializer object. + * + * @param response + */ + public AbstractRestSerializer(HttpServletResponse response, + boolean prettyPrint) { + super(); + this.response = response; + this.prettyPrint = prettyPrint; + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java new file mode 100644 index 000000000000..e91db354d1ff --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/IRestSerializer.java @@ -0,0 +1,173 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.serializer; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata; +import org.apache.hadoop.hbase.rest.Status.StatusMessage; +import org.apache.hadoop.hbase.rest.TableModel.Regions; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + * Interface that is implemented to return serialized objects back to + * the output stream. + */ +public interface IRestSerializer { + /** + * Serializes an object into the appropriate format and writes it to the + * output stream. + * + * This is the main point of entry when for an object to be serialized to the + * output stream. + * + * @param o + * @throws HBaseRestException + */ + public void writeOutput(Object o) throws HBaseRestException; + + /** + * serialize the database metadata + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param databaseMetadata + * @throws HBaseRestException + */ + public void serializeDatabaseMetadata(DatabaseMetadata databaseMetadata) + throws HBaseRestException; + + /** + * serialize the HTableDescriptor object + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param tableDescriptor + * @throws HBaseRestException + */ + public void serializeTableDescriptor(HTableDescriptor tableDescriptor) + throws HBaseRestException; + + /** + * serialize an HColumnDescriptor to the output stream. 
+ * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param column + * @throws HBaseRestException + */ + public void serializeColumnDescriptor(HColumnDescriptor column) + throws HBaseRestException; + + /** + * serialize the region data for a table to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param regions + * @throws HBaseRestException + */ + public void serializeRegionData(Regions regions) throws HBaseRestException; + + /** + * serialize the status message object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param message + * @throws HBaseRestException + */ + public void serializeStatusMessage(StatusMessage message) + throws HBaseRestException; + + /** + * serialize the ScannerIdentifier object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param scannerIdentifier + * @throws HBaseRestException + */ + public void serializeScannerIdentifier(ScannerIdentifier scannerIdentifier) + throws HBaseRestException; + + /** + * serialize a RowResult object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param rowResult + * @throws HBaseRestException + */ + public void serializeRowResult(RowResult rowResult) throws HBaseRestException; + + /** + * serialize a RowResult array to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param rows + * @throws HBaseRestException + */ + public void serializeRowResultArray(RowResult[] rows) + throws HBaseRestException; + + /** + * serialize a cell object to the output stream + * + * Implementation of this method is optional, IF all the work is done in the + * writeOutput(Object o) method + * + * @param cell + * @throws HBaseRestException + */ + public void serializeCell(Cell cell) throws HBaseRestException; + + /** + * serialize a Cell array to the output stream + * + * @param cells + * @throws HBaseRestException + */ + public void serializeCellArray(Cell[] cells) throws HBaseRestException; + + + /** + * serialize a description of the timestamps available for a row + * to the output stream. + * + * @param timestampsDescriptor + * @throws HBaseRestException + */ + public void serializeTimestamps(TimestampsDescriptor timestampsDescriptor) throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java b/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java new file mode 100644 index 000000000000..d4828543bbc9 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/ISerializable.java @@ -0,0 +1,42 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + * Interface for objects that wish to write back to the REST based + * interface output stream. Objects should implement this interface, + * then use the IRestSerializer passed to it to call the appropriate + * serialization method. + */ +public interface ISerializable { + /** + * visitor pattern method where the object implementing this interface will + * call back on the IRestSerializer with the correct method to run to + * serialize the output of the object to the stream. + * + * @param serializer + * @throws HBaseRestException + */ + public void restSerialize(IRestSerializer serializer) + throws HBaseRestException; +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/JSONSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/JSONSerializer.java new file mode 100644 index 000000000000..d54df8d44094 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/JSONSerializer.java @@ -0,0 +1,213 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata; +import org.apache.hadoop.hbase.rest.Status.StatusMessage; +import org.apache.hadoop.hbase.rest.TableModel.Regions; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +import agilejson.JSON; + +/** + * + * Serializes objects into JSON strings and prints them back out on the output + * stream. It should be noted that this JSON implementation uses annotations on + * the objects to be serialized. + * + * Since these annotations are used to describe the serialization of the objects + * the only method that is implemented is writeOutput(Object o). The other + * methods in the interface do not need to be implemented. 
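To make the annotation-driven flow concrete: any object whose getters carry @TOJSON (for example the ScannerIdentifier added in this patch) can be converted by agilejson and written back by writeOutput(Object). The sketch below calls JSON.toJSON directly so no servlet response is needed; the scanner id is invented, and since the patch does not show what toJSON throws, the example catches Exception broadly, as writeOutput itself does.

import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier;

import agilejson.JSON;

public class AgileJsonExample {
  public static void main(String[] args) {
    ScannerIdentifier scanner = new ScannerIdentifier(Integer.valueOf(7));
    try {
      // getId() is annotated with @TOJSON, so it is what agilejson is
      // expected to emit; JSONSerializer.writeOutput wraps this same call
      // and sets the application/json content type on the response.
      String json = JSON.toJSON(scanner);
      System.out.println(json);
    } catch (Exception e) {
      // writeOutput rethrows failures as HBaseRestException.
      e.printStackTrace();
    }
  }
}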
+ */ +public class JSONSerializer extends AbstractRestSerializer { + + /** + * @param response + */ + public JSONSerializer(HttpServletResponse response) { + super(response, false); + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#writeOutput(java + * .lang.Object, javax.servlet.http.HttpServletResponse) + */ + public void writeOutput(Object o) throws HBaseRestException { + response.setContentType("application/json"); + + try { + // LOG.debug("At top of send data"); + String data = JSON.toJSON(o); + response.setContentLength(data.length()); + response.getWriter().println(data); + } catch (Exception e) { + // LOG.debug("Error sending data: " + e.toString()); + throw new HBaseRestException(e); + } + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) + */ + public void serializeColumnDescriptor(HColumnDescriptor column) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeDatabaseMetadata + * (org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata) + */ + public void serializeDatabaseMetadata(DatabaseMetadata databaseMetadata) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRegionData + * (org.apache.hadoop.hbase.rest.TableModel.Regions) + */ + public void serializeRegionData(Regions regions) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) + */ + public void serializeTableDescriptor(HTableDescriptor tableDescriptor) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeStatusMessage + * (org.apache.hadoop.hbase.rest.Status.StatusMessage) + */ + public void serializeStatusMessage(StatusMessage message) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer# + * serializeScannerIdentifier(org.apache.hadoop.hbase.rest.ScannerIdentifier) + */ + public void serializeScannerIdentifier(ScannerIdentifier scannerIdentifier) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResult + * (org.apache.hadoop.hbase.io.RowResult) + */ + public void serializeRowResult(RowResult rowResult) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResultArray + * (org.apache.hadoop.hbase.io.RowResult[]) + */ + public void serializeRowResultArray(RowResult[] rows) + throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCell(org + * .apache.hadoop.hbase.io.Cell) 
+ */ + public void serializeCell(Cell cell) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCellArray + * (org.apache.hadoop.hbase.io.Cell[]) + */ + public void serializeCellArray(Cell[] cells) throws HBaseRestException { + // No implementation needed for the JSON serializer + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeTimestamps + * (org.apache.hadoop.hbase.rest.RowModel.TimestampsDescriptor) + */ + public void serializeTimestamps(TimestampsDescriptor timestampsDescriptor) + throws HBaseRestException { + // No implementation needed for the JSON serializer + } + +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java b/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java new file mode 100644 index 000000000000..9284da0de8c6 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/RestSerializerFactory.java @@ -0,0 +1,56 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hbase.rest.Dispatcher.ContentType; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; + +/** + * + * Factory used to return a Rest Serializer tailored to the HTTP + * Requesters accept type in the header. + * + */ +public class RestSerializerFactory { + + public static AbstractRestSerializer getSerializer( + HttpServletRequest request, HttpServletResponse response) + throws HBaseRestException { + ContentType ct = ContentType.getContentType(request.getHeader("accept")); + AbstractRestSerializer serializer = null; + + // TODO refactor this so it uses reflection to create the new objects. + switch (ct) { + case XML: + serializer = new SimpleXMLSerializer(response); + break; + case JSON: + serializer = new JSONSerializer(response); + break; + default: + serializer = new SimpleXMLSerializer(response); + break; + } + return serializer; + } +} diff --git a/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java b/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java new file mode 100644 index 000000000000..12b30a841857 --- /dev/null +++ b/src/java/org/apache/hadoop/hbase/rest/serializer/SimpleXMLSerializer.java @@ -0,0 +1,464 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.serializer; + +import java.io.IOException; +import java.io.PrintWriter; + +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.io.Cell; +import org.apache.hadoop.hbase.io.RowResult; +import org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata; +import org.apache.hadoop.hbase.rest.Status.StatusMessage; +import org.apache.hadoop.hbase.rest.TableModel.Regions; +import org.apache.hadoop.hbase.rest.descriptors.RestCell; +import org.apache.hadoop.hbase.rest.descriptors.ScannerIdentifier; +import org.apache.hadoop.hbase.rest.descriptors.TimestampsDescriptor; +import org.apache.hadoop.hbase.rest.exception.HBaseRestException; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * + * Basic first pass at implementing an XML serializer for the REST interface. + * This should probably be refactored into something better. + * + */ +public class SimpleXMLSerializer extends AbstractRestSerializer { + + private final AbstractPrinter printer; + + /** + * @param response + * @throws HBaseRestException + */ + @SuppressWarnings("synthetic-access") + public SimpleXMLSerializer(HttpServletResponse response) + throws HBaseRestException { + super(response, false); + printer = new SimplePrinter(response); + } + + @SuppressWarnings("synthetic-access") + public SimpleXMLSerializer(HttpServletResponse response, boolean prettyPrint) + throws HBaseRestException { + super(response, prettyPrint); + if (prettyPrint) { + printer = new PrettyPrinter(response); + } else { + printer = new SimplePrinter(response); + } + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#writeOutput(java + * .lang.Object, java.io.OutputStream) + */ + public void writeOutput(Object o) throws HBaseRestException { + response.setContentType("text/xml"); + response.setCharacterEncoding(HConstants.UTF8_ENCODING); + + if (o instanceof ISerializable) { + ((ISerializable) o).restSerialize(this); + } else if (o.getClass().isArray() + && o.getClass().getComponentType() == RowResult.class) { + this.serializeRowResultArray((RowResult[]) o); + } else if (o.getClass().isArray() + && o.getClass().getComponentType() == Cell.class) { + this.serializeCellArray((Cell[]) o); + } else { + throw new HBaseRestException( + "Object does not conform to the ISerializable " + + "interface. 
Unable to generate xml output.");
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer#
+ * serializeDatabaseMetadata
+ * (org.apache.hadoop.hbase.rest.DatabaseModel.DatabaseMetadata)
+ */
+ public void serializeDatabaseMetadata(DatabaseMetadata databaseMetadata)
+ throws HBaseRestException {
+ printer.print("<tables>");
+ for (HTableDescriptor table : databaseMetadata.getTables()) {
+ table.restSerialize(this);
+ }
+ printer.print("</tables>");
+ printer.flush();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer#
+ * serializeTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
+ */
+ public void serializeTableDescriptor(HTableDescriptor tableDescriptor)
+ throws HBaseRestException {
+ printer.print("<table>");
+ // name element
+ printer.print("<name>");
+ printer.print(tableDescriptor.getNameAsString());
+ printer.print("</name>");
+ // column families
+ printer.print("<columnfamilies>");
+ for (HColumnDescriptor column : tableDescriptor.getColumnFamilies()) {
+ column.restSerialize(this);
+ }
+ printer.print("</columnfamilies>");
+ printer.print("</table>");
+ printer.flush();
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer#
+ * serializeColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor)
+ */
+ public void serializeColumnDescriptor(HColumnDescriptor column)
+ throws HBaseRestException {
+
+ printer.print("<columnfamily>");
+ // name
+ printer.print("<name>");
+ printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(column.getName()));
+ printer.print("</name>");
+ // compression
+ printer.print("<compression>");
+ printer.print(column.getCompression().toString());
+ printer.print("</compression>");
+ // bloomfilter
+ printer.print("<bloomfilter>");
+ printer.print(column.getCompressionType().toString());
+ printer.print("</bloomfilter>");
+ // max-versions
+ printer.print("<max-versions>");
+ printer.print(column.getMaxVersions());
+ printer.print("</max-versions>");
+ // max-length
+ printer.print("<max-length>");
+ printer.print(column.getMaxValueLength());
+ printer.print("</max-length>");
+ printer.print("</columnfamily>");
+ printer.flush();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRegionData
+ * (org.apache.hadoop.hbase.rest.TableModel.Regions)
+ */
+ public void serializeRegionData(Regions regions) throws HBaseRestException {
+
+ printer.print("<regions>");
+ for (byte[] region : regions.getRegionKey()) {
+ printer.print("<region>");
+ printer.print(Bytes.toString(region));
+ printer.print("</region>");
+ }
+ printer.print("</regions>");
+ printer.flush();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeStatusMessage
+ * (org.apache.hadoop.hbase.rest.Status.StatusMessage)
+ */
+ public void serializeStatusMessage(StatusMessage message)
+ throws HBaseRestException {
+
+ printer.print("<status>");
+ printer.print("<code>");
+ printer.print(message.getStatusCode());
+ printer.print("</code>");
+ printer.print("<message>");
+ printer.print(message.getMessage().toString());
+ printer.print("</message>");
+ printer.print("<error>");
+ printer.print(message.getError());
+ printer.print("</error>");
+ printer.print("</status>");
+ printer.flush();
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @seeorg.apache.hadoop.hbase.rest.serializer.IRestSerializer#
+ * serializeScannerIdentifier(org.apache.hadoop.hbase.rest.ScannerIdentifier)
+ */
+ public void serializeScannerIdentifier(ScannerIdentifier scannerIdentifier)
+ throws HBaseRestException {
+
+ printer.print("<scanner>");
+ printer.print("<id>");
+ printer.print(scannerIdentifier.getId());
+ printer.print("</id>");
+ printer.print("</scanner>");
+ printer.flush();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResult
+ * (org.apache.hadoop.hbase.io.RowResult)
+ */
+ public void serializeRowResult(RowResult rowResult) throws HBaseRestException {
+
+ printer.print("<row>");
+ printer.print("<name>");
+ printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(rowResult.getRow()));
+ printer.print("</name>");
+ printer.print("<columns>");
+ for (RestCell cell : rowResult.getCells()) {
+ printer.print("<column>");
+ printer.print("<name>");
+ printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(cell.getName()));
+ printer.print("</name>");
+ printer.print("<timestamp>");
+ printer.print(cell.getTimestamp());
+ printer.print("</timestamp>");
+ printer.print("<value>");
+ printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(cell.getValue()));
+ printer.print("</value>");
+ printer.print("</column>");
+ printer.flush();
+ }
+ printer.print("</columns>");
+ printer.print("</row>");
+ printer.flush();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeRowResultArray
+ * (org.apache.hadoop.hbase.io.RowResult[])
+ */
+ public void serializeRowResultArray(RowResult[] rows)
+ throws HBaseRestException {
+ printer.print("<rows>");
+ for (RowResult row : rows) {
+ row.restSerialize(this);
+ }
+ printer.print("</rows>");
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCell(org
+ * .apache.hadoop.hbase.io.Cell)
+ */
+ public void serializeCell(Cell cell) throws HBaseRestException {
+ printer.print("<cell>");
+ printer.print("<value>");
+ printer.print(org.apache.hadoop.hbase.util.Base64.encodeBytes(cell.getValue()));
+ printer.print("</value>");
+ printer.print("<timestamp>");
+ printer.print(cell.getTimestamp());
+ printer.print("</timestamp>");
+ printer.print("</cell>");
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeCellArray
+ * (org.apache.hadoop.hbase.io.Cell[])
+ */
+ public void serializeCellArray(Cell[] cells) throws HBaseRestException {
+ printer.print("<cells>");
+ for (Cell cell : cells) {
+ cell.restSerialize(this);
+ }
+ printer.print("</cells>");
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.IRestSerializer#serializeTimestamps
+ * (org.apache.hadoop.hbase.rest.RowModel.TimestampsDescriptor)
+ */
+ public void serializeTimestamps(TimestampsDescriptor timestampsDescriptor)
+ throws HBaseRestException {
+ // TODO Auto-generated method stub
+
+ }
+
+ // Private classes used for printing the output
+
+ private interface IPrinter {
+ public void print(String output);
+
+ public void print(int output);
+
+ public void print(long output);
+
+ public void print(boolean output);
+
+ public void flush();
+ }
+
+ private abstract class AbstractPrinter implements IPrinter {
+ protected final PrintWriter writer;
+
+ @SuppressWarnings("unused")
+ private AbstractPrinter() {
+ writer = null;
+ }
+
+ public AbstractPrinter(HttpServletResponse response)
+ throws HBaseRestException {
+ try {
+ writer = response.getWriter();
+ } catch (IOException e) {
+ throw new HBaseRestException(e.getMessage(), e);
+ }
+ }
+
+ public void flush() {
+ writer.flush();
+ }
+ }
+
+ private class SimplePrinter extends AbstractPrinter {
+ private SimplePrinter(HttpServletResponse response)
+ throws HBaseRestException {
+ super(response);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.Printer#print
+ * (java.io.PrintWriter, java.lang.String)
+ */
+ public void print(final String output) {
+ writer.print(output);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#
+ * print(int)
+ */
+ public void print(int output) {
+ writer.print(output);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#
+ * print(long)
+ */
+ public void print(long output) {
+ writer.print(output);
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#print(boolean)
+ */
+ public void print(boolean output) {
+ writer.print(output);
+ }
+ }
+
+ private class PrettyPrinter extends AbstractPrinter {
+ private PrettyPrinter(HttpServletResponse response)
+ throws HBaseRestException {
+ super(response);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.Printer#print
+ * (java.io.PrintWriter, java.lang.String)
+ */
+ public void print(String output) {
+ writer.println(output);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#
+ * print(int)
+ */ + public void print(int output) { + writer.println(output); + + } + + /* + * (non-Javadoc) + * + * @see + * org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter# + * print(long) + */ + public void print(long output) { + writer.println(output); + } + + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.rest.serializer.SimpleXMLSerializer.IPrinter#print(boolean) + */ + public void print(boolean output) { + writer.println(output); + } + } +} diff --git a/src/webapps/rest/WEB-INF/web.xml b/src/webapps/rest/WEB-INF/web.xml index f9db246ff5c3..01aa3b73c428 100644 --- a/src/webapps/rest/WEB-INF/web.xml +++ b/src/webapps/rest/WEB-INF/web.xml @@ -1,14 +1,14 @@ - rest + jsonrest - Hbase REST Interface + Hbase JSONREST Interface api api org.apache.hadoop.hbase.rest.Dispatcher api - /* + /api/*
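
A note for reviewers trying the patch out: the class below is not part of the change. ExampleMetadataHandler and writeTableMetadata are hypothetical names used only to sketch the intended call path, and the sketch assumes AbstractRestSerializer exposes IRestSerializer's writeOutput(Object) method; only RestSerializerFactory, the serializers added above, and HTableDescriptor come from the patch itself.

// Sketch only; not included in the patch.
package org.apache.hadoop.hbase.rest.example;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.rest.exception.HBaseRestException;
import org.apache.hadoop.hbase.rest.serializer.AbstractRestSerializer;
import org.apache.hadoop.hbase.rest.serializer.RestSerializerFactory;

public class ExampleMetadataHandler {

  /**
   * Writes a table's schema back to the client in whatever format the
   * request's "accept" header asked for: JSON via the AgileJSON annotations,
   * otherwise XML via the ISerializable restSerialize() callback.
   */
  public void writeTableMetadata(HttpServletRequest request,
      HttpServletResponse response, HTableDescriptor tableDescriptor)
      throws HBaseRestException {
    // The factory maps the accept header onto JSONSerializer or
    // SimpleXMLSerializer (XML is the default).
    AbstractRestSerializer serializer =
        RestSerializerFactory.getSerializer(request, response);
    // JSONSerializer reflects over the @TOJSON-annotated getters;
    // SimpleXMLSerializer dispatches to serializeTableDescriptor() through
    // the object's restSerialize() callback.
    serializer.writeOutput(tableDescriptor);
  }
}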