diff --git a/pom.xml b/pom.xml
index d6061ead..b4564958 100644
--- a/pom.xml
+++ b/pom.xml
@@ -61,6 +61,7 @@
kafkaspark
+ resthbase-connectors-assembly
@@ -154,6 +155,16 @@
3.0.1-b08hbase-hadoop2-compatfalse
+ 2.10.1
+ 1.3.8
+ 2.3.2
+ 1.0.1
+ 2.28.2
+ 1.3.9-1
+ 1.60
+ 4.2.0-incubating
+ 1.1.0
+ com.google.protobuf
@@ -290,6 +301,36 @@
hbase-mapreduce${hbase.version}
+
+ org.apache.kerby
+ kerb-core
+ ${kerby.version}
+
+
+ org.apache.kerby
+ kerb-simplekdc
+ ${kerby.version}
+
+
+ org.apache.hbase
+ hbase-http
+ ${hbase.version}
+
+
+ org.apache.hbase.thirdparty
+ hbase-shaded-protobuf
+ ${hbase-thirdparty.version}
+
+
+ com.github.stephenc.findbugs
+ findbugs-annotations
+ ${findbugs-annotations.version}
+
+
+ org.apache.htrace
+ htrace-core4
+ ${htrace.version}
+ hbase-itorg.apache.hbase
@@ -303,6 +344,18 @@
${hbase.version}test
+
+ org.apache.hadoop
+ hadoop-minikdc
+ ${hadoop-three.version}
+ test
+
+
+ org.mockito
+ mockito-core
+ ${mockito-core.version}
+ test
+
@@ -343,7 +396,7 @@
org.apache.maven.pluginsmaven-shade-plugin
- 3.2.1
+ 3.1.1org.apache.maven.plugins
@@ -431,6 +484,45 @@
true
+
+ net.revelc.code
+ warbucks-maven-plugin
+ ${maven.warbucks.version}
+
+ false
+
+
+
+ (?!.*(.generated.|.tmpl.|\$)).*
+ false
+ true
+ false
+ false
+ false
+ org[.]apache[.]yetus[.]audience[.]InterfaceAudience.*
+
+
+
+
+
+ run-warbucks
+
+ check
+
+
+
+
+
+ org.xolstice.maven.plugins
+ protobuf-maven-plugin
+ ${protobuf.plugin.version}
+
+ ${external.protobuf.groupid}:protoc:${external.protobuf.version}:exe:${os.detected.classifier}
+ ${basedir}/src/main/protobuf/
+ false
+ true
+
+
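
Note on the warbucks-maven-plugin rule added above: it fails the build when a non-generated class (anything whose name does not match the .generated./.tmpl./inner-class exclusion regex) lacks an Apache Yetus InterfaceAudience annotation. A minimal sketch of a class that passes the check, assuming the yetus audience-annotations dependency is on the classpath; the class name here is hypothetical:

import org.apache.yetus.audience.InterfaceAudience;

/**
 * Any non-generated, non-inner class in the checked modules needs a class-level
 * InterfaceAudience annotation, or the warbucks-maven-plugin check fails the build.
 */
@InterfaceAudience.Private
public class ExampleAnnotatedClass {
  // the body is irrelevant to the check; only the annotation above matters
}
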
diff --git a/rest/hbase-rest-protocol/pom.xml b/rest/hbase-rest-protocol/pom.xml
new file mode 100755
index 00000000..84001189
--- /dev/null
+++ b/rest/hbase-rest-protocol/pom.xml
@@ -0,0 +1,282 @@
+
+
+ 4.0.0
+
+
+
+
+ rest
+ org.apache.hbase.connectors
+ ${revision}
+
+
+ org.apache.hbase.connectors.rest
+ hbase-rest-protocol
+ ${revision}
+ Apache HBase - Rest Protocol
+ protobuf protocol classes used by HBase REST internally.
+
+ true
+
+ 3.5.1-1
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+
+ maven-assembly-plugin
+
+ true
+
+
+
+ org.xolstice.maven.plugins
+ protobuf-maven-plugin
+
+
+ compile-protoc
+ generate-sources
+
+ compile
+
+
+ com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier}
+ true
+
+
+
+
+
+
+ com.google.code.maven-replacer-plugin
+ replacer
+ 1.5.3
+
+
+ process-sources
+
+ replace
+
+
+
+
+ ${basedir}/target/generated-sources/
+
+ **/*.java
+
+
+ true
+
+
+ ([^\.])com.google.protobuf
+ $1org.apache.hbase.thirdparty.com.google.protobuf
+
+
+ (public)(\W+static)?(\W+final)?(\W+class)
+ @javax.annotation.Generated("proto") $1$2$3$4
+
+
+
+ (@javax.annotation.Generated\("proto"\) ){2}
+ $1
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+
+
+ package
+
+ shade
+
+
+ true
+ true
+ false
+
+
+
+ com.google.protobuf
+ org.apache.hadoop.hbase.shaded.com.google.protobuf
+
+
+
+
+
+ javax.annotation:javax.annotation-api
+
+ org.apache.hbase.thirdparty:*
+ com.google.protobuf:protobuf-java
+ com.google.code.findbugs:*
+ com.google.j2objc:j2objc-annotations
+ org.codehaus.mojo:animal-sniffer-annotations
+ junit:junit
+ log4j:log4j
+ commons-logging:commons-logging
+ org.slf4j:slf4j-api
+ org.apache.yetus:audience-annotations
+ com.github.stephenc.findbugs:*
+ com.github.spotbugs:*
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-checkstyle-plugin
+
+ true
+
+
+
+ net.revelc.code
+ warbucks-maven-plugin
+
+
+
+
+
+
+
+ org.apache.hbase.thirdparty
+ hbase-shaded-protobuf
+
+
+ junit
+ junit
+ test
+
+
+ org.apache.htrace
+ htrace-core4
+
+
+
+
+
+ skip-protocol-shaded-tests
+
+
+ skip-protocol-shaded-tests
+
+
+
+ true
+ true
+
+
+
+ build-with-jdk11
+
+ [1.11,)
+
+
+
+ javax.annotation
+ javax.annotation-api
+
+
+
+
+ eclipse-specific
+
+
+ m2e.version
+
+
+
+
+
+
+
+ org.eclipse.m2e
+ lifecycle-mapping
+
+
+
+
+
+ org.apache.hadoop
+ hadoop-maven-plugins
+ [2.0.5-alpha,)
+
+ protoc
+
+
+
+
+
+
+
+
+
+ com.google.code.maven-replacer-plugin
+
+ replacer
+ [1.5.3,)
+
+ replace
+
+
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
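
The build flow for this module is: the protobuf-maven-plugin compiles the .proto files below into Java under target/generated-sources, the maven-replacer-plugin then rewrites those generated sources so that references to com.google.protobuf point at the hbase-thirdparty relocation (and stamps the classes with @javax.annotation.Generated("proto"), deduplicating the annotation), and the shade plugin finally relocates any remaining com.google.protobuf classes in the jar. A hedged before/after sketch of what the first replacer rule does to a hypothetical line of protoc-generated Java:

// As emitted by protoc (hypothetical generated field initializer):
private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;

// After the replacer rule ([^\.])com.google.protobuf -> $1org.apache.hbase.thirdparty.com.google.protobuf:
private org.apache.hbase.thirdparty.com.google.protobuf.ByteString row_ =
    org.apache.hbase.thirdparty.com.google.protobuf.ByteString.EMPTY;
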
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/CellMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellMessage.proto
new file mode 100755
index 00000000..75b6f01e
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellMessage.proto
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message Cell {
+ optional bytes row = 1; // unused if Cell is in a CellSet
+ optional bytes column = 2;
+ optional int64 timestamp = 3;
+ optional bytes data = 4;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/CellSetMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellSetMessage.proto
new file mode 100755
index 00000000..68a6b05b
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellSetMessage.proto
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+import "rest/CellMessage.proto";
+
+message CellSet {
+ message Row {
+ required bytes key = 1;
+ repeated Cell values = 2;
+ }
+ repeated Row rows = 1;
+}
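
CellSet nests Row messages, each carrying a row key plus repeated Cell values from CellMessage.proto. A hedged Java sketch of building one with the generated classes; the class names assume protoc's defaults (no java_outer_classname is set, so the outer classes are CellMessage and CellSetMessage in the proto package), and the ByteString import assumes the thirdparty relocation performed by the replacer step above:

import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell;
import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellSetMessage.CellSet;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

public class CellSetExample { // hypothetical helper for illustration
  public static byte[] buildSingleCell() {
    // One cell under one row, serialized the way the REST protobuf representation expects.
    Cell cell = Cell.newBuilder()
        .setColumn(ByteString.copyFromUtf8("cf:qual"))
        .setTimestamp(System.currentTimeMillis())
        .setData(ByteString.copyFromUtf8("value"))
        .build();
    CellSet.Row row = CellSet.Row.newBuilder()
        .setKey(ByteString.copyFromUtf8("row1"))
        .addValues(cell)
        .build();
    return CellSet.newBuilder().addRows(row).build().toByteArray();
  }
}
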
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/ColumnSchemaMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/ColumnSchemaMessage.proto
new file mode 100755
index 00000000..8b5e4795
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/ColumnSchemaMessage.proto
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message ColumnSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ // optional helpful encodings of commonly used attributes
+ optional int32 ttl = 3;
+ optional int32 maxVersions = 4;
+ optional string compression = 5;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacePropertiesMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacePropertiesMessage.proto
new file mode 100755
index 00000000..d0a1a4eb
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacePropertiesMessage.proto
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message NamespaceProperties {
+ message Property {
+ required string key = 1;
+ required string value = 2;
+ }
+ repeated Property props = 1;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacesMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacesMessage.proto
new file mode 100755
index 00000000..229a1f66
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacesMessage.proto
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message Namespaces {
+ repeated string namespace = 1;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/ScannerMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/ScannerMessage.proto
new file mode 100755
index 00000000..78aa85b4
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/ScannerMessage.proto
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message Scanner {
+ optional bytes startRow = 1;
+ optional bytes endRow = 2;
+ repeated bytes columns = 3;
+ optional int32 batch = 4;
+ optional int64 startTime = 5;
+ optional int64 endTime = 6;
+ optional int32 maxVersions = 7;
+ optional string filter = 8;
+ optional int32 caching = 9; // specifies REST scanner caching
+ repeated string labels = 10;
+ optional bool cacheBlocks = 11; // server side block caching hint
+ optional int32 limit = 12;
+}
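
Scanner carries the knobs the REST scanner resources accept: row range, columns, time range, batch, versions, filter, caching and an overall limit. A hedged sketch of building the message with the generated classes (same naming assumptions as the CellSet sketch above); how the bytes are actually submitted, for example as an application/x-protobuf body when creating a scanner, depends on code outside this patch:

import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.ScannerMessage.Scanner;
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;

public class ScannerMessageExample { // hypothetical helper for illustration
  public static byte[] buildScannerBody() {
    Scanner scanner = Scanner.newBuilder()
        .setStartRow(ByteString.copyFromUtf8("row0"))
        .setEndRow(ByteString.copyFromUtf8("row9"))
        .addColumns(ByteString.copyFromUtf8("cf"))
        .setBatch(100)
        .setMaxVersions(1)
        .setCacheBlocks(true)
        .build();
    return scanner.toByteArray();
  }
}
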
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/StorageClusterStatusMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/StorageClusterStatusMessage.proto
new file mode 100755
index 00000000..c39e2395
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/StorageClusterStatusMessage.proto
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message StorageClusterStatus {
+ message Region {
+ required bytes name = 1;
+ optional int32 stores = 2;
+ optional int32 storefiles = 3;
+ optional int32 storefileSizeMB = 4;
+ optional int32 memStoreSizeMB = 5;
+ optional int64 storefileIndexSizeKB = 6;
+ optional int64 readRequestsCount = 7;
+ optional int64 writeRequestsCount = 8;
+ optional int32 rootIndexSizeKB = 9;
+ optional int32 totalStaticIndexSizeKB = 10;
+ optional int32 totalStaticBloomSizeKB = 11;
+ optional int64 totalCompactingKVs = 12;
+ optional int64 currentCompactedKVs = 13;
+ optional int64 cpRequestsCount = 14;
+ }
+ message Node {
+ required string name = 1; // name:port
+ optional int64 startCode = 2;
+ optional int64 requests = 3;
+ optional int32 heapSizeMB = 4;
+ optional int32 maxHeapSizeMB = 5;
+ repeated Region regions = 6;
+ }
+ // node status
+ repeated Node liveNodes = 1;
+ repeated string deadNodes = 2;
+ // summary statistics
+ optional int32 regions = 3;
+ optional int64 requests = 4;
+ optional double averageLoad = 5;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/TableInfoMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableInfoMessage.proto
new file mode 100755
index 00000000..344ee1d2
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableInfoMessage.proto
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message TableInfo {
+ required string name = 1;
+ message Region {
+ required string name = 1;
+ optional bytes startKey = 2;
+ optional bytes endKey = 3;
+ optional int64 id = 4;
+ optional string location = 5;
+ }
+ repeated Region regions = 2;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/TableListMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableListMessage.proto
new file mode 100755
index 00000000..bf3857e5
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableListMessage.proto
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message TableList {
+ repeated string name = 1;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/TableSchemaMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableSchemaMessage.proto
new file mode 100755
index 00000000..6135716c
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableSchemaMessage.proto
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+import "rest/ColumnSchemaMessage.proto";
+
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message TableSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ repeated ColumnSchema columns = 3;
+ // optional helpful encodings of commonly used attributes
+ optional bool inMemory = 4;
+ optional bool readOnly = 5;
+}
diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/VersionMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/VersionMessage.proto
new file mode 100755
index 00000000..742d8828
--- /dev/null
+++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/VersionMessage.proto
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto2";
+package org.apache.hadoop.hbase.shaded.rest.protobuf.generated;
+
+message Version {
+ optional string restVersion = 1;
+ optional string jvmVersion = 2;
+ optional string osVersion = 3;
+ optional string serverVersion = 4;
+ optional string jerseyVersion = 5;
+}
diff --git a/rest/hbase-rest/pom.xml b/rest/hbase-rest/pom.xml
new file mode 100755
index 00000000..77cd7eff
--- /dev/null
+++ b/rest/hbase-rest/pom.xml
@@ -0,0 +1,493 @@
+
+
+ 4.0.0
+ jar
+
+
+
+
+ org.apache.hbase.connectors
+ rest
+ ${revision}
+ ../
+
+
+ org.apache.hbase.connectors.rest
+ hbase-rest
+ Apache HBase - Rest Server
+ HBase Rest Server
+
+
+ surefire-junit47
+
+
+
+
+
+
+
+
+
+ ${project.build.directory}
+
+ hbase-webapps/**
+
+
+
+
+
+ ../hbase-rest/src/test/resources
+
+ **/**
+
+
+
+
+
+
+ maven-assembly-plugin
+
+ true
+
+
+
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+ %regex[.*(Cat|Dog).*Test.*]
+
+
+
+
+
+ maven-antrun-plugin
+
+
+
+ generate
+ generate-sources
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ run
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+
+ jspcSource-packageInfo-source
+ generate-sources
+
+ add-source
+
+
+
+ ${project.build.directory}/generated-sources/java
+
+
+
+
+
+
+
+ maven-surefire-plugin
+
+
+ ../target/test-classes/webapps
+
+
+
+
+
+ org.apache.maven.surefire
+ ${surefire.provider}
+ ${surefire.version}
+
+
+
+
+ net.revelc.code
+ warbucks-maven-plugin
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+ 8
+ 8
+
+
+
+
+
+
+ org.apache.hbase.connectors.rest
+ hbase-rest-protocol
+
+
+
+ javax.ws.rs
+ javax.ws.rs-api
+
+
+
+ org.apache.hbase
+ hbase-common
+
+
+ org.apache.hbase
+ hbase-mapreduce
+
+
+ org.apache.hbase
+ hbase-client
+
+
+ org.apache.hbase
+ hbase-hadoop-compat
+
+
+ org.apache.hbase
+ hbase-testing-util
+ test
+
+
+
+ com.sun.jersey
+ jersey-core
+
+
+
+
+
+ org.apache.hbase.thirdparty
+ hbase-shaded-miscellaneous
+
+
+ org.apache.hbase.thirdparty
+ hbase-shaded-protobuf
+
+
+ org.apache.httpcomponents
+ httpclient
+
+
+ org.apache.httpcomponents
+ httpcore
+
+
+ org.apache.commons
+ commons-lang3
+
+
+ org.slf4j
+ slf4j-api
+
+
+ javax.xml.bind
+ jaxb-api
+
+
+ javax.servlet
+ javax.servlet-api
+
+
+ com.sun.activation
+ javax.activation
+
+
+ org.eclipse.jetty
+ jetty-server
+
+
+ org.eclipse.jetty
+ jetty-servlet
+
+
+ org.eclipse.jetty
+ jetty-util
+
+
+ org.eclipse.jetty
+ jetty-http
+
+
+ org.eclipse.jetty
+ jetty-jmx
+
+
+ org.glassfish.jersey.containers
+ jersey-container-servlet-core
+
+
+ com.fasterxml.jackson.jaxrs
+ jackson-jaxrs-json-provider
+ ${jackson.version}
+
+
+ com.fasterxml.jackson.core
+ jackson-annotations
+ ${jackson.version}
+
+
+ com.fasterxml.jackson.core
+ jackson-core
+ ${jackson.version}
+
+
+ com.fasterxml.jackson.core
+ jackson-databind
+ ${jackson.version}
+
+
+
+ org.codehaus.jettison
+ jettison
+
+
+ stax
+ stax-api
+
+
+
+
+
+ org.glassfish.web
+ javax.servlet.jsp
+
+
+
+ org.glassfish
+ javax.el
+
+
+ org.apache.kerby
+ kerb-simplekdc
+ test
+
+
+ org.apache.hadoop
+ hadoop-minikdc
+ test
+
+
+ org.apache.kerby
+ kerb-core
+ test
+
+
+ commons-io
+ commons-io
+ test
+
+
+ junit
+ junit
+ test
+
+
+ org.mockito
+ mockito-core
+ test
+
+
+ com.github.stephenc.findbugs
+ findbugs-annotations
+ compile
+ true
+
+
+
+ org.bouncycastle
+ bcprov-jdk15on
+ test
+
+
+
+
+
+ skipRestTests
+
+
+ skipRestTests
+
+
+
+ true
+ true
+
+
+
+
+
+ hadoop-3.0
+
+ !hadoop.profile
+
+
+
+
+ org.apache.hadoop
+ hadoop-yarn-server-nodemanager
+
+
+ com.sun.jersey
+ jersey-core
+
+
+
+
+ org.apache.hadoop
+ hadoop-yarn-server-resourcemanager
+
+
+ com.sun.jersey
+ jersey-core
+
+
+
+
+ org.apache.hadoop
+ hadoop-yarn-server-timelineservice
+
+
+ javax.ws.rs
+ jsr311-api
+
+
+
+
+ org.apache.hadoop
+ hadoop-yarn-common
+
+
+ com.sun.jersey
+ jersey-core
+
+
+
+
+
+
+
+ org.apache.hadoop
+ hadoop-annotations
+
+
+ org.apache.hadoop
+ hadoop-common
+
+
+ org.apache.hadoop
+ hadoop-auth
+
+
+
+ org.glassfish.jaxb
+ jaxb-runtime
+ 2.3.2
+
+
+
+
+ eclipse-specific
+
+
+ m2e.version
+
+
+
+
+
+
+
+
+ org.eclipse.m2e
+ lifecycle-mapping
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+ [1.6,)
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
new file mode 100755
index 00000000..4cf8a93e
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Common constants for org.apache.hadoop.hbase.rest
+ */
+@InterfaceAudience.Public
+public interface Constants {
+ // All constants in a public interface are 'public static final'
+
+ String VERSION_STRING = "0.0.3";
+
+ int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours
+
+ int DEFAULT_LISTEN_PORT = 8080;
+
+ String MIMETYPE_TEXT = "text/plain";
+ String MIMETYPE_HTML = "text/html";
+ String MIMETYPE_XML = "text/xml";
+ String MIMETYPE_BINARY = "application/octet-stream";
+ String MIMETYPE_PROTOBUF = "application/x-protobuf";
+ String MIMETYPE_PROTOBUF_IETF = "application/protobuf";
+ String MIMETYPE_JSON = "application/json";
+
+ String CRLF = "\r\n";
+
+ String REST_KEYTAB_FILE = "hbase.rest.keytab.file";
+ String REST_KERBEROS_PRINCIPAL = "hbase.rest.kerberos.principal";
+ String REST_AUTHENTICATION_TYPE = "hbase.rest.authentication.type";
+ String REST_AUTHENTICATION_PRINCIPAL = "hbase.rest.authentication.kerberos.principal";
+
+ String REST_SSL_ENABLED = "hbase.rest.ssl.enabled";
+ String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store";
+ String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password";
+ String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword";
+ String REST_SSL_EXCLUDE_CIPHER_SUITES = "hbase.rest.ssl.exclude.cipher.suites";
+ String REST_SSL_INCLUDE_CIPHER_SUITES = "hbase.rest.ssl.include.cipher.suites";
+ String REST_SSL_EXCLUDE_PROTOCOLS = "hbase.rest.ssl.exclude.protocols";
+ String REST_SSL_INCLUDE_PROTOCOLS = "hbase.rest.ssl.include.protocols";
+
+ String REST_THREAD_POOL_THREADS_MAX = "hbase.rest.threads.max";
+ String REST_THREAD_POOL_THREADS_MIN = "hbase.rest.threads.min";
+ String REST_THREAD_POOL_TASK_QUEUE_SIZE = "hbase.rest.task.queue.size";
+ String REST_THREAD_POOL_THREAD_IDLE_TIMEOUT = "hbase.rest.thread.idle.timeout";
+ String REST_CONNECTOR_ACCEPT_QUEUE_SIZE = "hbase.rest.connector.accept.queue.size";
+
+ String REST_DNS_NAMESERVER = "hbase.rest.dns.nameserver";
+ String REST_DNS_INTERFACE = "hbase.rest.dns.interface";
+
+ String FILTER_CLASSES = "hbase.rest.filter.classes";
+ String SCAN_START_ROW = "startrow";
+ String SCAN_END_ROW = "endrow";
+ String SCAN_COLUMN = "column";
+ String SCAN_START_TIME = "starttime";
+ String SCAN_END_TIME = "endtime";
+ String SCAN_MAX_VERSIONS = "maxversions";
+ String SCAN_BATCH_SIZE = "batchsize";
+ String SCAN_LIMIT = "limit";
+ String SCAN_FETCH_SIZE = "hbase.rest.scan.fetchsize";
+ String SCAN_FILTER = "filter";
+ String SCAN_REVERSED = "reversed";
+ String SCAN_CACHE_BLOCKS = "cacheblocks";
+ String CUSTOM_FILTERS = "hbase.rest.custom.filters";
+
+ String ROW_KEYS_PARAM_NAME = "row";
+ /** If this query parameter is present when processing row or scanner resources,
+ it disables server side block caching */
+ String NOCACHE_PARAM_NAME = "nocache";
+}
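
The SCAN_* and ROW_KEYS_PARAM_NAME strings above are the query-parameter names the REST resources read, and NOCACHE_PARAM_NAME disables server-side block caching for a single request. A hedged sketch of composing a stateless-scan URL from these constants on the client side; the /{table}/* path form and the REST host are assumptions for illustration:

import org.apache.hadoop.hbase.rest.Constants;

public class ScanUrlExample implements Constants { // hypothetical helper for illustration
  public static String scanUrl(String restBase, String table) {
    // e.g. http://resthost:8080/mytable/*?startrow=row0&endrow=row9&maxversions=1&nocache
    return restBase + "/" + table + "/*"
        + "?" + SCAN_START_ROW + "=row0"
        + "&" + SCAN_END_ROW + "=row9"
        + "&" + SCAN_MAX_VERSIONS + "=1"
        + "&" + NOCACHE_PARAM_NAME;
  }
}
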
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
new file mode 100755
index 00000000..aefd8475
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java
@@ -0,0 +1,74 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+
+@InterfaceAudience.Private
+public class ExistsResource extends ResourceBase {
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ TableResource tableResource;
+
+ /**
+ * Constructor
+ * @param tableResource
+ * @throws IOException
+ */
+ public ExistsResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY})
+ public Response get(final @Context UriInfo uriInfo) {
+ try {
+ if (!tableResource.exists()) {
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ } catch (IOException e) {
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ ResponseBuilder response = Response.ok();
+ response.cacheControl(cacheControl);
+ return response.build();
+ }
+}
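
ExistsResource is a bare existence probe: 200 (with caching disabled) when the table exists, 404 when it does not, and 503 when the check itself fails. A hedged client-side sketch; the /{table}/exists path is how the dispatching table resource is typically wired, but treat it and the REST host as assumptions here:

import java.net.HttpURLConnection;
import java.net.URL;

public class TableExistsCheck { // hypothetical helper for illustration
  public static boolean tableExists(String restBase, String table) throws Exception {
    HttpURLConnection conn =
        (HttpURLConnection) new URL(restBase + "/" + table + "/exists").openConnection();
    conn.setRequestMethod("GET");
    int code = conn.getResponseCode(); // 200 = exists, 404 = missing, 503 = check failed
    conn.disconnect();
    return code == HttpURLConnection.HTTP_OK;
  }
}
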
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
new file mode 100755
index 00000000..57325b76
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java
@@ -0,0 +1,129 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class MetricsREST {
+
+ public MetricsRESTSource getSource() {
+ return source;
+ }
+
+ private MetricsRESTSource source;
+
+ public MetricsREST() {
+ source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class);
+ }
+
+ /**
+ * @param inc How much to add to requests.
+ */
+ public void incrementRequests(final int inc) {
+ source.incrementRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulGetCount.
+ */
+ public void incrementSucessfulGetRequests(final int inc) {
+ source.incrementSucessfulGetRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulPutCount.
+ */
+ public void incrementSucessfulPutRequests(final int inc) {
+ source.incrementSucessfulPutRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedPutCount.
+ */
+ public void incrementFailedPutRequests(final int inc) {
+ source.incrementFailedPutRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedGetCount.
+ */
+ public void incrementFailedGetRequests(final int inc) {
+ source.incrementFailedGetRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulDeleteCount.
+ */
+ public void incrementSucessfulDeleteRequests(final int inc) {
+ source.incrementSucessfulDeleteRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedDeleteCount.
+ */
+ public void incrementFailedDeleteRequests(final int inc) {
+ source.incrementFailedDeleteRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulScanCount.
+ */
+ public synchronized void incrementSucessfulScanRequests(final int inc) {
+ source.incrementSucessfulScanRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedScanCount.
+ */
+ public void incrementFailedScanRequests(final int inc) {
+ source.incrementFailedScanRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulAppendCount.
+ */
+ public synchronized void incrementSucessfulAppendRequests(final int inc) {
+ source.incrementSucessfulAppendRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedAppendCount.
+ */
+ public void incrementFailedAppendRequests(final int inc) {
+ source.incrementFailedAppendRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to sucessfulIncrementCount.
+ */
+ public synchronized void incrementSucessfulIncrementRequests(final int inc) {
+ source.incrementSucessfulIncrementRequests(inc);
+ }
+
+ /**
+ * @param inc How much to add to failedIncrementCount.
+ */
+ public void incrementFailedIncrementRequests(final int inc) {
+ source.incrementFailedIncrementRequests(inc);
+ }
+
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
new file mode 100755
index 00000000..5bd7e2b2
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -0,0 +1,123 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+
+@InterfaceAudience.Private
+public class MultiRowResource extends ResourceBase implements Constants {
+ private static final Logger LOG = LoggerFactory.getLogger(MultiRowResource.class);
+
+ TableResource tableResource;
+ Integer versions = null;
+ String[] columns = null;
+
+ /**
+ * Constructor
+ *
+ * @param tableResource
+ * @param versions
+ * @param columnsStr
+ * @throws IOException
+ */
+ public MultiRowResource(TableResource tableResource, String versions, String columnsStr)
+ throws IOException {
+ super();
+ this.tableResource = tableResource;
+
+ if (columnsStr != null && !columnsStr.equals("")) {
+ this.columns = columnsStr.split(",");
+ }
+
+ if (versions != null) {
+ this.versions = Integer.valueOf(versions);
+
+ }
+ }
+
+ @GET
+ @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
+ public Response get(final @Context UriInfo uriInfo) {
+ MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ CellSetModel model = new CellSetModel();
+ for (String rk : params.get(ROW_KEYS_PARAM_NAME)) {
+ RowSpec rowSpec = new RowSpec(rk);
+
+ if (this.versions != null) {
+ rowSpec.setMaxVersions(this.versions);
+ }
+
+ if (this.columns != null) {
+ for (int i = 0; i < this.columns.length; i++) {
+ rowSpec.addColumn(Bytes.toBytes(this.columns[i]));
+ }
+ }
+
+ ResultGenerator generator =
+ ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null,
+ !params.containsKey(NOCACHE_PARAM_NAME));
+ Cell value = null;
+ RowModel rowModel = new RowModel(rowSpec.getRow());
+ if (generator.hasNext()) {
+ while ((value = generator.next()) != null) {
+ rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil
+ .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value)));
+ }
+ model.addRow(rowModel);
+ } else {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("The row : " + rk + " not found in the table.");
+ }
+ }
+ }
+
+ if (model.getRows().isEmpty()) {
+ //If no rows found.
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("No rows found." + CRLF)
+ .build();
+ } else {
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return Response.ok(model).build();
+ }
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+}
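
MultiRowResource serves multi-get requests: each `row` query parameter (ROW_KEYS_PARAM_NAME) becomes a RowSpec, optionally narrowed by the column list and version count passed to the constructor, and all matching cells are returned in a single CellSetModel. A hedged client sketch; the /{table}/multiget path is an assumption about how the table resource dispatches to this class:

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class MultiGetExample { // hypothetical helper for illustration
  public static byte[] multiGet(String restBase, String table) throws Exception {
    // Ask for two rows in one request; the response body is a protobuf-encoded CellSet.
    URL url = new URL(restBase + "/" + table + "/multiget?row=row1&row=row2");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/x-protobuf");
    try (InputStream in = conn.getInputStream()) {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      byte[] buf = new byte[4096];
      for (int n; (n = in.read(buf)) != -1; ) {
        out.write(buf, 0, n);
      }
      return out.toByteArray(); // decode with new CellSetModel().getObjectFromMessage(bytes)
    } finally {
      conn.disconnect();
    }
  }
}
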
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
new file mode 100755
index 00000000..649395f2
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
@@ -0,0 +1,290 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel;
+import org.apache.hadoop.hbase.rest.model.TableListModel;
+import org.apache.hadoop.hbase.rest.model.TableModel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletContext;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Implements the following REST end points:
+ *
+ * /namespaces/{namespace} GET: get namespace properties.
+ * /namespaces/{namespace} POST: create namespace.
+ * /namespaces/{namespace} PUT: alter namespace.
+ * /namespaces/{namespace} DELETE: drop namespace.
+ * /namespaces/{namespace}/tables GET: list namespace's tables.
+ *
+ */
+@InterfaceAudience.Private
+public class NamespacesInstanceResource extends ResourceBase {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NamespacesInstanceResource.class);
+ String namespace;
+ boolean queryTables = false;
+
+ /**
+ * Constructor for standard NamespaceInstanceResource.
+ * @throws IOException
+ */
+ public NamespacesInstanceResource(String namespace) throws IOException {
+ this(namespace, false);
+ }
+
+ /**
+ * Constructor for querying namespace table list via NamespaceInstanceResource.
+ * @throws IOException
+ */
+ public NamespacesInstanceResource(String namespace, boolean queryTables) throws IOException {
+ super();
+ this.namespace = namespace;
+ this.queryTables = queryTables;
+ }
+
+ /**
+ * Build a response for GET namespace description or GET list of namespace tables.
+ * @param context servlet context
+ * @param uriInfo (JAX-RS context variable) request URL
+ * @return A response containing NamespacesInstanceModel for a namespace descriptions and
+ * TableListModel for a list of namespace tables.
+ */
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context ServletContext context,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+
+ // Respond to list of namespace tables requests.
+ if(queryTables){
+ TableListModel tableModel = new TableListModel();
+ try{
+ List<TableDescriptor> tables =
+ servlet.getAdmin().listTableDescriptorsByNamespace(Bytes.toBytes(namespace));
+ for (TableDescriptor table : tables) {
+ tableModel.add(new TableModel(table.getTableName().getQualifierAsString()));
+ }
+
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return Response.ok(tableModel).build();
+ }catch(IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ throw new RuntimeException("Cannot retrieve table list for '" + namespace + "'.");
+ }
+ }
+
+ // Respond to namespace description requests.
+ try {
+ NamespacesInstanceModel rowModel =
+ new NamespacesInstanceModel(servlet.getAdmin(), namespace);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return Response.ok(rowModel).build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ throw new RuntimeException("Cannot retrieve info for '" + namespace + "'.");
+ }
+ }
+
+ /**
+ * Build a response for PUT alter namespace with properties specified.
+ * @param model properties used for alter.
+ * @param uriInfo (JAX-RS context variable) request URL
+ * @return response code.
+ */
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) {
+ return processUpdate(model, true, uriInfo);
+ }
+
+ /**
+ * Build a response for POST create namespace with properties specified.
+ * @param model properties used for create.
+ * @param uriInfo (JAX-RS context variable) request URL
+ * @return response code.
+ */
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response post(final NamespacesInstanceModel model,
+ final @Context UriInfo uriInfo) {
+ return processUpdate(model, false, uriInfo);
+ }
+
+
+ // Check that POST or PUT is valid and then update namespace.
+ private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting,
+ final UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace((updateExisting ? "PUT " : "POST ") + uriInfo.getAbsolutePath());
+ }
+ if (model == null) {
+ try {
+ model = new NamespacesInstanceModel(namespace);
+ } catch(IOException ioe) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ throw new RuntimeException("Cannot retrieve info for '" + namespace + "'.");
+ }
+ }
+ servlet.getMetrics().incrementRequests(1);
+
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
+ .entity("Forbidden" + CRLF).build();
+ }
+
+ Admin admin = null;
+ boolean namespaceExists = false;
+ try {
+ admin = servlet.getAdmin();
+ namespaceExists = doesNamespaceExist(admin, namespace);
+ }catch (IOException e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+
+ // Do not allow creation if namespace already exists.
+ if(!updateExisting && namespaceExists){
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).
+ entity("Namespace '" + namespace + "' already exists. Use REST PUT " +
+ "to alter the existing namespace.").build();
+ }
+
+ // Do not allow altering if namespace does not exist.
+ if (updateExisting && !namespaceExists){
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).
+ entity("Namespace '" + namespace + "' does not exist. Use " +
+ "REST POST to create the namespace.").build();
+ }
+
+ return createOrUpdate(model, uriInfo, admin, updateExisting);
+ }
+
+ // Do the actual namespace create or alter.
+ private Response createOrUpdate(final NamespacesInstanceModel model, final UriInfo uriInfo,
+ final Admin admin, final boolean updateExisting) {
+ NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace);
+ // Only add configuration when properties were supplied (avoids adding them twice).
+ if (model.getProperties().size() > 0) {
+ builder.addConfiguration(model.getProperties());
+ }
+ NamespaceDescriptor nsd = builder.build();
+
+ try{
+ if(updateExisting){
+ admin.modifyNamespace(nsd);
+ }else{
+ admin.createNamespace(nsd);
+ }
+ }catch (IOException e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+
+ return updateExisting ? Response.ok(uriInfo.getAbsolutePath()).build() :
+ Response.created(uriInfo.getAbsolutePath()).build();
+ }
+
+ private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException{
+ NamespaceDescriptor[] nd = admin.listNamespaceDescriptors();
+ for(int i = 0; i < nd.length; i++){
+ if(nd[i].getName().equals(namespaceName)){
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Build a response for DELETE delete namespace.
+ * @param message value not used.
+ * @param headers value not used.
+ * @return response code.
+ */
+ @DELETE
+ public Response deleteNoBody(final byte[] message,
+ final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("DELETE " + uriInfo.getAbsolutePath());
+ }
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
+ .entity("Forbidden" + CRLF).build();
+ }
+
+ try{
+ Admin admin = servlet.getAdmin();
+ if (!doesNamespaceExist(admin, namespace)){
+ return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT).
+ entity("Namespace '" + namespace + "' does not exists. Cannot " +
+ "drop namespace.").build();
+ }
+
+ admin.deleteNamespace(namespace);
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ return Response.ok().build();
+
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return processException(e);
+ }
+ }
+
+ /**
+ * Dispatch to NamespaceInstanceResource for getting list of tables.
+ */
+ @Path("tables")
+ public NamespacesInstanceResource getNamespaceInstanceResource(
+ final @PathParam("tables") String namespace) throws IOException {
+ return new NamespacesInstanceResource(this.namespace, true);
+ }
+}
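
processUpdate enforces the verb semantics from the class comment: POST may only create a namespace that does not yet exist, PUT may only alter one that does, and both are refused in read-only mode. deleteNoBody drops a namespace with no request body. A hedged client sketch of the delete path; the /namespaces/{namespace} URL comes from the class comment, while the REST host is an assumption:

import java.net.HttpURLConnection;
import java.net.URL;

public class DropNamespaceExample { // hypothetical helper for illustration
  public static int dropNamespace(String restBase, String namespace) throws Exception {
    // DELETE /namespaces/{namespace}: 200 on success, 404 if the namespace is missing,
    // 403 when the REST server runs in read-only mode (mirrors deleteNoBody above).
    URL url = new URL(restBase + "/namespaces/" + namespace);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("DELETE");
    int code = conn.getResponseCode();
    conn.disconnect();
    return code;
  }
}
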
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
new file mode 100755
index 00000000..47b98f6a
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
@@ -0,0 +1,89 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.rest.model.NamespacesModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletContext;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+
+/**
+ * Implements REST GET list of all namespaces.
+ *
+ * /namespaces
+ *
+ */
+@InterfaceAudience.Private
+public class NamespacesResource extends ResourceBase {
+
+ private static final Logger LOG = LoggerFactory.getLogger(NamespacesResource.class);
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public NamespacesResource() throws IOException {
+ super();
+ }
+
+ /**
+ * Build a response for a list of all namespaces request.
+ * @param context servlet context
+ * @param uriInfo (JAX-RS context variable) request URL
+ * @return a response for a version request
+ */
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ NamespacesModel rowModel = new NamespacesModel(servlet.getAdmin());
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return Response.ok(rowModel).build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ throw new RuntimeException("Cannot retrieve list of namespaces.");
+ }
+ }
+
+ /**
+ * Dispatch to NamespaceInstanceResource
+ */
+ @Path("{namespace}")
+ public NamespacesInstanceResource getNamespaceInstanceResource(
+ final @PathParam("namespace") String namespace) throws IOException {
+ return new NamespacesInstanceResource(namespace);
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
new file mode 100755
index 00000000..d5e4354e
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java
@@ -0,0 +1,46 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Common interface for models capable of supporting protobuf marshalling
+ * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and
+ * ProtobufMessageBodyProducer adapters.
+ */
+@InterfaceAudience.Private
+public interface ProtobufMessageHandler {
+ /**
+ * @return the protobuf representation of the model
+ */
+ byte[] createProtobufOutput();
+
+ /**
+ * Initialize the model from a protobuf representation.
+ * @param message the raw bytes of the protobuf message
+ * @return reference to self for convenience
+ * @throws IOException
+ */
+ ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException;
+}
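
Model classes implement this pair of methods so the protobuf MessageBodyReader/Writer adapters can round-trip them as application/x-protobuf. A hedged sketch of a trivial model backed by the generated Version message (generated class names assumed as in the earlier sketches); the model classes referenced elsewhere in this patch follow the same createProtobufOutput/getObjectFromMessage pattern:

import java.io.IOException;
import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.VersionMessage.Version;

public class MinimalVersionModel implements ProtobufMessageHandler { // illustrative sketch
  private String restVersion = "0.0.3";

  @Override
  public byte[] createProtobufOutput() {
    // Marshal the model state into the generated protobuf message.
    return Version.newBuilder().setRestVersion(restVersion).build().toByteArray();
  }

  @Override
  public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException {
    // Unmarshal and copy the fields back into this model, then return self.
    Version version = Version.parseFrom(message);
    if (version.hasRestVersion()) {
      restVersion = version.getRestVersion();
    }
    return this;
  }
}
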
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
new file mode 100755
index 00000000..1c137e4b
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.StreamingOutput;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class ProtobufStreamingOutput implements StreamingOutput {
+ private static final Logger LOG = LoggerFactory.getLogger(ProtobufStreamingOutput.class);
+
+ private String contentType;
+ private ResultScanner resultScanner;
+ private int limit;
+ private int fetchSize;
+
+ protected ProtobufStreamingOutput(ResultScanner scanner, String type, int limit, int fetchSize) {
+ this.resultScanner = scanner;
+ this.contentType = type;
+ this.limit = limit;
+ this.fetchSize = fetchSize;
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Created StreamingOutput with content type = " + this.contentType
+ + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize);
+ }
+ }
+
+ @Override
+ public void write(OutputStream outStream) throws IOException, WebApplicationException {
+ Result[] rowsToSend;
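+ // If the caller's row limit fits within a single fetch, send everything in one chunk;
+ // otherwise page through the scanner fetchSize rows at a time until the limit is
+ // reached or the scanner is exhausted.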
+ if(limit < fetchSize){
+ rowsToSend = this.resultScanner.next(limit);
+ writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
+ } else {
+ int count = limit;
+ while (count > 0) {
+ if (count < fetchSize) {
+ rowsToSend = this.resultScanner.next(count);
+ } else {
+ rowsToSend = this.resultScanner.next(this.fetchSize);
+ }
+ if(rowsToSend.length == 0){
+ break;
+ }
+ count = count - rowsToSend.length;
+ writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream);
+ }
+ }
+ }
+
+ private void writeToStream(CellSetModel model, String contentType, OutputStream outStream)
+ throws IOException {
+ byte[] objectBytes = model.createProtobufOutput();
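+ // Frame each chunk as a 2-byte length prefix (via Bytes.toBytes(short)) followed by the
+ // protobuf-encoded CellSetModel, so the client can split the stream back into messages.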
+ outStream.write(Bytes.toBytes((short)objectBytes.length));
+ outStream.write(objectBytes);
+ outStream.flush();
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully.");
+ }
+ }
+
+ private CellSetModel createModelFromResults(Result[] results) {
+ CellSetModel cellSetModel = new CellSetModel();
+ for (Result rs : results) {
+ byte[] rowKey = rs.getRow();
+ RowModel rModel = new RowModel(rowKey);
+ List<Cell> kvs = rs.listCells();
+ for (Cell kv : kvs) {
+ rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv
+ .getTimestamp(), CellUtil.cloneValue(kv)));
+ }
+ cellSetModel.addRow(rModel);
+ }
+ return cellSetModel;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
new file mode 100755
index 00000000..ae91d11e
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -0,0 +1,449 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider;
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ArrayBlockingQueue;
+import javax.servlet.DispatcherType;
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.log.HBaseMarkers;
+import org.apache.hadoop.hbase.rest.filter.AuthFilter;
+import org.apache.hadoop.hbase.rest.filter.GzipFilter;
+import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;
+import org.apache.hadoop.hbase.rest.http.ClickjackingPreventionFilter;
+import org.apache.hadoop.hbase.rest.http.HttpServerUtil;
+import org.apache.hadoop.hbase.rest.http.InfoServer;
+import org.apache.hadoop.hbase.rest.http.SecurityHeadersFilter;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.DNS;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.eclipse.jetty.http.HttpVersion;
+import org.eclipse.jetty.jmx.MBeanContainer;
+import org.eclipse.jetty.server.HttpConfiguration;
+import org.eclipse.jetty.server.HttpConnectionFactory;
+import org.eclipse.jetty.server.SecureRequestCustomizer;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.server.SslConnectionFactory;
+import org.eclipse.jetty.servlet.FilterHolder;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.util.ssl.SslContextFactory;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.glassfish.jersey.server.ResourceConfig;
+import org.glassfish.jersey.servlet.ServletContainer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
+import org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser;
+
+/**
+ * Main class for launching REST gateway as a servlet hosted by Jetty.
+ * <p>
+ * The following options are supported:
+ * <ul>
+ * <li>-p --port : service port</li>
+ * <li>-ro --readonly : server mode</li>
+ * </ul>
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+public class RESTServer implements Constants {
+ static Logger LOG = LoggerFactory.getLogger("RESTServer");
+
+ static final String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled";
+ static final boolean REST_CSRF_ENABLED_DEFAULT = false;
+ boolean restCSRFEnabled = false;
+ static final String REST_CSRF_CUSTOM_HEADER_KEY ="hbase.rest.csrf.custom.header";
+ static final String REST_CSRF_CUSTOM_HEADER_DEFAULT = "X-XSRF-HEADER";
+ static final String REST_CSRF_METHODS_TO_IGNORE_KEY = "hbase.rest.csrf.methods.to.ignore";
+ static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
+ public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login";
+ static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k
+
+ private static final String PATH_SPEC_ANY = "/*";
+
+ static final String REST_HTTP_ALLOW_OPTIONS_METHOD = "hbase.rest.http.allow.options.method";
+ // HTTP OPTIONS method is commonly used in REST APIs for negotiation. So it is enabled by default.
+ private static boolean REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT = true;
+ static final String REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY =
+ "hbase.rest-csrf.browser-useragents-regex";
+
+ // HACK, making this static for AuthFilter to get at our configuration. Necessary for unit tests.
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+ value={"ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL"},
+ justification="For testing")
+ public static Configuration conf = null;
+ private final UserProvider userProvider;
+ private Server server;
+ private InfoServer infoServer;
+
+ public RESTServer(Configuration conf) {
+ RESTServer.conf = conf;
+ this.userProvider = UserProvider.instantiate(conf);
+ }
+
+ private static void printUsageAndExit(Options options, int exitCode) {
+ HelpFormatter formatter = new HelpFormatter();
+ formatter.printHelp("hbase rest start", "", options,
+ "\nTo run the REST server as a daemon, execute " +
+ "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", true);
+ System.exit(exitCode);
+ }
+
+ void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) {
+ restCSRFEnabled = conf.getBoolean(REST_CSRF_ENABLED_KEY, REST_CSRF_ENABLED_DEFAULT);
+ if (restCSRFEnabled) {
+ Map<String, String> restCsrfParams = RestCsrfPreventionFilter
+ .getFilterParams(conf, "hbase.rest-csrf.");
+ FilterHolder holder = new FilterHolder();
+ holder.setName("csrf");
+ holder.setClassName(RestCsrfPreventionFilter.class.getName());
+ holder.setInitParameters(restCsrfParams);
+ ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class));
+ }
+ }
+
+ private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler,
+ Configuration conf) {
+ FilterHolder holder = new FilterHolder();
+ holder.setName("clickjackingprevention");
+ holder.setClassName(ClickjackingPreventionFilter.class.getName());
+ holder.setInitParameters(ClickjackingPreventionFilter.getDefaultParameters(conf));
+ ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class));
+ }
+
+ private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf) {
+ FilterHolder holder = new FilterHolder();
+ holder.setName("securityheaders");
+ holder.setClassName(SecurityHeadersFilter.class.getName());
+ holder.setInitParameters(SecurityHeadersFilter.getDefaultParameters(conf));
+ ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class));
+ }
+
+ // login the server principal (if using secure Hadoop)
+ private static Pair<FilterHolder, Class<? extends ServletContainer>> loginServerPrincipal(
+ UserProvider userProvider, Configuration conf) throws Exception {
+ Class<? extends ServletContainer> containerClass = ServletContainer.class;
+ if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) {
+ String machineName = Strings.domainNamePointerToHostName(
+ DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
+ conf.get(REST_DNS_NAMESERVER, "default")));
+ String keytabFilename = conf.get(REST_KEYTAB_FILE);
+ Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(),
+ REST_KEYTAB_FILE + " should be set if security is enabled");
+ String principalConfig = conf.get(REST_KERBEROS_PRINCIPAL);
+ Preconditions.checkArgument(principalConfig != null && !principalConfig.isEmpty(),
+ REST_KERBEROS_PRINCIPAL + " should be set if security is enabled");
+ // Hook for unit tests, this will log out any other user and mess up tests.
+ if (!conf.getBoolean(SKIP_LOGIN_KEY, false)) {
+ userProvider.login(REST_KEYTAB_FILE, REST_KERBEROS_PRINCIPAL, machineName);
+ }
+ if (conf.get(REST_AUTHENTICATION_TYPE) != null) {
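+ // An authentication type is configured: swap in the RESTServletContainer (which honors
+ // doAs impersonation of the remote user) and front it with the AuthFilter.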
+ containerClass = RESTServletContainer.class;
+ FilterHolder authFilter = new FilterHolder();
+ authFilter.setClassName(AuthFilter.class.getName());
+ authFilter.setName("AuthenticationFilter");
+ return new Pair<>(authFilter,containerClass);
+ }
+ }
+ return new Pair<>(null, containerClass);
+ }
+
+ private static void parseCommandLine(String[] args, Configuration conf) {
+ Options options = new Options();
+ options.addOption("p", "port", true, "Port to bind to [default: " + DEFAULT_LISTEN_PORT + "]");
+ options.addOption("ro", "readonly", false, "Respond only to GET HTTP " +
+ "method requests [default: false]");
+ options.addOption("i", "infoport", true, "Port for WEB UI");
+
+ CommandLine commandLine = null;
+ try {
+ commandLine = new PosixParser().parse(options, args);
+ } catch (ParseException e) {
+ LOG.error("Could not parse: ", e);
+ printUsageAndExit(options, -1);
+ }
+
+ // check for user-defined port setting, if so override the conf
+ if (commandLine != null && commandLine.hasOption("port")) {
+ String val = commandLine.getOptionValue("port");
+ conf.setInt("hbase.rest.port", Integer.parseInt(val));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("port set to " + val);
+ }
+ }
+
+ // check if server should only process GET requests, if so override the conf
+ if (commandLine != null && commandLine.hasOption("readonly")) {
+ conf.setBoolean("hbase.rest.readonly", true);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("readonly set to true");
+ }
+ }
+
+ // check for user-defined info server port setting, if so override the conf
+ if (commandLine != null && commandLine.hasOption("infoport")) {
+ String val = commandLine.getOptionValue("infoport");
+ conf.setInt("hbase.rest.info.port", Integer.parseInt(val));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("WEB UI port set to " + val);
+ }
+ }
+
+ if (commandLine != null && commandLine.hasOption("skipLogin")) {
+ conf.setBoolean(SKIP_LOGIN_KEY, true);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Skipping Kerberos login for REST server");
+ }
+ }
+
+ List<String> remainingArgs = commandLine != null ? commandLine.getArgList() : new ArrayList<>();
+ if (remainingArgs.size() != 1) {
+ printUsageAndExit(options, 1);
+ }
+
+ String command = remainingArgs.get(0);
+ if ("start".equals(command)) {
+ // continue and start container
+ } else if ("stop".equals(command)) {
+ System.exit(1);
+ } else {
+ printUsageAndExit(options, 1);
+ }
+ }
+
+
+ /**
+ * Runs the REST server.
+ */
+ public synchronized void run() throws Exception {
+ Pair<FilterHolder, Class<? extends ServletContainer>> pair = loginServerPrincipal(
+ userProvider, conf);
+ FilterHolder authFilter = pair.getFirst();
+ Class<? extends ServletContainer> containerClass = pair.getSecond();
+ RESTServlet servlet = RESTServlet.getInstance(conf, userProvider);
+
+ // set up the Jersey servlet container for Jetty
+ ResourceConfig application = new ResourceConfig().
+ packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class);
+ // Using our custom ServletContainer is tremendously important. This is what makes sure the
+ // UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself.
+ ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application);
+ ServletHolder sh = new ServletHolder(servletContainer);
+
+ // Set the default max thread number to 100 to limit
+ // the number of concurrent requests so that REST server doesn't OOM easily.
+ // Jetty set the default max thread number to 250, if we don't set it.
+ //
+ // Our default min thread number 2 is the same as that used by Jetty.
+ int maxThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MAX, 100);
+ int minThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MIN, 2);
+ // Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use
+ // bounded {@link ArrayBlockingQueue} with the given size
+ int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1);
+ int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000);
+ QueuedThreadPool threadPool = queueSize > 0 ?
+ new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) :
+ new QueuedThreadPool(maxThreads, minThreads, idleTimeout);
+
+ this.server = new Server(threadPool);
+
+ // Setup JMX
+ MBeanContainer mbContainer=new MBeanContainer(ManagementFactory.getPlatformMBeanServer());
+ server.addEventListener(mbContainer);
+ server.addBean(mbContainer);
+
+
+ String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0");
+ int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080);
+ HttpConfiguration httpConfig = new HttpConfiguration();
+ httpConfig.setSecureScheme("https");
+ httpConfig.setSecurePort(servicePort);
+ httpConfig.setHeaderCacheSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
+ httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
+ httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE);
+ httpConfig.setSendServerVersion(false);
+ httpConfig.setSendDateHeader(false);
+
+ ServerConnector serverConnector;
+ if (conf.getBoolean(REST_SSL_ENABLED, false)) {
+ HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
+ httpsConfig.addCustomizer(new SecureRequestCustomizer());
+
+ SslContextFactory sslCtxFactory = new SslContextFactory();
+ String keystore = conf.get(REST_SSL_KEYSTORE_STORE);
+ String password = HBaseConfiguration.getPassword(conf,
+ REST_SSL_KEYSTORE_PASSWORD, null);
+ String keyPassword = HBaseConfiguration.getPassword(conf,
+ REST_SSL_KEYSTORE_KEYPASSWORD, password);
+ sslCtxFactory.setKeyStorePath(keystore);
+ sslCtxFactory.setKeyStorePassword(password);
+ sslCtxFactory.setKeyManagerPassword(keyPassword);
+
+ String[] excludeCiphers = servlet.getConfiguration().getStrings(
+ REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
+ if (excludeCiphers.length != 0) {
+ sslCtxFactory.setExcludeCipherSuites(excludeCiphers);
+ }
+ String[] includeCiphers = servlet.getConfiguration().getStrings(
+ REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY);
+ if (includeCiphers.length != 0) {
+ sslCtxFactory.setIncludeCipherSuites(includeCiphers);
+ }
+
+ String[] excludeProtocols = servlet.getConfiguration().getStrings(
+ REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
+ if (excludeProtocols.length != 0) {
+ sslCtxFactory.setExcludeProtocols(excludeProtocols);
+ }
+ String[] includeProtocols = servlet.getConfiguration().getStrings(
+ REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY);
+ if (includeProtocols.length != 0) {
+ sslCtxFactory.setIncludeProtocols(includeProtocols);
+ }
+
+ serverConnector = new ServerConnector(server,
+ new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()),
+ new HttpConnectionFactory(httpsConfig));
+ } else {
+ serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig));
+ }
+
+ int acceptQueueSize = servlet.getConfiguration().getInt(REST_CONNECTOR_ACCEPT_QUEUE_SIZE, -1);
+ if (acceptQueueSize >= 0) {
+ serverConnector.setAcceptQueueSize(acceptQueueSize);
+ }
+
+ serverConnector.setPort(servicePort);
+ serverConnector.setHost(host);
+
+ server.addConnector(serverConnector);
+ server.setStopAtShutdown(true);
+
+ // set up context
+ ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
+ ctxHandler.addServlet(sh, PATH_SPEC_ANY);
+ if (authFilter != null) {
+ ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
+ }
+
+ // Load filters from configuration.
+ String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES,
+ GzipFilter.class.getName());
+ for (String filter : filterClasses) {
+ filter = filter.trim();
+ ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST));
+ }
+ addCSRFFilter(ctxHandler, conf);
+ addClickjackingPreventionFilter(ctxHandler, conf);
+ addSecurityHeadersFilter(ctxHandler, conf);
+ HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration()
+ .getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT));
+
+ // Put up info server.
+ int port = conf.getInt("hbase.rest.info.port", 8085);
+ if (port >= 0) {
+ conf.setLong("startcode", System.currentTimeMillis());
+ String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0");
+ this.infoServer = new InfoServer("rest", a, port, false, conf);
+ this.infoServer.setAttribute("hbase.conf", conf);
+ this.infoServer.start();
+ }
+ try {
+ // start server
+ server.start();
+ } catch (Exception e) {
+ LOG.error(HBaseMarkers.FATAL, "Failed to start server", e);
+ throw e;
+ }
+ }
+
+ public synchronized void join() throws Exception {
+ if (server == null) {
+ throw new IllegalStateException("Server is not running");
+ }
+ server.join();
+ }
+
+ public synchronized void stop() throws Exception {
+ if (server == null) {
+ throw new IllegalStateException("Server is not running");
+ }
+ server.stop();
+ server = null;
+ RESTServlet.stop();
+ }
+
+ public synchronized int getPort() {
+ if (server == null) {
+ throw new IllegalStateException("Server is not running");
+ }
+ return ((ServerConnector) server.getConnectors()[0]).getLocalPort();
+ }
+
+ @SuppressWarnings("deprecation")
+ public synchronized int getInfoPort() {
+ if (infoServer == null) {
+ throw new IllegalStateException("InfoServer is not running");
+ }
+ return infoServer.getPort();
+ }
+
+ public Configuration getConf() {
+ return conf;
+ }
+
+ /**
+ * The main method for the HBase rest server.
+ * @param args command-line arguments
+ * @throws Exception exception
+ */
+ public static void main(String[] args) throws Exception {
+ LOG.info("***** STARTING service '" + RESTServer.class.getSimpleName() + "' *****");
+ VersionInfo.logVersion();
+ final Configuration conf = HBaseConfiguration.create();
+ parseCommandLine(args, conf);
+ RESTServer server = new RESTServer(conf);
+
+ try {
+ server.run();
+ server.join();
+ } catch (Exception e) {
+ LOG.error(HBaseMarkers.FATAL, "Failed to start REST server", e);
+ System.exit(1);
+ }
+
+ LOG.info("***** STOPPING service '" + RESTServer.class.getSimpleName() + "' *****");
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
new file mode 100755
index 00000000..4213fa9c
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.rest.util.ConnectionCache;
+import org.apache.hadoop.hbase.rest.util.JvmPauseMonitor;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.ParseFilter;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+
+/**
+ * Singleton class encapsulating global REST servlet state and functions.
+ */
+@InterfaceAudience.Private
+public class RESTServlet implements Constants {
+ private static final Logger LOG = LoggerFactory.getLogger(RESTServlet.class);
+ private static RESTServlet INSTANCE;
+ private final Configuration conf;
+ private final MetricsREST metrics;
+ private final ConnectionCache connectionCache;
+ private final UserGroupInformation realUser;
+ private final JvmPauseMonitor pauseMonitor;
+
+ public static final String CLEANUP_INTERVAL = "hbase.rest.connection.cleanup-interval";
+ public static final String MAX_IDLETIME = "hbase.rest.connection.max-idletime";
+ static final String HBASE_REST_SUPPORT_PROXYUSER = "hbase.rest.support.proxyuser";
+
+ UserGroupInformation getRealUser() {
+ return realUser;
+ }
+
+ /**
+ * @return the RESTServlet singleton instance
+ */
+ public synchronized static RESTServlet getInstance() {
+ assert(INSTANCE != null);
+ return INSTANCE;
+ }
+
+ /**
+ * @return the ConnectionCache instance
+ */
+ public ConnectionCache getConnectionCache() {
+ return connectionCache;
+ }
+
+ /**
+ * @param conf Existing configuration to use in rest servlet
+ * @param userProvider the login user provider
+ * @return the RESTServlet singleton instance
+ * @throws IOException
+ */
+ public synchronized static RESTServlet getInstance(Configuration conf,
+ UserProvider userProvider) throws IOException {
+ if (INSTANCE == null) {
+ INSTANCE = new RESTServlet(conf, userProvider);
+ }
+ return INSTANCE;
+ }
+
+ public synchronized static void stop() {
+ if (INSTANCE != null) {
+ INSTANCE.shutdown();
+ INSTANCE = null;
+ }
+ }
+
+ /**
+ * Constructor with existing configuration
+ * @param conf existing configuration
+ * @param userProvider the login user provider
+ * @throws IOException
+ */
+ RESTServlet(final Configuration conf,
+ final UserProvider userProvider) throws IOException {
+ this.realUser = userProvider.getCurrent().getUGI();
+ this.conf = conf;
+ registerCustomFilter(conf);
+
+ int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
+ int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
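+ // The ConnectionCache keeps one HBase connection per effective user and evicts connections
+ // idle longer than maxIdleTime, checking on a cleanInterval cadence.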
+ connectionCache = new ConnectionCache(
+ conf, userProvider, cleanInterval, maxIdleTime);
+ if (supportsProxyuser()) {
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+ }
+
+ metrics = new MetricsREST();
+
+ pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource());
+ pauseMonitor.start();
+ }
+
+ Admin getAdmin() throws IOException {
+ return connectionCache.getAdmin();
+ }
+
+ /**
+ * Caller closes the table afterwards.
+ */
+ Table getTable(String tableName) throws IOException {
+ return connectionCache.getTable(tableName);
+ }
+
+ Configuration getConfiguration() {
+ return conf;
+ }
+
+ MetricsREST getMetrics() {
+ return metrics;
+ }
+
+ /**
+ * Helper method to determine if server should
+ * only respond to GET HTTP method requests.
+ * @return boolean for server read-only state
+ */
+ boolean isReadOnly() {
+ return getConfiguration().getBoolean("hbase.rest.readonly", false);
+ }
+
+ void setEffectiveUser(String effectiveUser) {
+ connectionCache.setEffectiveUser(effectiveUser);
+ }
+
+ /**
+ * Shutdown any services that need to stop
+ */
+ void shutdown() {
+ if (pauseMonitor != null) pauseMonitor.stop();
+ if (connectionCache != null) connectionCache.shutdown();
+ }
+
+ boolean supportsProxyuser() {
+ return conf.getBoolean(HBASE_REST_SUPPORT_PROXYUSER, false);
+ }
+
+ private void registerCustomFilter(Configuration conf) {
+ String[] filterList = conf.getStrings(Constants.CUSTOM_FILTERS);
+ if (filterList != null) {
+ for (String filterClass : filterList) {
+ String[] filterPart = filterClass.split(":");
+ if (filterPart.length != 2) {
+ LOG.warn(
+ "Invalid filter specification " + filterClass + " - skipping");
+ } else {
+ ParseFilter.registerFilter(filterPart[0], filterPart[1]);
+ }
+ }
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
new file mode 100755
index 00000000..2b6a6c7f
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.glassfish.jersey.server.ResourceConfig;
+import org.glassfish.jersey.servlet.ServletContainer;
+
+/**
+ * REST servlet container. It is used to get the remote request user
+ * without going through @HttpContext, so that we can minimize code changes.
+ */
+@InterfaceAudience.Private
+public class RESTServletContainer extends ServletContainer {
+ private static final long serialVersionUID = -2474255003443394314L;
+
+ public RESTServletContainer(ResourceConfig config) {
+ super(config);
+ }
+
+ /**
+ * This container is used only if authentication and
+ * impersonation are enabled. The remote request user is used
+ * as a proxy user for impersonation when invoking any REST service.
+ */
+ @Override
+ public void service(final HttpServletRequest request,
+ final HttpServletResponse response) throws ServletException, IOException {
+ final String doAsUserFromQuery = request.getParameter("doAs");
+ RESTServlet servlet = RESTServlet.getInstance();
+ if (doAsUserFromQuery != null) {
+ Configuration conf = servlet.getConfiguration();
+ if (!servlet.supportsProxyuser()) {
+ throw new ServletException("Support for proxyuser is not configured");
+ }
+ // Authenticated remote user is attempting to do 'doAs' proxy user.
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser());
+ // create and attempt to authorize a proxy user (the client is attempting
+ // to do proxy user)
+ ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
+ // validate the proxy user authorization
+ try {
+ ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf);
+ } catch(AuthorizationException e) {
+ throw new ServletException(e.getMessage());
+ }
+ servlet.setEffectiveUser(doAsUserFromQuery);
+ } else {
+ String effectiveUser = request.getRemoteUser();
+ servlet.setEffectiveUser(effectiveUser);
+ }
+ super.service(request, response);
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
new file mode 100755
index 00000000..b70c13d2
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
@@ -0,0 +1,108 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.rest.model.TableInfoModel;
+import org.apache.hadoop.hbase.rest.model.TableRegionModel;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class RegionsResource extends ResourceBase {
+ private static final Logger LOG = LoggerFactory.getLogger(RegionsResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ TableResource tableResource;
+
+ /**
+ * Constructor
+ * @param tableResource
+ * @throws IOException
+ */
+ public RegionsResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ TableName tableName = TableName.valueOf(tableResource.getName());
+ TableInfoModel model = new TableInfoModel(tableName.getNameAsString());
+
+ Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration());
+ List<Pair<RegionInfo, ServerName>> regions = MetaTableAccessor
+ .getTableRegionsAndLocations(connection, tableName);
+ connection.close();
+ for (Pair<RegionInfo, ServerName> e: regions) {
+ RegionInfo hri = e.getFirst();
+ ServerName addr = e.getSecond();
+ model.add(
+ new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(),
+ hri.getStartKey(), hri.getEndKey(), addr.getAddress().toString()));
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (TableNotFoundException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
new file mode 100755
index 00000000..a0deb7e7
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Response;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class ResourceBase implements Constants {
+
+ RESTServlet servlet;
+ Class<?> accessDeniedClazz;
+
+ public ResourceBase() throws IOException {
+ servlet = RESTServlet.getInstance();
+ try {
+ accessDeniedClazz = Class.forName("org.apache.hadoop.hbase.security.AccessDeniedException");
+ } catch (ClassNotFoundException e) {
+ }
+ }
+
+ protected Response processException(Throwable exp) {
+ Throwable curr = exp;
+ if(accessDeniedClazz != null) {
+ //some access denied exceptions are buried
+ while (curr != null) {
+ if(accessDeniedClazz.isAssignableFrom(curr.getClass())) {
+ throw new WebApplicationException(
+ Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+ curr = curr.getCause();
+ }
+ }
+ //TableNotFound may also be buried one level deep
+ if (exp instanceof TableNotFoundException ||
+ exp.getCause() instanceof TableNotFoundException) {
+ throw new WebApplicationException(
+ Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+ if (exp instanceof NoSuchColumnFamilyException){
+ throw new WebApplicationException(
+ Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+ if (exp instanceof RuntimeException) {
+ throw new WebApplicationException(
+ Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
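+ // RetriesExhaustedException wraps the real cause; re-process it so buried access-denied
+ // or not-found conditions still map to the proper HTTP status.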
+ if (exp instanceof RetriesExhaustedException) {
+ RetriesExhaustedException retryException = (RetriesExhaustedException) exp;
+ processException(retryException.getCause());
+ }
+ throw new WebApplicationException(
+ Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF +
+ StringUtils.stringifyException(exp) + CRLF)
+ .build());
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
new file mode 100755
index 00000000..d48bcb45
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java
@@ -0,0 +1,49 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+@InterfaceAudience.Private
+ public abstract class ResultGenerator implements Iterator<Cell> {
+
+ public static ResultGenerator fromRowSpec(final String table,
+ final RowSpec rowspec, final Filter filter, final boolean cacheBlocks)
+ throws IOException {
+ if (rowspec.isSingleRow()) {
+ return new RowResultGenerator(table, rowspec, filter, cacheBlocks);
+ } else {
+ return new ScannerResultGenerator(table, rowspec, filter, cacheBlocks);
+ }
+ }
+
+ public static Filter buildFilter(final String filter) throws Exception {
+ return ScannerModel.buildFilter(filter);
+ }
+
+ public abstract void putBack(Cell kv);
+
+ public abstract void close();
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
new file mode 100755
index 00000000..98217451
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
@@ -0,0 +1,110 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.rest.model.TableListModel;
+import org.apache.hadoop.hbase.rest.model.TableModel;
+
+@Path("/")
+@InterfaceAudience.Private
+public class RootResource extends ResourceBase {
+ private static final Logger LOG = LoggerFactory.getLogger(RootResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public RootResource() throws IOException {
+ super();
+ }
+
+ private final TableListModel getTableList() throws IOException {
+ TableListModel tableList = new TableListModel();
+ TableName[] tableNames = servlet.getAdmin().listTableNames();
+ for (TableName name: tableNames) {
+ tableList.add(new TableModel(name.getNameAsString()));
+ }
+ return tableList;
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ ResponseBuilder response = Response.ok(getTableList());
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+
+ @Path("status/cluster")
+ public StorageClusterStatusResource getClusterStatusResource()
+ throws IOException {
+ return new StorageClusterStatusResource();
+ }
+
+ @Path("version")
+ public VersionResource getVersionResource() throws IOException {
+ return new VersionResource();
+ }
+
+ @Path("{table}")
+ public TableResource getTableResource(
+ final @PathParam("table") String table) throws IOException {
+ return new TableResource(table);
+ }
+
+ @Path("namespaces")
+ public NamespacesResource getNamespaceResource() throws IOException {
+ return new NamespacesResource();
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
new file mode 100755
index 00000000..3ac74723
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -0,0 +1,889 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.Cell.Type;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class RowResource extends ResourceBase {
+ private static final Logger LOG = LoggerFactory.getLogger(RowResource.class);
+
+ private static final String CHECK_PUT = "put";
+ private static final String CHECK_DELETE = "delete";
+ private static final String CHECK_APPEND = "append";
+ private static final String CHECK_INCREMENT = "increment";
+
+ private TableResource tableResource;
+ private RowSpec rowspec;
+ private String check = null;
+ private boolean returnResult = false;
+
+ /**
+ * Constructor
+ * @param tableResource
+ * @param rowspec
+ * @param versions
+ * @param check
+ * @param returnResult
+ * @throws IOException
+ */
+ public RowResource(TableResource tableResource, String rowspec,
+ String versions, String check, String returnResult) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ this.rowspec = new RowSpec(rowspec);
+ if (versions != null) {
+ this.rowspec.setMaxVersions(Integer.parseInt(versions));
+ }
+ this.check = check;
+ if (returnResult != null) {
+ this.returnResult = Boolean.valueOf(returnResult);
+ }
+ }
+
+ @GET
+ @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+ try {
+ ResultGenerator generator =
+ ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
+ !params.containsKey(NOCACHE_PARAM_NAME));
+ if (!generator.hasNext()) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ int count = 0;
+ CellSetModel model = new CellSetModel();
+ Cell value = generator.next();
+ byte[] rowKey = CellUtil.cloneRow(value);
+ RowModel rowModel = new RowModel(rowKey);
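+ // Walk the generator, grouping consecutive cells into one RowModel per row key and starting
+ // a new row whenever the key changes, until the row spec's max values limit is hit.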
+ do {
+ if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
+ model.addRow(rowModel);
+ rowKey = CellUtil.cloneRow(value);
+ rowModel = new RowModel(rowKey);
+ }
+ rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
+ value.getTimestamp(), CellUtil.cloneValue(value)));
+ if (++count > rowspec.getMaxValues()) {
+ break;
+ }
+ value = generator.next();
+ } while (value != null);
+ model.addRow(rowModel);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return Response.ok(model).build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+ }
+
+ @GET
+ @Produces(MIMETYPE_BINARY)
+ public Response getBinary(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+ }
+ servlet.getMetrics().incrementRequests(1);
+ // doesn't make sense to use a non specific coordinate as this can only
+ // return a single cell
+ if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+ .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " +
+ "in the row. Using the 'Accept' header with one of these formats lets you " +
+ "retrieve the entire row if it has multiple columns: " +
+ // Same as the @Produces list for the get method.
+ MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " +
+ MIMETYPE_PROTOBUF + ", " + MIMETYPE_PROTOBUF_IETF +
+ CRLF).build();
+ }
+ MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
+ try {
+ ResultGenerator generator =
+ ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null,
+ !params.containsKey(NOCACHE_PARAM_NAME));
+ if (!generator.hasNext()) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ Cell value = generator.next();
+ ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
+ response.header("X-Timestamp", value.getTimestamp());
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+
+ Response update(final CellSetModel model, final boolean replace) {
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+
+ if (CHECK_PUT.equalsIgnoreCase(check)) {
+ return checkAndPut(model);
+ } else if (CHECK_DELETE.equalsIgnoreCase(check)) {
+ return checkAndDelete(model);
+ } else if (CHECK_APPEND.equalsIgnoreCase(check)) {
+ return append(model);
+ } else if (CHECK_INCREMENT.equalsIgnoreCase(check)) {
+ return increment(model);
+ } else if (check != null && check.length() > 0) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF)
+ .build();
+ }
+
+ Table table = null;
+ try {
+ List<RowModel> rows = model.getRows();
+ List<Put> puts = new ArrayList<>();
+ for (RowModel row: rows) {
+ byte[] key = row.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+ if (key == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF)
+ .build();
+ }
+ Put put = new Put(key);
+ int i = 0;
+ for (CellModel cell: row.getCells()) {
+ byte[] col = cell.getColumn();
+ if (col == null) try {
+ col = rowspec.getColumns()[i++];
+ } catch (ArrayIndexOutOfBoundsException e) {
+ col = null;
+ }
+ if (col == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+ byte [][] parts = CellUtil.parseColumn(col);
+ if (parts.length != 2) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
+ .setRow(put.getRow())
+ .setFamily(parts[0])
+ .setQualifier(parts[1])
+ .setTimestamp(cell.getTimestamp())
+ .setType(Type.Put)
+ .setValue(cell.getValue())
+ .build());
+ }
+ puts.add(put);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("PUT " + put.toString());
+ }
+ }
+ table = servlet.getTable(tableResource.getName());
+ table.put(puts);
+ ResponseBuilder response = Response.ok();
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+
+ // This currently supports only update of one row at a time.
+ Response updateBinary(final byte[] message, final HttpHeaders headers,
+ final boolean replace) {
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ Table table = null;
+ try {
+ byte[] row = rowspec.getRow();
+ byte[][] columns = rowspec.getColumns();
+ byte[] column = null;
+ if (columns != null) {
+ column = columns[0];
+ }
+ long timestamp = HConstants.LATEST_TIMESTAMP;
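+ // For raw binary updates the row, column and timestamp may be supplied via the X-Row,
+ // X-Column and X-Timestamp request headers, overriding the values taken from the path.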
+ List<String> vals = headers.getRequestHeader("X-Row");
+ if (vals != null && !vals.isEmpty()) {
+ row = Bytes.toBytes(vals.get(0));
+ }
+ vals = headers.getRequestHeader("X-Column");
+ if (vals != null && !vals.isEmpty()) {
+ column = Bytes.toBytes(vals.get(0));
+ }
+ vals = headers.getRequestHeader("X-Timestamp");
+ if (vals != null && !vals.isEmpty()) {
+ timestamp = Long.parseLong(vals.get(0));
+ }
+ if (column == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+ Put put = new Put(row);
+ byte parts[][] = CellUtil.parseColumn(column);
+ if (parts.length != 2) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
+ .setRow(put.getRow())
+ .setFamily(parts[0])
+ .setQualifier(parts[1])
+ .setTimestamp(timestamp)
+ .setType(Type.Put)
+ .setValue(message)
+ .build());
+ table = servlet.getTable(tableResource.getName());
+ table.put(put);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("PUT " + put.toString());
+ }
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return Response.ok().build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response put(final CellSetModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("PUT " + uriInfo.getAbsolutePath()
+ + " " + uriInfo.getQueryParameters());
+ }
+ return update(model, true);
+ }
+
+ @PUT
+ @Consumes(MIMETYPE_BINARY)
+ public Response putBinary(final byte[] message,
+ final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+ }
+ return updateBinary(message, headers, true);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response post(final CellSetModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("POST " + uriInfo.getAbsolutePath()
+ + " " + uriInfo.getQueryParameters());
+ }
+ return update(model, false);
+ }
+
+ @POST
+ @Consumes(MIMETYPE_BINARY)
+ public Response postBinary(final byte[] message,
+ final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
+ }
+ return updateBinary(message, headers, false);
+ }
+
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ Delete delete = null;
+ if (rowspec.hasTimestamp()) {
+ delete = new Delete(rowspec.getRow(), rowspec.getTimestamp());
+ } else {
+ delete = new Delete(rowspec.getRow());
+ }
+
+ for (byte[] column: rowspec.getColumns()) {
+ byte[][] split = CellUtil.parseColumn(column);
+ if (rowspec.hasTimestamp()) {
+ if (split.length == 1) {
+ delete.addFamily(split[0], rowspec.getTimestamp());
+ } else if (split.length == 2) {
+ delete.addColumns(split[0], split[1], rowspec.getTimestamp());
+ } else {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ } else {
+ if (split.length == 1) {
+ delete.addFamily(split[0]);
+ } else if (split.length == 2) {
+ delete.addColumns(split[0], split[1]);
+ } else {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ }
+ }
+ Table table = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ table.delete(delete);
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("DELETE " + delete.toString());
+ }
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ return Response.ok().build();
+ }
+
+ /**
+ * Validates the input request parameters, parses columns from CellSetModel,
+ * and invokes checkAndPut on HTable.
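+ * The model is expected to contain a single row; the last cell of that row
+ * names the column and value to check, and the preceding cells are applied
+ * as a Put only when the check succeeds. The check column must also appear
+ * among the cells being put.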
+ *
+ * @param model instance of CellSetModel
+ * @return Response 200 OK, 304 Not modified, 400 Bad request
+ */
+ Response checkAndPut(final CellSetModel model) {
+ Table table = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ if (model.getRows().size() != 1) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+ .entity("Bad request: Number of rows specified is not 1." + CRLF).build();
+ }
+
+ RowModel rowModel = model.getRows().get(0);
+ byte[] key = rowModel.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+
+ List<CellModel> cellModels = rowModel.getCells();
+ int cellModelCount = cellModels.size();
+ if (key == null || cellModelCount <= 1) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response
+ .status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT)
+ .entity(
+ "Bad request: Either row key is null or no data found for columns specified." + CRLF)
+ .build();
+ }
+
+ Put put = new Put(key);
+ boolean retValue;
+ CellModel valueToCheckCell = cellModels.get(cellModelCount - 1);
+ byte[] valueToCheckColumn = valueToCheckCell.getColumn();
+ byte[][] valueToPutParts = CellUtil.parseColumn(valueToCheckColumn);
+ if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
+ CellModel valueToPutCell = null;
+
+ // Copy all the cells to the Put request
+ // and track if the check cell's latest value is also sent
+ for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
+ CellModel cell = cellModels.get(i);
+ byte[] col = cell.getColumn();
+
+ if (col == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+
+ byte [][] parts = CellUtil.parseColumn(col);
+
+ if (parts.length != 2) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
+ .setRow(put.getRow())
+ .setFamily(parts[0])
+ .setQualifier(parts[1])
+ .setTimestamp(cell.getTimestamp())
+ .setType(Type.Put)
+ .setValue(cell.getValue())
+ .build());
+ if (Bytes.equals(col, valueToCheckCell.getColumn())) {
+ valueToPutCell = cell;
+ }
+ }
+
+ if (valueToPutCell == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
+ .entity("Bad request: The column to put and check do not match." + CRLF).build();
+ } else {
+ retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1])
+ .ifEquals(valueToCheckCell.getValue()).thenPut(put);
+ }
+ } else {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
+ .build();
+ }
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("CHECK-AND-PUT " + put.toString() + ", returns " + retValue);
+ }
+ if (!retValue) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF)
+ .build();
+ }
+ ResponseBuilder response = Response.ok();
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+
+ /**
+ * Validates the input request parameters, parses columns from CellSetModel,
+ * and invokes checkAndDelete on HTable.
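+ * The last cell of the single supplied row names the column and value to
+ * check; any preceding cells name the columns or families to delete when the
+ * check succeeds. If only the check cell is supplied, the check column itself
+ * is marked for deletion.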
+ *
+ * @param model instance of CellSetModel
+ * @return Response 200 OK, 304 Not modified, 400 Bad request
+ */
+ Response checkAndDelete(final CellSetModel model) {
+ Table table = null;
+ Delete delete = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ if (model.getRows().size() != 1) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF)
+ .build();
+ }
+ RowModel rowModel = model.getRows().get(0);
+ byte[] key = rowModel.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+ if (key == null) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF)
+ .build();
+ }
+
+ List<CellModel> cellModels = rowModel.getCells();
+ int cellModelCount = cellModels.size();
+
+ delete = new Delete(key);
+ boolean retValue;
+ CellModel valueToDeleteCell = cellModels.get(cellModelCount - 1);
+ byte[] valueToDeleteColumn = valueToDeleteCell.getColumn();
+ if (valueToDeleteColumn == null) {
+ try {
+ valueToDeleteColumn = rowspec.getColumns()[0];
+ } catch (final ArrayIndexOutOfBoundsException e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF)
+ .build();
+ }
+ }
+
+ byte[][] parts;
+ // Copy all the cells to the Delete request if extra cells are sent
+ if (cellModelCount > 1) {
+ for (int i = 0, n = cellModelCount - 1; i < n; i++) {
+ CellModel cell = cellModels.get(i);
+ byte[] col = cell.getColumn();
+
+ if (col == null) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+
+ parts = CellUtil.parseColumn(col);
+
+ if (parts.length == 1) {
+ // Only Column Family is specified
+ delete.addFamily(parts[0], cell.getTimestamp());
+ } else if (parts.length == 2) {
+ delete.addColumn(parts[0], parts[1], cell.getTimestamp());
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT)
+ .entity("Bad request: Column to delete incorrectly specified." + CRLF)
+ .build();
+ }
+ }
+ }
+
+ parts = CellUtil.parseColumn(valueToDeleteColumn);
+ if (parts.length == 2) {
+ if (parts[1].length != 0) {
+ // To support backcompat of deleting a cell
+ // if that is the only cell passed to the rest api
+ if (cellModelCount == 1) {
+ delete.addColumns(parts[0], parts[1]);
+ }
+ retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1])
+ .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete);
+ } else {
+ // The case of empty qualifier.
+ if (cellModelCount == 1) {
+ delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY));
+ }
+ retValue = table.checkAndMutate(key, parts[0])
+ .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete);
+ }
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column to check incorrectly specified." + CRLF)
+ .build();
+ }
+
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns "
+ + retValue);
+ }
+
+ if (!retValue) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF)
+ .build();
+ }
+ ResponseBuilder response = Response.ok();
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table", ioe);
+ }
+ }
+ }
+
+ /**
+ * Validates the input request parameters, parses columns from CellSetModel,
+ * and invokes Append on HTable.
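+ * Each cell of the single supplied row names a column and the value to append
+ * to it. When result return is enabled, the cells produced by the append are
+ * sent back as a CellSetModel.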
+ *
+ * @param model instance of CellSetModel
+ * @return Response 200 OK, 304 Not modified, 400 Bad request
+ */
+ Response append(final CellSetModel model) {
+ Table table = null;
+ Append append = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ if (model.getRows().size() != 1) {
+ servlet.getMetrics().incrementFailedAppendRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF)
+ .build();
+ }
+ RowModel rowModel = model.getRows().get(0);
+ byte[] key = rowModel.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+ if (key == null) {
+ servlet.getMetrics().incrementFailedAppendRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF)
+ .build();
+ }
+
+ append = new Append(key);
+ append.setReturnResults(returnResult);
+ int i = 0;
+ for (CellModel cell: rowModel.getCells()) {
+ byte[] col = cell.getColumn();
+ if (col == null) {
+ try {
+ col = rowspec.getColumns()[i++];
+ } catch (ArrayIndexOutOfBoundsException e) {
+ col = null;
+ }
+ }
+ if (col == null) {
+ servlet.getMetrics().incrementFailedAppendRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+ byte [][] parts = CellUtil.parseColumn(col);
+ if (parts.length != 2) {
+ servlet.getMetrics().incrementFailedAppendRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
+ .build();
+ }
+ append.addColumn(parts[0], parts[1], cell.getValue());
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("APPEND " + append.toString());
+ }
+ Result result = table.append(append);
+ if (returnResult) {
+ if (result.isEmpty()) {
+ servlet.getMetrics().incrementFailedAppendRequests(1);
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity("Append return empty." + CRLF)
+ .build();
+ }
+
+ CellSetModel rModel = new CellSetModel();
+ RowModel rRowModel = new RowModel(result.getRow());
+ for (Cell cell : result.listCells()) {
+ rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
+ cell.getTimestamp(), CellUtil.cloneValue(cell)));
+ }
+ rModel.addRow(rRowModel);
+ servlet.getMetrics().incrementSucessfulAppendRequests(1);
+ return Response.ok(rModel).build();
+ }
+ servlet.getMetrics().incrementSucessfulAppendRequests(1);
+ return Response.ok().build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedAppendRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table" + table.getName(), ioe);
+ }
+ }
+ }
+
+ /**
+ * Validates the input request parameters, parses columns from CellSetModel,
+ * and invokes Increment on HTable.
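+ * Each cell value is parsed as the long amount by which to increment the
+ * named column. As with append, the resulting cells can be returned to the
+ * client when result return is enabled.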
+ *
+ * @param model instance of CellSetModel
+ * @return Response 200 OK, 304 Not modified, 400 Bad request
+ */
+ Response increment(final CellSetModel model) {
+ Table table = null;
+ Increment increment = null;
+ try {
+ table = servlet.getTable(tableResource.getName());
+ if (model.getRows().size() != 1) {
+ servlet.getMetrics().incrementFailedIncrementRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF)
+ .build();
+ }
+ RowModel rowModel = model.getRows().get(0);
+ byte[] key = rowModel.getKey();
+ if (key == null) {
+ key = rowspec.getRow();
+ }
+ if (key == null) {
+ servlet.getMetrics().incrementFailedIncrementRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF)
+ .build();
+ }
+
+ increment = new Increment(key);
+ increment.setReturnResults(returnResult);
+ int i = 0;
+ for (CellModel cell: rowModel.getCells()) {
+ byte[] col = cell.getColumn();
+ if (col == null) {
+ try {
+ col = rowspec.getColumns()[i++];
+ } catch (ArrayIndexOutOfBoundsException e) {
+ col = null;
+ }
+ }
+ if (col == null) {
+ servlet.getMetrics().incrementFailedIncrementRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF)
+ .build();
+ }
+ byte [][] parts = CellUtil.parseColumn(col);
+ if (parts.length != 2) {
+ servlet.getMetrics().incrementFailedIncrementRequests(1);
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF)
+ .build();
+ }
+ increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue())));
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("INCREMENT " + increment.toString());
+ }
+ Result result = table.increment(increment);
+
+ if (returnResult) {
+ if (result.isEmpty()) {
+ servlet.getMetrics().incrementFailedIncrementRequests(1);
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity("Increment return empty." + CRLF)
+ .build();
+ }
+
+ CellSetModel rModel = new CellSetModel();
+ RowModel rRowModel = new RowModel(result.getRow());
+ for (Cell cell : result.listCells()) {
+ rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
+ cell.getTimestamp(), CellUtil.cloneValue(cell)));
+ }
+ rModel.addRow(rRowModel);
+ servlet.getMetrics().incrementSucessfulIncrementRequests(1);
+ return Response.ok(rModel).build();
+ }
+
+ ResponseBuilder response = Response.ok();
+ servlet.getMetrics().incrementSucessfulIncrementRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedIncrementRequests(1);
+ return processException(e);
+ } finally {
+ if (table != null) try {
+ table.close();
+ } catch (IOException ioe) {
+ LOG.debug("Exception received while closing the table " + table.getName(), ioe);
+ }
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
new file mode 100755
index 00000000..3d81c414
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java
@@ -0,0 +1,131 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+
+import org.apache.hadoop.util.StringUtils;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class RowResultGenerator extends ResultGenerator {
+ private static final Logger LOG = LoggerFactory.getLogger(RowResultGenerator.class);
+
+ private Iterator<Cell> valuesI;
+ private Cell cache;
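+ // Holds a cell handed back via putBack(), to be returned ahead of the iterator.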
+
+ public RowResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter, final boolean cacheBlocks)
+ throws IllegalArgumentException, IOException {
+ try (Table table = RESTServlet.getInstance().getTable(tableName)) {
+ Get get = new Get(rowspec.getRow());
+ if (rowspec.hasColumns()) {
+ for (byte[] col : rowspec.getColumns()) {
+ byte[][] split = CellUtil.parseColumn(col);
+ if (split.length == 1) {
+ get.addFamily(split[0]);
+ } else if (split.length == 2) {
+ get.addColumn(split[0], split[1]);
+ } else {
+ throw new IllegalArgumentException("Invalid column specifier.");
+ }
+ }
+ }
+ get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ get.readVersions(rowspec.getMaxVersions());
+ if (filter != null) {
+ get.setFilter(filter);
+ }
+ get.setCacheBlocks(cacheBlocks);
+ Result result = table.get(get);
+ if (result != null && !result.isEmpty()) {
+ valuesI = result.listCells().iterator();
+ }
+ } catch (DoNotRetryIOException e) {
+ // Warn here because Stargate will return a 404 when multiple column
+ // families were specified but one did not exist -- currently HBase will
+ // fail the whole Get.
+ // Specifying multiple columns in a URI should be uncommon usage, but
+ // logging the failure helps avoid confusion by leaving a record of what
+ // happened.
+ LOG.warn(StringUtils.stringifyException(e));
+ // Rethrow access errors so the client gets a more meaningful message than a 404.
+ if (e instanceof AccessDeniedException) {
+ throw e;
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean hasNext() {
+ if (cache != null) {
+ return true;
+ }
+ if (valuesI == null) {
+ return false;
+ }
+ return valuesI.hasNext();
+ }
+
+ @Override
+ public Cell next() {
+ if (cache != null) {
+ Cell kv = cache;
+ cache = null;
+ return kv;
+ }
+ if (valuesI == null) {
+ return null;
+ }
+ try {
+ return valuesI.next();
+ } catch (NoSuchElementException e) {
+ return null;
+ }
+ }
+
+ @Override
+ public void putBack(Cell kv) {
+ this.cache = kv;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
new file mode 100755
index 00000000..c510c9ed
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java
@@ -0,0 +1,407 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Parses a path-based row/column/timestamp specification into its component
+ * elements.
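+ *
+ * The general form handled by the parse methods below is roughly
+ * {@code <row>[,<endrow>]/<column>[,<column>...][/<timestamp>[,<endtime>]][?m=<maxVersions>][&n=<maxValues>]},
+ * where a trailing '*' on the row key selects all rows with that prefix.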
+ */
+@InterfaceAudience.Private
+public class RowSpec {
+ public static final long DEFAULT_START_TIMESTAMP = 0;
+ public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;
+
+ private byte[] row = HConstants.EMPTY_START_ROW;
+ private byte[] endRow = null;
+ private TreeSet<byte[]> columns = new TreeSet<>(Bytes.BYTES_COMPARATOR);
+ private List<String> labels = new ArrayList<>();
+ private long startTime = DEFAULT_START_TIMESTAMP;
+ private long endTime = DEFAULT_END_TIMESTAMP;
+ private int maxVersions = 1;
+ private int maxValues = Integer.MAX_VALUE;
+
+ public RowSpec(String path) throws IllegalArgumentException {
+ int i = 0;
+ while (path.charAt(i) == '/') {
+ i++;
+ }
+ i = parseRowKeys(path, i);
+ i = parseColumns(path, i);
+ i = parseTimestamp(path, i);
+ i = parseQueryParams(path, i);
+ }
+
+ private int parseRowKeys(final String path, int i)
+ throws IllegalArgumentException {
+ String startRow = null, endRow = null;
+ try {
+ StringBuilder sb = new StringBuilder();
+ char c;
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ sb.append(c);
+ i++;
+ }
+ i++;
+ String row = startRow = sb.toString();
+ int idx = startRow.indexOf(',');
+ if (idx != -1) {
+ startRow = URLDecoder.decode(row.substring(0, idx),
+ HConstants.UTF8_ENCODING);
+ endRow = URLDecoder.decode(row.substring(idx + 1),
+ HConstants.UTF8_ENCODING);
+ } else {
+ startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING);
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e);
+ }
+ // HBase does not support wildcards on row keys so we will emulate a
+ // suffix glob by synthesizing appropriate start and end row keys for
+ // table scanning
+ if (startRow.charAt(startRow.length() - 1) == '*') {
+ if (endRow != null) {
+ throw new IllegalArgumentException("invalid path: start row " +
+ "specified with wildcard");
+ }
+ this.row = Bytes.toBytes(startRow.substring(0,
+ startRow.lastIndexOf("*")));
+ this.endRow = new byte[this.row.length + 1];
+ System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
+ this.endRow[this.row.length] = (byte)255;
+ } else {
+ this.row = Bytes.toBytes(startRow);
+ if (endRow != null) {
+ this.endRow = Bytes.toBytes(endRow);
+ }
+ }
+ return i;
+ }
+
+ private int parseColumns(final String path, int i) throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ try {
+ char c;
+ StringBuilder column = new StringBuilder();
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ if (c == ',') {
+ if (column.length() < 1) {
+ throw new IllegalArgumentException("invalid path");
+ }
+ String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
+ this.columns.add(Bytes.toBytes(s));
+ column.setLength(0);
+ i++;
+ continue;
+ }
+ column.append(c);
+ i++;
+ }
+ i++;
+ // trailing list entry
+ if (column.length() > 0) {
+ String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING);
+ this.columns.add(Bytes.toBytes(s));
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ } catch (UnsupportedEncodingException e) {
+ // shouldn't happen
+ throw new RuntimeException(e);
+ }
+ return i;
+ }
+
+ private int parseTimestamp(final String path, int i)
+ throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ long time0 = 0, time1 = 0;
+ try {
+ char c = 0;
+ StringBuilder stamp = new StringBuilder();
+ while (i < path.length()) {
+ c = path.charAt(i);
+ if (c == '/' || c == ',') {
+ break;
+ }
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time0 = Long.parseLong(URLDecoder.decode(stamp.toString(),
+ HConstants.UTF8_ENCODING));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ if (c == ',') {
+ stamp = new StringBuilder();
+ i++;
+ while (i < path.length() && ((c = path.charAt(i)) != '/')) {
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time1 = Long.parseLong(URLDecoder.decode(stamp.toString(),
+ HConstants.UTF8_ENCODING));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+ if (c == '/') {
+ i++;
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ } catch (UnsupportedEncodingException e) {
+ // shouldn't happen
+ throw new RuntimeException(e);
+ }
+ if (time1 != 0) {
+ startTime = time0;
+ endTime = time1;
+ } else {
+ endTime = time0;
+ }
+ return i;
+ }
+
+ private int parseQueryParams(final String path, int i) {
+ if (i >= path.length()) {
+ return i;
+ }
+ StringBuilder query = new StringBuilder();
+ try {
+ query.append(URLDecoder.decode(path.substring(i),
+ HConstants.UTF8_ENCODING));
+ } catch (UnsupportedEncodingException e) {
+ // should not happen
+ throw new RuntimeException(e);
+ }
+ i += query.length();
+ int j = 0;
+ while (j < query.length()) {
+ char c = query.charAt(j);
+ if (c != '?' && c != '&') {
+ break;
+ }
+ if (++j > query.length()) {
+ throw new IllegalArgumentException("malformed query parameter");
+ }
+ char what = query.charAt(j);
+ if (++j > query.length()) {
+ break;
+ }
+ c = query.charAt(j);
+ if (c != '=') {
+ throw new IllegalArgumentException("malformed query parameter");
+ }
+ if (++j > query.length()) {
+ break;
+ }
+ switch (what) {
+ case 'm': {
+ StringBuilder sb = new StringBuilder();
+ while (j < query.length()) {
+ c = query.charAt(j);
+ if (c < '0' || c > '9') {
+ j--;
+ break;
+ }
+ sb.append(c);
+ j++;
+ }
+ maxVersions = Integer.parseInt(sb.toString());
+ } break;
+ case 'n': {
+ StringBuilder sb = new StringBuilder();
+ while (j < query.length()) {
+ c = query.charAt(j);
+ if (c < '0' || c > '9') {
+ j--;
+ break;
+ }
+ sb.append(c);
+ j++;
+ }
+ maxValues = Integer.parseInt(sb.toString());
+ } break;
+ default:
+ throw new IllegalArgumentException("unknown parameter '" + c + "'");
+ }
+ }
+ return i;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ Collections.addAll(this.columns, columns);
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+ long startTime, long endTime, int maxVersions, Collection<String> labels) {
+ this(startRow, endRow, columns, startTime, endTime, maxVersions);
+ if (labels != null) {
+ this.labels.addAll(labels);
+ }
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ this.columns.addAll(columns);
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public boolean isSingleRow() {
+ return endRow == null;
+ }
+
+ public int getMaxVersions() {
+ return maxVersions;
+ }
+
+ public void setMaxVersions(final int maxVersions) {
+ this.maxVersions = maxVersions;
+ }
+
+ public int getMaxValues() {
+ return maxValues;
+ }
+
+ public void setMaxValues(final int maxValues) {
+ this.maxValues = maxValues;
+ }
+
+ public boolean hasColumns() {
+ return !columns.isEmpty();
+ }
+
+ public boolean hasLabels() {
+ return !labels.isEmpty();
+ }
+
+ public byte[] getRow() {
+ return row;
+ }
+
+ public byte[] getStartRow() {
+ return row;
+ }
+
+ public boolean hasEndRow() {
+ return endRow != null;
+ }
+
+ public byte[] getEndRow() {
+ return endRow;
+ }
+
+ public void addColumn(final byte[] column) {
+ columns.add(column);
+ }
+
+ public byte[][] getColumns() {
+ return columns.toArray(new byte[columns.size()][]);
+ }
+
+ public List<String> getLabels() {
+ return labels;
+ }
+
+ public boolean hasTimestamp() {
+ return (startTime == 0) && (endTime != Long.MAX_VALUE);
+ }
+
+ public long getTimestamp() {
+ return endTime;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public void setStartTime(final long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long getEndTime() {
+ return endTime;
+ }
+
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("{startRow => '");
+ if (row != null) {
+ result.append(Bytes.toString(row));
+ }
+ result.append("', endRow => '");
+ if (endRow != null) {
+ result.append(Bytes.toString(endRow));
+ }
+ result.append("', columns => [");
+ for (byte[] col: columns) {
+ result.append(" '");
+ result.append(Bytes.toString(col));
+ result.append("'");
+ }
+ result.append(" ], startTime => ");
+ result.append(Long.toString(startTime));
+ result.append(", endTime => ");
+ result.append(Long.toString(endTime));
+ result.append(", maxVersions => ");
+ result.append(Integer.toString(maxVersions));
+ result.append(", maxValues => ");
+ result.append(Integer.toString(maxValues));
+ result.append("}");
+ return result.toString();
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
new file mode 100755
index 00000000..4a8f0bea
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
@@ -0,0 +1,215 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Base64;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+@InterfaceAudience.Private
+public class ScannerInstanceResource extends ResourceBase {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ScannerInstanceResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ ResultGenerator generator = null;
+ String id = null;
+ int batch = 1;
+
+ public ScannerInstanceResource() throws IOException { }
+
+ public ScannerInstanceResource(String table, String id,
+ ResultGenerator generator, int batch) throws IOException {
+ this.id = id;
+ this.generator = generator;
+ this.batch = batch;
+ }
+
+ @GET
+ @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo,
+ @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (generator == null) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ } else {
+ // Update the connection access time on each client next() call
+ RESTServlet.getInstance().getConnectionCache().updateConnectionAccessTime();
+ }
+ CellSetModel model = new CellSetModel();
+ RowModel rowModel = null;
+ byte[] rowKey = null;
+ int limit = batch;
+ if (maxValues > 0) {
+ limit = maxValues;
+ }
+ int count = limit;
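+ // "limit" caps the number of cells returned in this response: it defaults to
+ // the scanner's batch size and can be overridden with the "c" query
+ // parameter; the "n" parameter additionally caps the number of rows.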
+ do {
+ Cell value = null;
+ try {
+ value = generator.next();
+ } catch (IllegalStateException e) {
+ if (ScannerResource.delete(id)) {
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ }
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.GONE)
+ .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
+ .build();
+ } catch (IllegalArgumentException e) {
+ Throwable t = e.getCause();
+ if (t instanceof TableNotFoundException) {
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ }
+ throw e;
+ }
+ if (value == null) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("generator exhausted");
+ }
+ // respond with 204 (No Content) if an empty cell set would be
+ // returned
+ if (count == limit) {
+ return Response.noContent().build();
+ }
+ break;
+ }
+ if (rowKey == null) {
+ rowKey = CellUtil.cloneRow(value);
+ rowModel = new RowModel(rowKey);
+ }
+ if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
+ // if maxRows was given as a query param, stop if we would exceed the
+ // specified number of rows
+ if (maxRows > 0) {
+ if (--maxRows == 0) {
+ generator.putBack(value);
+ break;
+ }
+ }
+ model.addRow(rowModel);
+ rowKey = CellUtil.cloneRow(value);
+ rowModel = new RowModel(rowKey);
+ }
+ rowModel.addCell(
+ new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
+ value.getTimestamp(), CellUtil.cloneValue(value)));
+ } while (--count > 0);
+ model.addRow(rowModel);
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ }
+
+ @GET
+ @Produces(MIMETYPE_BINARY)
+ public Response getBinary(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " +
+ MIMETYPE_BINARY);
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ Cell value = generator.next();
+ if (value == null) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("generator exhausted");
+ }
+ return Response.noContent().build();
+ }
+ ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
+ response.cacheControl(cacheControl);
+ response.header("X-Row", Bytes.toString(Base64.getEncoder().encode(
+ CellUtil.cloneRow(value))));
+ response.header("X-Column", Bytes.toString(Base64.getEncoder().encode(
+ CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)))));
+ response.header("X-Timestamp", value.getTimestamp());
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (IllegalStateException e) {
+ if (ScannerResource.delete(id)) {
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ }
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.GONE)
+ .type(MIMETYPE_TEXT).entity("Gone" + CRLF)
+ .build();
+ }
+ }
+
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ if (ScannerResource.delete(id)) {
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ } else {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ }
+ return Response.ok().build();
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
new file mode 100755
index 00000000..f9b2d13b
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
@@ -0,0 +1,167 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.databind.JsonMappingException;
+
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+
+@InterfaceAudience.Private
+public class ScannerResource extends ResourceBase {
+
+ private static final Logger LOG = LoggerFactory.getLogger(ScannerResource.class);
+
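+ // Outstanding scanner instances, keyed by the generated scanner id that also
+ // forms the trailing path segment of the scanner's URI.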
+ static final Map<String, ScannerInstanceResource> scanners =
+ Collections.synchronizedMap(new HashMap<String, ScannerInstanceResource>());
+
+ TableResource tableResource;
+
+ /**
+ * Constructor
+ * @param tableResource
+ * @throws IOException
+ */
+ public ScannerResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ static boolean delete(final String id) {
+ ScannerInstanceResource instance = scanners.remove(id);
+ if (instance != null) {
+ instance.generator.close();
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ Response update(final ScannerModel model, final boolean replace,
+ final UriInfo uriInfo) {
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
+ RowSpec spec = null;
+ if (model.getLabels() != null) {
+ spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
+ model.getEndTime(), model.getMaxVersions(), model.getLabels());
+ } else {
+ spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
+ model.getEndTime(), model.getMaxVersions());
+ }
+
+ try {
+ Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
+ String tableName = tableResource.getName();
+ ScannerResultGenerator gen =
+ new ScannerResultGenerator(tableName, spec, filter, model.getCaching(),
+ model.getCacheBlocks(), model.getLimit());
+ String id = gen.getID();
+ ScannerInstanceResource instance =
+ new ScannerInstanceResource(tableName, id, gen, model.getBatch());
+ scanners.put(id, instance);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("new scanner: " + id);
+ }
+ UriBuilder builder = uriInfo.getAbsolutePathBuilder();
+ URI uri = builder.path(id).build();
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return Response.created(uri).build();
+ } catch (Exception e) {
+ LOG.error("Exception occurred while processing " + uriInfo.getAbsolutePath() + " : ", e);
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ if (e instanceof TableNotFoundException) {
+ return Response.status(Response.Status.NOT_FOUND)
+ .type(MIMETYPE_TEXT).entity("Not found" + CRLF)
+ .build();
+ } else if (e instanceof RuntimeException
+ || e instanceof JsonMappingException || e instanceof JsonParseException) {
+ return Response.status(Response.Status.BAD_REQUEST)
+ .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+ .build();
+ }
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response put(final ScannerModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("PUT " + uriInfo.getAbsolutePath());
+ }
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response post(final ScannerModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("POST " + uriInfo.getAbsolutePath());
+ }
+ return update(model, false, uriInfo);
+ }
+
+ @Path("{scanner: .+}")
+ public ScannerInstanceResource getScannerInstanceResource(
+ final @PathParam("scanner") String id) throws IOException {
+ ScannerInstanceResource instance = scanners.get(id);
+ if (instance == null) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return new ScannerInstanceResource();
+ } else {
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ }
+ return instance;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
new file mode 100755
index 00000000..304930c4
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java
@@ -0,0 +1,210 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.security.visibility.Authorizations;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class ScannerResultGenerator extends ResultGenerator {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ScannerResultGenerator.class);
+
+ public static Filter buildFilterFromModel(final ScannerModel model)
+ throws Exception {
+ String filter = model.getFilter();
+ if (filter == null || filter.length() == 0) {
+ return null;
+ }
+ return buildFilter(filter);
+ }
+
+ private String id;
+ private Iterator<Cell> rowI;
+ private Cell cache;
+ private ResultScanner scanner;
+ private Result cached;
+
+ public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter, final boolean cacheBlocks)
+ throws IllegalArgumentException, IOException {
+ this(tableName, rowspec, filter, -1, cacheBlocks);
+ }
+
+ public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter, final int caching, final boolean cacheBlocks)
+ throws IllegalArgumentException, IOException {
+ this(tableName, rowspec, filter, caching, cacheBlocks, -1);
+ }
+
+ public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter, final int caching, final boolean cacheBlocks, int limit) throws IOException {
+ Table table = RESTServlet.getInstance().getTable(tableName);
+ try {
+ Scan scan;
+ if (rowspec.hasEndRow()) {
+ scan = new Scan().withStartRow(rowspec.getStartRow()).withStopRow(rowspec.getEndRow());
+ } else {
+ scan = new Scan().withStartRow(rowspec.getStartRow());
+ }
+ if (rowspec.hasColumns()) {
+ byte[][] columns = rowspec.getColumns();
+ for (byte[] column: columns) {
+ byte[][] split = CellUtil.parseColumn(column);
+ if (split.length == 1) {
+ scan.addFamily(split[0]);
+ } else if (split.length == 2) {
+ scan.addColumn(split[0], split[1]);
+ } else {
+ throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
+ }
+ }
+ }
+ scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ scan.readVersions(rowspec.getMaxVersions());
+ if (filter != null) {
+ scan.setFilter(filter);
+ }
+ if (caching > 0) {
+ scan.setCaching(caching);
+ }
+ if (limit > 0) {
+ scan.setLimit(limit);
+ }
+ scan.setCacheBlocks(cacheBlocks);
+ if (rowspec.hasLabels()) {
+ scan.setAuthorizations(new Authorizations(rowspec.getLabels()));
+ }
+ scanner = table.getScanner(scan);
+ cached = null;
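+ // Build an id from the creation time plus the scanner's identity hash; it is
+ // used as the scanner's path segment and as the key in the REST scanner map.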
+ id = Long.toString(System.currentTimeMillis()) +
+ Integer.toHexString(scanner.hashCode());
+ } finally {
+ table.close();
+ }
+ }
+
+ public String getID() {
+ return id;
+ }
+
+ @Override
+ public void close() {
+ if (scanner != null) {
+ scanner.close();
+ scanner = null;
+ }
+ }
+
+ @Override
+ public boolean hasNext() {
+ if (cache != null) {
+ return true;
+ }
+ if (rowI != null && rowI.hasNext()) {
+ return true;
+ }
+ if (cached != null) {
+ return true;
+ }
+ try {
+ Result result = scanner.next();
+ if (result != null && !result.isEmpty()) {
+ cached = result;
+ }
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ return cached != null;
+ }
+
+ @Override
+ public Cell next() {
+ if (cache != null) {
+ Cell kv = cache;
+ cache = null;
+ return kv;
+ }
+ boolean loop;
+ do {
+ loop = false;
+ if (rowI != null) {
+ if (rowI.hasNext()) {
+ return rowI.next();
+ } else {
+ rowI = null;
+ }
+ }
+ if (cached != null) {
+ rowI = cached.listCells().iterator();
+ loop = true;
+ cached = null;
+ } else {
+ Result result = null;
+ try {
+ result = scanner.next();
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (TableNotEnabledException tnee) {
+ throw new IllegalStateException(tnee);
+ } catch (TableNotFoundException tnfe) {
+ throw new IllegalArgumentException(tnfe);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ if (result != null && !result.isEmpty()) {
+ rowI = result.listCells().iterator();
+ loop = true;
+ }
+ }
+ } while (loop);
+ return null;
+ }
+
+ @Override
+ public void putBack(Cell kv) {
+ this.cache = kv;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
new file mode 100755
index 00000000..65f5bba8
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import javax.xml.namespace.QName;
+import java.io.IOException;
+import java.util.Map;
+
+@InterfaceAudience.Private
+public class SchemaResource extends ResourceBase {
+ private static final Logger LOG = LoggerFactory.getLogger(SchemaResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ TableResource tableResource;
+
+ /**
+ * Constructor
+ */
+ public SchemaResource(TableResource tableResource) throws IOException {
+ super();
+ this.tableResource = tableResource;
+ }
+
+ private HTableDescriptor getTableSchema() throws IOException, TableNotFoundException {
+ try (Table table = servlet.getTable(tableResource.getName())) {
+ return new HTableDescriptor(table.getDescriptor());
+ }
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ ResponseBuilder response =
+ Response.ok(new TableSchemaModel(getTableSchema()));
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return processException(e);
+ }
+ }
+
+ private Response replace(final TableName name, final TableSchemaModel model,
+ final UriInfo uriInfo, final Admin admin) {
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ try {
+ TableDescriptorBuilder tableDescriptorBuilder =
+ TableDescriptorBuilder.newBuilder(name);
+ for (Map.Entry<QName, Object> e : model.getAny().entrySet()) {
+ tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ for (ColumnSchemaModel family : model.getColumns()) {
+ ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
+ ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
+ for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
+ columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(),
+ e.getValue().toString());
+ }
+ tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build());
+ }
+ TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
+ if (admin.tableExists(name)) {
+ admin.disableTable(name);
+ admin.modifyTable(tableDescriptor);
+ admin.enableTable(name);
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ } else {
+ try {
+ admin.createTable(tableDescriptor);
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ } catch (TableExistsException e) {
+ // race, someone else created a table with the same name
+ return Response.status(Response.Status.NOT_MODIFIED)
+ .type(MIMETYPE_TEXT).entity("Not modified" + CRLF)
+ .build();
+ }
+ }
+ return Response.created(uriInfo.getAbsolutePath()).build();
+ } catch (Exception e) {
+ LOG.info("Caught exception", e);
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+ }
+
+ private Response update(final TableName name, final TableSchemaModel model,
+ final UriInfo uriInfo, final Admin admin) {
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN)
+ .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF)
+ .build();
+ }
+ try {
+ TableDescriptorBuilder tableDescriptorBuilder =
+ TableDescriptorBuilder.newBuilder(admin.getDescriptor(name));
+ admin.disableTable(name);
+ try {
+ for (ColumnSchemaModel family : model.getColumns()) {
+ ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
+ ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName()));
+ for (Map.Entry<QName, Object> e : family.getAny().entrySet()) {
+ columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(),
+ e.getValue().toString());
+ }
+ TableDescriptor tableDescriptor = tableDescriptorBuilder.build();
+ ColumnFamilyDescriptor columnFamilyDescriptor = columnFamilyDescriptorBuilder.build();
+ if (tableDescriptor.hasColumnFamily(columnFamilyDescriptor.getName())) {
+ admin.modifyColumnFamily(name, columnFamilyDescriptor);
+ } else {
+ admin.addColumnFamily(name, columnFamilyDescriptor);
+ }
+ }
+ } catch (IOException e) {
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ } finally {
+ admin.enableTable(TableName.valueOf(tableResource.getName()));
+ }
+ servlet.getMetrics().incrementSucessfulPutRequests(1);
+ return Response.ok().build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ return processException(e);
+ }
+ }
+
+ private Response update(final TableSchemaModel model, final boolean replace,
+ final UriInfo uriInfo) {
+ try {
+ TableName name = TableName.valueOf(tableResource.getName());
+ Admin admin = servlet.getAdmin();
+ if (replace || !admin.tableExists(name)) {
+ return replace(name, model, uriInfo, admin);
+ } else {
+ return update(name, model, uriInfo, admin);
+ }
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedPutRequests(1);
+ // Avoid re-unwrapping the exception
+ if (e instanceof WebApplicationException) {
+ throw (WebApplicationException) e;
+ }
+ return processException(e);
+ }
+ }
+
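+ /*
+ * PUT fully replaces the schema (creating the table when needed), while POST
+ * merges the supplied column families into an existing schema; both delegate
+ * to update(model, replace, uriInfo) above. Illustrative request only -- the
+ * table and family names are assumptions, not part of this patch:
+ *
+ * PUT /mytable/schema
+ * Content-Type: application/json
+ * {"name":"mytable","ColumnSchema":[{"name":"cf"}]}
+ */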
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response put(final TableSchemaModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("PUT " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response post(final TableSchemaModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("PUT " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ return update(model, false, uriInfo);
+ }
+
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE",
+ justification="Expected")
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ if (servlet.isReadOnly()) {
+ return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT)
+ .entity("Forbidden" + CRLF).build();
+ }
+ try {
+ Admin admin = servlet.getAdmin();
+ try {
+ admin.disableTable(TableName.valueOf(tableResource.getName()));
+ } catch (TableNotEnabledException e) { /* this is what we want anyway */ }
+ admin.deleteTable(TableName.valueOf(tableResource.getName()));
+ servlet.getMetrics().incrementSucessfulDeleteRequests(1);
+ return Response.ok().build();
+ } catch (Exception e) {
+ servlet.getMetrics().incrementFailedDeleteRequests(1);
+ return processException(e);
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
new file mode 100755
index 00000000..abcd87cf
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -0,0 +1,117 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Map;
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class StorageClusterStatusResource extends ResourceBase {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(StorageClusterStatusResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public StorageClusterStatusResource() throws IOException {
+ super();
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ ClusterMetrics status = servlet.getAdmin().getClusterMetrics(
+ EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS));
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.setRegions(status.getRegionCount());
+ model.setRequests(status.getRequestCount());
+ model.setAverageLoad(status.getAverageLoad());
+ for (Map.Entry<ServerName, ServerMetrics> entry: status.getLiveServerMetrics().entrySet()) {
+ ServerName sn = entry.getKey();
+ ServerMetrics load = entry.getValue();
+ StorageClusterStatusModel.Node node =
+ model.addLiveNode(
+ sn.getHostname() + ":" +
+ Integer.toString(sn.getPort()),
+ sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE),
+ (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE));
+ node.setRequests(load.getRequestCount());
+ for (RegionMetrics region: load.getRegionMetrics().values()) {
+ node.addRegion(region.getRegionName(), region.getStoreCount(),
+ region.getStoreFileCount(),
+ (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE),
+ (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
+ (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
+ region.getReadRequestCount(),
+ region.getWriteRequestCount(),
+ (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
+ (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
+ (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE),
+ region.getCompactingCellCount(),
+ region.getCompactedCellCount());
+ }
+ }
+ for (ServerName name: status.getDeadServerNames()) {
+ model.addDeadNode(name.toString());
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
new file mode 100755
index 00000000..67cc8c46
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -0,0 +1,81 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.hadoop.hbase.ClusterMetrics.Option;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.util.EnumSet;
+
+@InterfaceAudience.Private
+public class StorageClusterVersionResource extends ResourceBase {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(StorageClusterVersionResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public StorageClusterVersionResource() throws IOException {
+ super();
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ StorageClusterVersionModel model = new StorageClusterVersionModel();
+ model.setVersion(
+ servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION))
+ .getHBaseVersion());
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ } catch (IOException e) {
+ servlet.getMetrics().incrementFailedGetRequests(1);
+ return Response.status(Response.Status.SERVICE_UNAVAILABLE)
+ .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF)
+ .build();
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
new file mode 100755
index 00000000..0fad4427
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
@@ -0,0 +1,205 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.ParseFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.Encoded;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+import java.io.IOException;
+import java.util.List;
+
+@InterfaceAudience.Private
+public class TableResource extends ResourceBase {
+
+ String table;
+ private static final Logger LOG = LoggerFactory.getLogger(TableResource.class);
+
+ /**
+ * Constructor
+ * @param table the table name
+ * @throws IOException
+ */
+ public TableResource(String table) throws IOException {
+ super();
+ this.table = table;
+ }
+
+ /** @return the table name */
+ String getName() {
+ return table;
+ }
+
+ /**
+ * @return true if the table exists
+ * @throws IOException
+ */
+ boolean exists() throws IOException {
+ return servlet.getAdmin().tableExists(TableName.valueOf(table));
+ }
+
+ @Path("exists")
+ public ExistsResource getExistsResource() throws IOException {
+ return new ExistsResource(this);
+ }
+
+ @Path("regions")
+ public RegionsResource getRegionsResource() throws IOException {
+ return new RegionsResource(this);
+ }
+
+ @Path("scanner")
+ public ScannerResource getScannerResource() throws IOException {
+ return new ScannerResource(this);
+ }
+
+ @Path("schema")
+ public SchemaResource getSchemaResource() throws IOException {
+ return new SchemaResource(this);
+ }
+
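+ /*
+ * Multi-row GET. Illustrative request only -- the row keys are assumptions:
+ *
+ * GET /mytable/multiget?row=row1&row=row2&v=3
+ *
+ * The optional "v" query parameter caps the number of cell versions returned.
+ */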
+ @Path("{multiget: multiget.*}")
+ public MultiRowResource getMultipleRowResource(final @QueryParam("v") String versions,
+ @PathParam("multiget") String path) throws IOException {
+ return new MultiRowResource(this, versions, path.replace("multiget", "").replace("/", ""));
+ }
+
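+ /*
+ * Single-row access. The row spec is laid out as
+ * /table/row[/family:qualifier[/timestamp]], for example (illustrative only):
+ *
+ * GET /mytable/row1/cf:col1
+ */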
+ @Path("{rowspec: [^*]+}")
+ public RowResource getRowResource(
+ // We need the @Encoded decorator so Jersey won't urldecode before
+ // the RowSpec constructor has a chance to parse
+ final @PathParam("rowspec") @Encoded String rowspec,
+ final @QueryParam("v") String versions,
+ final @QueryParam("check") String check,
+ final @QueryParam("rr") String returnResult) throws IOException {
+ return new RowResource(this, rowspec, versions, check, returnResult);
+ }
+
+ @Path("{suffixglobbingspec: .*\\*/.+}")
+ public RowResource getRowResourceWithSuffixGlobbing(
+ // We need the @Encoded decorator so Jersey won't urldecode before
+ // the RowSpec constructor has a chance to parse
+ final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec,
+ final @QueryParam("v") String versions,
+ final @QueryParam("check") String check,
+ final @QueryParam("rr") String returnResult) throws IOException {
+ return new RowResource(this, suffixglobbingspec, versions, check, returnResult);
+ }
+
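+ /*
+ * Trailing-glob table scan, for example (illustrative URL; the parameter
+ * names correspond to the Constants.SCAN_* query parameters below):
+ *
+ * GET /mytable/prefix*?limit=100&maxversions=2
+ *
+ * Everything before the '*' becomes a PrefixFilter and, when no explicit
+ * start row is given, the scan start row.
+ */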
+ @Path("{scanspec: .*[*]$}")
+ public TableScanResource getScanResource(
+ final @PathParam("scanspec") String scanSpec,
+ @DefaultValue(Integer.MAX_VALUE + "")
+ @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit,
+ @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow,
+ @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow,
+ @QueryParam(Constants.SCAN_COLUMN) List<String> column,
+ @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions,
+ @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
+ @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
+ @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
+ @DefaultValue("true") @QueryParam(Constants.SCAN_CACHE_BLOCKS) boolean cacheBlocks,
+ @DefaultValue("false") @QueryParam(Constants.SCAN_REVERSED) boolean reversed,
+ @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String paramFilter) {
+ try {
+ Filter prefixFilter = null;
+ Scan tableScan = new Scan();
+ if (scanSpec.indexOf('*') > 0) {
+ String prefix = scanSpec.substring(0, scanSpec.indexOf('*'));
+ byte[] prefixBytes = Bytes.toBytes(prefix);
+ prefixFilter = new PrefixFilter(Bytes.toBytes(prefix));
+ if (startRow.isEmpty()) {
+ tableScan.withStartRow(prefixBytes);
+ }
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow
+ + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
+ + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
+ + maxVersions + " Batch Size => " + batchSize);
+ }
+ Table hTable = RESTServlet.getInstance().getTable(this.table);
+ tableScan.setBatch(batchSize);
+ tableScan.readVersions(maxVersions);
+ tableScan.setTimeRange(startTime, endTime);
+ if (!startRow.isEmpty()) {
+ tableScan.withStartRow(Bytes.toBytes(startRow));
+ }
+ tableScan.withStopRow(Bytes.toBytes(endRow));
+ for (String col : column) {
+ byte [][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim()));
+ if (parts.length == 1) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Scan family : " + Bytes.toStringBinary(parts[0]));
+ }
+ tableScan.addFamily(parts[0]);
+ } else if (parts.length == 2) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0])
+ + " " + Bytes.toStringBinary(parts[1]));
+ }
+ tableScan.addColumn(parts[0], parts[1]);
+ } else {
+ throw new IllegalArgumentException("Invalid column specifier.");
+ }
+ }
+ FilterList filterList = new FilterList();
+ if (StringUtils.isNotEmpty(paramFilter)) {
+ ParseFilter pf = new ParseFilter();
+ Filter parsedParamFilter = pf.parseFilterString(paramFilter);
+ if (parsedParamFilter != null) {
+ filterList.addFilter(parsedParamFilter);
+ }
+ if (prefixFilter != null) {
+ filterList.addFilter(prefixFilter);
+ }
+ }
+ if (filterList.size() > 0) {
+ tableScan.setFilter(filterList);
+ }
+
+ int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
+ tableScan.setCaching(fetchSize);
+ tableScan.setReversed(reversed);
+ tableScan.setCacheBlocks(cacheBlocks);
+ return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit);
+ } catch (IOException exp) {
+ servlet.getMetrics().incrementFailedScanRequests(1);
+ processException(exp);
+ LOG.warn(exp.toString(), exp);
+ return null;
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
new file mode 100755
index 00000000..8f5535e8
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java
@@ -0,0 +1,158 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.HeaderParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.StreamingOutput;
+import javax.ws.rs.core.UriInfo;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@InterfaceAudience.Private
+public class TableScanResource extends ResourceBase {
+ private static final Logger LOG = LoggerFactory.getLogger(TableScanResource.class);
+
+ TableResource tableResource;
+ ResultScanner results;
+ int userRequestedLimit;
+
+ public TableScanResource(ResultScanner scanner, int userRequestedLimit) throws IOException {
+ super();
+ this.results = scanner;
+ this.userRequestedLimit = userRequestedLimit;
+ }
+
+ @GET
+ @Produces({ Constants.MIMETYPE_XML, Constants.MIMETYPE_JSON })
+ public CellSetModelStream get(final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ final int rowsToSend = userRequestedLimit;
+ servlet.getMetrics().incrementSucessfulScanRequests(1);
+ final Iterator<Result> itr = results.iterator();
+ return new CellSetModelStream(new ArrayList<RowModel>() {
+ @Override
+ public Iterator<RowModel> iterator() {
+ return new Iterator<RowModel>() {
+ int count = rowsToSend;
+
+ @Override
+ public boolean hasNext() {
+ return count > 0 && itr.hasNext();
+ }
+
+ @Override
+ public RowModel next() {
+ Result rs = itr.next();
+ if ((rs == null) || (count <= 0)) {
+ return null;
+ }
+ byte[] rowKey = rs.getRow();
+ RowModel rModel = new RowModel(rowKey);
+ List<Cell> kvs = rs.listCells();
+ for (Cell kv : kvs) {
+ rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv),
+ kv.getTimestamp(), CellUtil.cloneValue(kv)));
+ }
+ count--;
+ if (count == 0) {
+ results.close();
+ }
+ return rModel;
+ }
+ };
+ }
+ });
+ }
+
+ @GET
+ @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF })
+ public Response getProtobuf(
+ final @Context UriInfo uriInfo,
+ final @HeaderParam("Accept") String contentType) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " +
+ MIMETYPE_BINARY);
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10);
+ StreamingOutput stream = new ProtobufStreamingOutput(this.results, contentType,
+ userRequestedLimit, fetchSize);
+ servlet.getMetrics().incrementSucessfulScanRequests(1);
+ ResponseBuilder response = Response.ok(stream);
+ response.header("content-type", contentType);
+ return response.build();
+ } catch (Exception exp) {
+ servlet.getMetrics().incrementFailedScanRequests(1);
+ processException(exp);
+ LOG.warn(exp.toString(), exp);
+ return null;
+ }
+ }
+
+ @XmlRootElement(name = "CellSet")
+ @XmlAccessorType(XmlAccessType.FIELD)
+ public static class CellSetModelStream {
+ // JAXB needs an arraylist for streaming
+ @XmlElement(name = "Row")
+ @JsonIgnore
+ private ArrayList<RowModel> Row;
+
+ public CellSetModelStream() {
+ }
+
+ public CellSetModelStream(final ArrayList<RowModel> rowList) {
+ this.Row = rowList;
+ }
+
+ // jackson needs an iterator for streaming
+ @JsonProperty("Row")
+ public Iterator<RowModel> getIterator() {
+ return Row.iterator();
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
new file mode 100755
index 00000000..c2123341
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
@@ -0,0 +1,103 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest;
+
+import java.io.IOException;
+
+import javax.servlet.ServletContext;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.rest.model.VersionModel;
+
+/**
+ * Implements REST software version reporting
+ *
+ * /version/rest
+ *
+ * /version (alias for /version/rest)
+ */
+@InterfaceAudience.Private
+public class VersionResource extends ResourceBase {
+
+ private static final Logger LOG = LoggerFactory.getLogger(VersionResource.class);
+
+ static CacheControl cacheControl;
+ static {
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Constructor
+ * @throws IOException
+ */
+ public VersionResource() throws IOException {
+ super();
+ }
+
+ /**
+ * Build a response for a version request.
+ * @param context servlet context
+ * @param uriInfo (JAX-RS context variable) request URL
+ * @return a response for a version request
+ */
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
+ MIMETYPE_PROTOBUF_IETF})
+ public Response get(final @Context ServletContext context,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ ResponseBuilder response = Response.ok(new VersionModel(context));
+ response.cacheControl(cacheControl);
+ servlet.getMetrics().incrementSucessfulGetRequests(1);
+ return response.build();
+ }
+
+ /**
+ * Dispatch to StorageClusterVersionResource
+ */
+ @Path("cluster")
+ public StorageClusterVersionResource getClusterVersionResource()
+ throws IOException {
+ return new StorageClusterVersionResource();
+ }
+
+ /**
+ * Dispatch /version/rest to self.
+ */
+ @Path("rest")
+ public VersionResource getVersionResource() {
+ return this;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
new file mode 100755
index 00000000..c2bc7c02
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
@@ -0,0 +1,727 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.entity.InputStreamEntity;
+import org.apache.http.impl.client.DefaultHttpClient;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.params.CoreConnectionPNames;
+import org.apache.http.util.EntityUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * A wrapper around HttpClient which provides some useful function and
+ * semantics for interacting with the REST gateway.
+ */
+@InterfaceAudience.Public
+public class Client {
+ public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];
+
+ private static final Logger LOG = LoggerFactory.getLogger(Client.class);
+
+ private HttpClient httpClient;
+ private Cluster cluster;
+ private boolean sslEnabled;
+ private HttpResponse resp;
+ private HttpGet httpGet = null;
+
+ private Map<String, String> extraHeaders;
+
+ private static final String AUTH_COOKIE = "hadoop.auth";
+ private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "=";
+ private static final String COOKIE = "Cookie";
+
+ /**
+ * Default Constructor
+ */
+ public Client() {
+ this(null);
+ }
+
+ private void initialize(Cluster cluster, boolean sslEnabled) {
+ this.cluster = cluster;
+ this.sslEnabled = sslEnabled;
+ extraHeaders = new ConcurrentHashMap<>();
+ String clspath = System.getProperty("java.class.path");
+ LOG.debug("classpath " + clspath);
+ this.httpClient = new DefaultHttpClient();
+ this.httpClient.getParams().setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 2000);
+ }
+
+ /**
+ * Constructor
+ * @param cluster the cluster definition
+ */
+ public Client(Cluster cluster) {
+ initialize(cluster, false);
+ }
+
+ /**
+ * Constructor
+ * @param cluster the cluster definition
+ * @param sslEnabled enable SSL or not
+ */
+ public Client(Cluster cluster, boolean sslEnabled) {
+ initialize(cluster, sslEnabled);
+ }
+
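+ /*
+ * Usage sketch (illustrative only; the gateway address, path and charset are
+ * assumptions, not defined by this class):
+ *
+ * Cluster cluster = new Cluster().add("localhost", 8080);
+ * Client client = new Client(cluster); // pass 'true' as well to use SSL
+ * Response r = client.get("/version/rest", "application/json");
+ * if (r.getCode() == 200) {
+ * System.out.println(new String(r.getBody(), java.nio.charset.StandardCharsets.UTF_8));
+ * }
+ * client.shutdown();
+ */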
+ /**
+ * Shut down the client. Close any open persistent connections.
+ */
+ public void shutdown() {
+ }
+
+ /**
+ * @return the wrapped HttpClient
+ */
+ public HttpClient getHttpClient() {
+ return httpClient;
+ }
+
+ /**
+ * Add an extra header. Extra headers are added to every subsequent request
+ * until they are removed. If a header is no longer needed, the client must
+ * remove it explicitly.
+ */
+ public void addExtraHeader(final String name, final String value) {
+ extraHeaders.put(name, value);
+ }
+
+ /**
+ * Get an extra header value.
+ */
+ public String getExtraHeader(final String name) {
+ return extraHeaders.get(name);
+ }
+
+ /**
+ * Get all extra headers (read-only).
+ */
+ public Map<String, String> getExtraHeaders() {
+ return Collections.unmodifiableMap(extraHeaders);
+ }
+
+ /**
+ * Remove an extra header.
+ */
+ public void removeExtraHeader(final String name) {
+ extraHeaders.remove(name);
+ }
+
+ /**
+ * Execute a transaction method given only the path. Will select at random
+ * one of the members of the supplied cluster definition and iterate through
+ * the list until a transaction can be successfully completed. The
+ * definition of success here is a complete HTTP transaction, irrespective
+ * of result code.
+ * @param cluster the cluster definition
+ * @param method the transaction method
+ * @param headers HTTP header values to send
+ * @param path the properly urlencoded path
+ * @return the HTTP response
+ * @throws IOException
+ */
+ public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method,
+ Header[] headers, String path) throws IOException {
+ IOException lastException;
+ if (cluster.nodes.size() < 1) {
+ throw new IOException("Cluster is empty");
+ }
+ int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random());
+ int i = start;
+ do {
+ cluster.lastHost = cluster.nodes.get(i);
+ try {
+ StringBuilder sb = new StringBuilder();
+ if (sslEnabled) {
+ sb.append("https://");
+ } else {
+ sb.append("http://");
+ }
+ sb.append(cluster.lastHost);
+ sb.append(path);
+ URI uri = new URI(sb.toString());
+ if (method instanceof HttpPut) {
+ HttpPut put = new HttpPut(uri);
+ put.setEntity(((HttpPut) method).getEntity());
+ put.setHeaders(method.getAllHeaders());
+ method = put;
+ } else if (method instanceof HttpGet) {
+ method = new HttpGet(uri);
+ } else if (method instanceof HttpHead) {
+ method = new HttpHead(uri);
+ } else if (method instanceof HttpDelete) {
+ method = new HttpDelete(uri);
+ } else if (method instanceof HttpPost) {
+ HttpPost post = new HttpPost(uri);
+ post.setEntity(((HttpPost) method).getEntity());
+ post.setHeaders(method.getAllHeaders());
+ method = post;
+ }
+ return executeURI(method, headers, uri.toString());
+ } catch (IOException e) {
+ lastException = e;
+ } catch (URISyntaxException use) {
+ lastException = new IOException(use);
+ }
+ } while (++i != start && i < cluster.nodes.size());
+ throw lastException;
+ }
+
+ /**
+ * Execute a transaction method given a complete URI.
+ * @param method the transaction method
+ * @param headers HTTP header values to send
+ * @param uri a properly urlencoded URI
+ * @return the HTTP response
+ * @throws IOException
+ */
+ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri)
+ throws IOException {
+ // method.setURI(new URI(uri, true));
+ for (Map.Entry<String, String> e: extraHeaders.entrySet()) {
+ method.addHeader(e.getKey(), e.getValue());
+ }
+ if (headers != null) {
+ for (Header header: headers) {
+ method.addHeader(header);
+ }
+ }
+ long startTime = System.currentTimeMillis();
+ if (resp != null) EntityUtils.consumeQuietly(resp.getEntity());
+ resp = httpClient.execute(method);
+ if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) {
+ // Authentication error
+ LOG.debug("Performing negotiation with the server.");
+ negotiate(method, uri);
+ resp = httpClient.execute(method);
+ }
+
+ long endTime = System.currentTimeMillis();
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " +
+ resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms");
+ }
+ return resp;
+ }
+
+ /**
+ * Execute a transaction method. Will call either executePathOnly
+ * or executeURI depending on whether a path only is supplied in
+ * 'path', or if a complete URI is passed instead, respectively.
+ * @param cluster the cluster definition
+ * @param method the HTTP method
+ * @param headers HTTP header values to send
+ * @param path the properly urlencoded path or URI
+ * @return the HTTP response
+ * @throws IOException
+ */
+ public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers,
+ String path) throws IOException {
+ if (path.startsWith("/")) {
+ return executePathOnly(cluster, method, headers, path);
+ }
+ return executeURI(method, headers, path);
+ }
+
+ /**
+ * Initiate client side Kerberos negotiation with the server.
+ * @param method method to inject the authentication token into.
+ * @param uri the String to parse as a URL.
+ * @throws IOException if unknown protocol is found.
+ */
+ private void negotiate(HttpUriRequest method, String uri) throws IOException {
+ try {
+ AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+ KerberosAuthenticator authenticator = new KerberosAuthenticator();
+ authenticator.authenticate(new URL(uri), token);
+ // Inject the obtained negotiated token in the method cookie
+ injectToken(method, token);
+ } catch (AuthenticationException e) {
+ LOG.error("Failed to negotiate with the server.", e);
+ throw new IOException(e);
+ }
+ }
+
+ /**
+ * Helper method that injects an authentication token to send with the method.
+ * @param method method to inject the authentication token into.
+ * @param token authentication token to inject.
+ */
+ private void injectToken(HttpUriRequest method, AuthenticatedURL.Token token) {
+ String t = token.toString();
+ if (t != null) {
+ if (!t.startsWith("\"")) {
+ t = "\"" + t + "\"";
+ }
+ method.addHeader(COOKIE, AUTH_COOKIE_EQ + t);
+ }
+ }
+
+ /**
+ * @return the cluster definition
+ */
+ public Cluster getCluster() {
+ return cluster;
+ }
+
+ /**
+ * @param cluster the cluster definition
+ */
+ public void setCluster(Cluster cluster) {
+ this.cluster = cluster;
+ }
+
+ /**
+ * Send a HEAD request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response head(String path) throws IOException {
+ return head(cluster, path, null);
+ }
+
+ /**
+ * Send a HEAD request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response head(Cluster cluster, String path, Header[] headers)
+ throws IOException {
+ HttpHead method = new HttpHead(path);
+ try {
+ HttpResponse resp = execute(cluster, method, headers, path);
+ return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), null);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path) throws IOException {
+ return get(cluster, path);
+ }
+
+ /**
+ * Send a GET request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster cluster, String path) throws IOException {
+ return get(cluster, path, EMPTY_HEADER_ARRAY);
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @param accept Accept header value
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path, String accept) throws IOException {
+ return get(cluster, path, accept);
+ }
+
+ /**
+ * Send a GET request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param accept Accept header value
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster cluster, String path, String accept)
+ throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new BasicHeader("Accept", accept);
+ return get(cluster, path, headers);
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request,
+ * Accept must be supplied
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path, Header[] headers) throws IOException {
+ return get(cluster, path, headers);
+ }
+
+ /**
+ * Returns the response body of the HTTPResponse, if any, as an array of bytes.
+ * If response body is not available or cannot be read, returns null
+ *
+ * Note: This will cause the entire response body to be buffered in memory. A
+ * malicious server may easily exhaust all the VM memory. It is strongly
+ * recommended to use getResponseAsStream if the content length of the response
+ * is unknown or reasonably large.
+ *
+ * @param resp HttpResponse
+ * @return The response body, null if body is empty
+ * @throws IOException If an I/O (transport) problem occurs while obtaining the
+ * response body.
+ */
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value =
+ "NP_LOAD_OF_KNOWN_NULL_VALUE", justification = "null is possible return value")
+ public static byte[] getResponseBody(HttpResponse resp) throws IOException {
+ if (resp.getEntity() == null) return null;
+ try (InputStream instream = resp.getEntity().getContent()) {
+ if (instream != null) {
+ long contentLength = resp.getEntity().getContentLength();
+ if (contentLength > Integer.MAX_VALUE) {
+ //guard integer cast from overflow
+ throw new IOException("Content too large to be buffered: " + contentLength +" bytes");
+ }
+ ByteArrayOutputStream outstream = new ByteArrayOutputStream(
+ contentLength > 0 ? (int) contentLength : 4*1024);
+ byte[] buffer = new byte[4096];
+ int len;
+ while ((len = instream.read(buffer)) > 0) {
+ outstream.write(buffer, 0, len);
+ }
+ outstream.close();
+ return outstream.toByteArray();
+ }
+ return null;
+ }
+ }
+
+ /**
+ * Send a GET request
+ * @param c the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster c, String path, Header[] headers)
+ throws IOException {
+ if (httpGet != null) {
+ httpGet.releaseConnection();
+ }
+ httpGet = new HttpGet(path);
+ HttpResponse resp = execute(c, httpGet, headers, path);
+ return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(),
+ resp, resp.getEntity() == null ? null : resp.getEntity().getContent());
+ }
+
+ /**
+ * Send a PUT request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(String path, String contentType, byte[] content)
+ throws IOException {
+ return put(cluster, path, contentType, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @param extraHdr extra Header to send
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(String path, String contentType, byte[] content, Header extraHdr)
+ throws IOException {
+ return put(cluster, path, contentType, content, extraHdr);
+ }
+
+ /**
+ * Send a PUT request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException for error
+ */
+ public Response put(Cluster cluster, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new BasicHeader("Content-Type", contentType);
+ return put(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @param extraHdr additional Header to send
+ * @return a Response object with response detail
+ * @throws IOException for error
+ */
+ public Response put(Cluster cluster, String path, String contentType,
+ byte[] content, Header extraHdr) throws IOException {
+ int cnt = extraHdr == null ? 1 : 2;
+ Header[] headers = new Header[cnt];
+ headers[0] = new BasicHeader("Content-Type", contentType);
+ if (extraHdr != null) {
+ headers[1] = extraHdr;
+ }
+ return put(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(String path, Header[] headers, byte[] content)
+ throws IOException {
+ return put(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(Cluster cluster, String path, Header[] headers,
+ byte[] content) throws IOException {
+ HttpPut method = new HttpPut(path);
+ try {
+ method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length));
+ HttpResponse resp = execute(cluster, method, headers, path);
+ headers = resp.getAllHeaders();
+ content = getResponseBody(resp);
+ return new Response(resp.getStatusLine().getStatusCode(), headers, content);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a POST request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(String path, String contentType, byte[] content)
+ throws IOException {
+ return post(cluster, path, contentType, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @param extraHdr additional Header to send
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(String path, String contentType, byte[] content, Header extraHdr)
+ throws IOException {
+ return post(cluster, path, contentType, content, extraHdr);
+ }
+
+ /**
+ * Send a POST request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException for error
+ */
+ public Response post(Cluster cluster, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new BasicHeader("Content-Type", contentType);
+ return post(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @param extraHdr additional Header to send
+ * @return a Response object with response detail
+ * @throws IOException for error
+ */
+ public Response post(Cluster cluster, String path, String contentType,
+ byte[] content, Header extraHdr) throws IOException {
+ int cnt = extraHdr == null ? 1 : 2;
+ Header[] headers = new Header[cnt];
+ headers[0] = new BasicHeader("Content-Type", contentType);
+ if (extraHdr != null) {
+ headers[1] = extraHdr;
+ }
+ return post(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(String path, Header[] headers, byte[] content)
+ throws IOException {
+ return post(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(Cluster cluster, String path, Header[] headers,
+ byte[] content) throws IOException {
+ HttpPost method = new HttpPost(path);
+ try {
+ method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length));
+ HttpResponse resp = execute(cluster, method, headers, path);
+ headers = resp.getAllHeaders();
+ content = getResponseBody(resp);
+ return new Response(resp.getStatusLine().getStatusCode(), headers, content);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a DELETE request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response delete(String path) throws IOException {
+ return delete(cluster, path);
+ }
+
+ /**
+ * Send a DELETE request
+ * @param path the path or URI
+ * @param extraHdr additional Header to send
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response delete(String path, Header extraHdr) throws IOException {
+ return delete(cluster, path, extraHdr);
+ }
+
+ /**
+ * Send a DELETE request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException for error
+ */
+ public Response delete(Cluster cluster, String path) throws IOException {
+ HttpDelete method = new HttpDelete(path);
+ try {
+ HttpResponse resp = execute(cluster, method, null, path);
+ Header[] headers = resp.getAllHeaders();
+ byte[] content = getResponseBody(resp);
+ return new Response(resp.getStatusLine().getStatusCode(), headers, content);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+
+ /**
+ * Send a DELETE request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param extraHdr additional Header to send
+ * @return a Response object with response detail
+ * @throws IOException for error
+ */
+ public Response delete(Cluster cluster, String path, Header extraHdr) throws IOException {
+ HttpDelete method = new HttpDelete(path);
+ try {
+ Header[] headers = { extraHdr };
+ HttpResponse resp = execute(cluster, method, headers, path);
+ headers = resp.getAllHeaders();
+ byte[] content = getResponseBody(resp);
+ return new Response(resp.getStatusLine().getStatusCode(), headers, content);
+ } finally {
+ method.releaseConnection();
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
new file mode 100755
index 00000000..00847082
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
@@ -0,0 +1,108 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.client;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A list of 'host:port' addresses of HTTP servers operating as a single
+ * entity, for example multiple redundant web service gateways.
+ */
+@InterfaceAudience.Public
+public class Cluster {
+ protected List<String> nodes =
+ Collections.synchronizedList(new ArrayList<String>());
+ protected String lastHost;
+
+ /**
+ * Constructor
+ */
+ public Cluster() {}
+
+ /**
+ * Constructor
+ * @param nodes a list of service locations, in 'host:port' format
+ */
+ public Cluster(List<String> nodes) {
+ this.nodes.addAll(nodes);
+ }
+
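+ /*
+ * Example (hostnames are illustrative assumptions): two redundant REST
+ * gateways registered fluently:
+ *
+ * Cluster c = new Cluster()
+ * .add("rest-gw-1.example.com", 8080)
+ * .add("rest-gw-2.example.com", 8080);
+ */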
+ /**
+ * @return true if no locations have been added, false otherwise
+ */
+ public boolean isEmpty() {
+ return nodes.isEmpty();
+ }
+
+ /**
+ * Add a node to the cluster
+ * @param node the service location in 'host:port' format
+ */
+ public Cluster add(String node) {
+ nodes.add(node);
+ return this;
+ }
+
+ /**
+ * Add a node to the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster add(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return add(sb.toString());
+ }
+
+ /**
+ * Remove a node from the cluster
+ * @param node the service location in 'host:port' format
+ */
+ public Cluster remove(String node) {
+ nodes.remove(node);
+ return this;
+ }
+
+ /**
+ * Remove a node from the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster remove(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return remove(sb.toString());
+ }
+
+ @Override public String toString() {
+ return "Cluster{" +
+ "nodes=" + nodes +
+ ", lastHost='" + lastHost + '\'' +
+ '}';
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
new file mode 100755
index 00000000..0e91005a
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java
@@ -0,0 +1,170 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.client;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The HTTP result code, response headers, and body of an HTTP response.
+ */
+@InterfaceAudience.Public
+public class Response {
+ private static final Logger LOG = LoggerFactory.getLogger(Response.class);
+
+ private int code;
+ private Header[] headers;
+ private byte[] body;
+ private HttpResponse resp;
+ private InputStream stream;
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ */
+ public Response(int code) {
+ this(code, null, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ */
+ public Response(int code, Header[] headers) {
+ this(code, headers, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ * @param body the response body, can be null
+ */
+ public Response(int code, Header[] headers, byte[] body) {
+ this.code = code;
+ this.headers = headers;
+ this.body = body;
+ }
+
+ /**
+ * Constructor. Note: this is not thread-safe
+ *
+ * @param code the HTTP response code
+ * @param headers headers the HTTP response headers
+ * @param resp the response
+ * @param in Inputstream if the response had one.
+ */
+ public Response(int code, Header[] headers, HttpResponse resp, InputStream in) {
+ this.code = code;
+ this.headers = headers;
+ this.body = null;
+ this.resp = resp;
+ this.stream = in;
+ }
+
+ /**
+ * @return the HTTP response code
+ */
+ public int getCode() {
+ return code;
+ }
+
+ /**
+ * Gets the input stream instance.
+ *
+ * @return an instance of InputStream class.
+ */
+ public InputStream getStream(){
+ return this.stream;
+ }
+
+ /**
+ * @return the HTTP response headers
+ */
+ public Header[] getHeaders() {
+ return headers;
+ }
+
+ public String getHeader(String key) {
+ for (Header header : headers) {
+ if (header.getName().equalsIgnoreCase(key)) {
+ return header.getValue();
+ }
+ }
+ return null;
+ }
+
+ /**
+ * @return the value of the Location header
+ */
+ public String getLocation() {
+ return getHeader("Location");
+ }
+
+ /**
+ * @return true if a response body was sent
+ */
+ public boolean hasBody() {
+ return body != null;
+ }
+
+ /**
+ * @return the HTTP response body
+ */
+ public byte[] getBody() {
+ if (body == null) {
+ try {
+ body = Client.getResponseBody(resp);
+ } catch (IOException ioe) {
+ LOG.debug("encountered ioe when obtaining body", ioe);
+ }
+ }
+ return body;
+ }
+
+ /**
+ * @param code the HTTP response code
+ */
+ public void setCode(int code) {
+ this.code = code;
+ }
+
+ /**
+ * @param headers the HTTP response headers
+ */
+ public void setHeaders(Header[] headers) {
+ this.headers = headers;
+ }
+
+ /**
+ * @param body the response body
+ */
+ public void setBody(byte[] body) {
+ this.body = body;
+ }
+}
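A hedged sketch of consuming a Response on the client side; the Client.get(String) call is an assumption about the companion Client class referenced above (via Client.getResponseBody), while the accessors are the ones defined in this file:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Response;

public class ResponseExample {
  // client.get(path) returning a Response is assumed here for illustration.
  static void printClusterVersion(Client client) throws Exception {
    Response response = client.get("/version/cluster");
    if (response.getCode() == 200 && response.hasBody()) {
      System.out.println(new String(response.getBody(), StandardCharsets.UTF_8));
    } else {
      System.err.println("Unexpected status: " + response.getCode());
    }
  }
}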
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
new file mode 100755
index 00000000..b9b8a006
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.filter;
+
+import static org.apache.hadoop.hbase.rest.Constants.REST_AUTHENTICATION_PRINCIPAL;
+import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_INTERFACE;
+import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_NAMESERVER;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Properties;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.rest.RESTServer;
+import org.apache.hadoop.hbase.util.DNS;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class AuthFilter extends AuthenticationFilter {
+ private static final Logger LOG = LoggerFactory.getLogger(AuthFilter.class);
+ private static final String REST_PREFIX = "hbase.rest.authentication.";
+ private static final int REST_PREFIX_LEN = REST_PREFIX.length();
+
+ /**
+ * Returns the configuration to be used by the authentication filter
+ * to initialize the authentication handler.
+ *
+ * This filter reads all HBase configuration properties and passes those
+ * starting with REST_PREFIX to the authentication handler, which makes it
+ * possible to plug in different authentication handlers.
+ */
+ @Override
+ protected Properties getConfiguration(
+ String configPrefix, FilterConfig filterConfig) throws ServletException {
+ Properties props = super.getConfiguration(configPrefix, filterConfig);
+ //setting the cookie path to root '/' so it is used for all resources.
+ props.setProperty(AuthenticationFilter.COOKIE_PATH, "/");
+
+ Configuration conf = null;
+ // Dirty hack to get at the RESTServer's configuration. These should be pulled out
+ // of the FilterConfig.
+ if (RESTServer.conf != null) {
+ conf = RESTServer.conf;
+ } else {
+ conf = HBaseConfiguration.create();
+ }
+ for (Map.Entry<String, String> entry : conf) {
+ String name = entry.getKey();
+ if (name.startsWith(REST_PREFIX)) {
+ String value = entry.getValue();
+ if(name.equals(REST_AUTHENTICATION_PRINCIPAL)) {
+ try {
+ String machineName = Strings.domainNamePointerToHostName(
+ DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"),
+ conf.get(REST_DNS_NAMESERVER, "default")));
+ value = SecurityUtil.getServerPrincipal(value, machineName);
+ } catch (IOException ie) {
+ throw new ServletException("Failed to retrieve server principal", ie);
+ }
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Setting property " + name + "=" + value);
+ }
+ name = name.substring(REST_PREFIX_LEN);
+ props.setProperty(name, value);
+ }
+ }
+ return props;
+ }
+}
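As a configuration sketch of the prefix stripping performed in getConfiguration(): any property whose name starts with hbase.rest.authentication. is handed to the authentication handler with that prefix removed. The concrete key names below are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AuthFilterConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative keys: AuthFilter would forward these to the handler as
    // "type" and "kerberos.principal" after stripping the REST_PREFIX.
    conf.set("hbase.rest.authentication.type", "kerberos");
    conf.set("hbase.rest.authentication.kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
  }
}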
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
new file mode 100755
index 00000000..f74e10ca
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java
@@ -0,0 +1,72 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.util.zip.GZIPInputStream;
+
+import javax.servlet.ReadListener;
+import javax.servlet.ServletInputStream;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class GZIPRequestStream extends ServletInputStream {
+ private GZIPInputStream in;
+
+ public GZIPRequestStream(HttpServletRequest request) throws IOException {
+ this.in = new GZIPInputStream(request.getInputStream());
+ }
+
+ @Override
+ public int read() throws IOException {
+ return in.read();
+ }
+
+ @Override
+ public int read(byte[] b) throws IOException {
+ return in.read(b);
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ return in.read(b, off, len);
+ }
+
+ @Override
+ public void close() throws IOException {
+ in.close();
+ }
+
+ @Override
+ public boolean isFinished() {
+ throw new UnsupportedOperationException("Asynchronous operation is not supported.");
+ }
+
+ @Override
+ public boolean isReady() {
+ throw new UnsupportedOperationException("Asynchronous operation is not supported.");
+ }
+
+ @Override
+ public void setReadListener(ReadListener listener) {
+ throw new UnsupportedOperationException("Asynchronous operation is not supported.");
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
new file mode 100755
index 00000000..2290ecc0
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java
@@ -0,0 +1,51 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+import javax.servlet.ServletInputStream;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+
+@InterfaceAudience.Private
+public class GZIPRequestWrapper extends HttpServletRequestWrapper {
+ private ServletInputStream is;
+ private BufferedReader reader;
+
+ public GZIPRequestWrapper(HttpServletRequest request) throws IOException {
+ super(request);
+ this.is = new GZIPRequestStream(request);
+ this.reader = new BufferedReader(new InputStreamReader(this.is));
+ }
+
+ @Override
+ public ServletInputStream getInputStream() throws IOException {
+ return is;
+ }
+
+ @Override
+ public BufferedReader getReader() throws IOException {
+ return reader;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
new file mode 100755
index 00000000..3fa1ad6f
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java
@@ -0,0 +1,87 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.util.zip.GZIPOutputStream;
+
+import javax.servlet.ServletOutputStream;
+import javax.servlet.WriteListener;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class GZIPResponseStream extends ServletOutputStream {
+ private HttpServletResponse response;
+ private GZIPOutputStream out;
+
+ public GZIPResponseStream(HttpServletResponse response) throws IOException {
+ this.response = response;
+ this.out = new GZIPOutputStream(response.getOutputStream());
+ response.addHeader("Content-Encoding", "gzip");
+ }
+
+ public void resetBuffer() {
+ if (out != null && !response.isCommitted()) {
+ response.setHeader("Content-Encoding", null);
+ }
+ out = null;
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ out.write(b);
+ }
+
+ @Override
+ public void write(byte[] b) throws IOException {
+ out.write(b);
+ }
+
+ @Override
+ public void write(byte[] b, int off, int len) throws IOException {
+ out.write(b, off, len);
+ }
+
+ @Override
+ public void close() throws IOException {
+ finish();
+ out.close();
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ public void finish() throws IOException {
+ out.finish();
+ }
+
+ @Override
+ public boolean isReady() {
+ throw new UnsupportedOperationException("Asynchronous operation is not supported.");
+ }
+
+ @Override
+ public void setWriteListener(WriteListener writeListener) {
+ throw new UnsupportedOperationException("Asynchronous operation is not supported.");
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
new file mode 100755
index 00000000..53a26ea1
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java
@@ -0,0 +1,147 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletResponse;
+import javax.servlet.http.HttpServletResponseWrapper;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class GZIPResponseWrapper extends HttpServletResponseWrapper {
+ private HttpServletResponse response;
+ private ServletOutputStream os;
+ private PrintWriter writer;
+ private boolean compress = true;
+
+ public GZIPResponseWrapper(HttpServletResponse response) {
+ super(response);
+ this.response = response;
+ }
+
+ @Override
+ public void setStatus(int status) {
+ super.setStatus(status);
+ if (status < 200 || status >= 300) {
+ compress = false;
+ }
+ }
+
+ @Override
+ public void addHeader(String name, String value) {
+ if (!"content-length".equalsIgnoreCase(name)) {
+ super.addHeader(name, value);
+ }
+ }
+
+ @Override
+ public void setContentLength(int length) {
+ // do nothing
+ }
+
+ @Override
+ public void setIntHeader(String name, int value) {
+ if (!"content-length".equalsIgnoreCase(name)) {
+ super.setIntHeader(name, value);
+ }
+ }
+
+ @Override
+ public void setHeader(String name, String value) {
+ if (!"content-length".equalsIgnoreCase(name)) {
+ super.setHeader(name, value);
+ }
+ }
+
+ @Override
+ public void flushBuffer() throws IOException {
+ if (writer != null) {
+ writer.flush();
+ }
+ if (os != null && (os instanceof GZIPResponseStream)) {
+ ((GZIPResponseStream)os).finish();
+ } else {
+ getResponse().flushBuffer();
+ }
+ }
+
+ @Override
+ public void reset() {
+ super.reset();
+ if (os != null && (os instanceof GZIPResponseStream)) {
+ ((GZIPResponseStream)os).resetBuffer();
+ }
+ writer = null;
+ os = null;
+ compress = true;
+ }
+
+ @Override
+ public void resetBuffer() {
+ super.resetBuffer();
+ if (os != null && (os instanceof GZIPResponseStream)) {
+ ((GZIPResponseStream)os).resetBuffer();
+ }
+ writer = null;
+ os = null;
+ }
+
+ @Override
+ public void sendError(int status, String msg) throws IOException {
+ resetBuffer();
+ super.sendError(status, msg);
+ }
+
+ @Override
+ public void sendError(int status) throws IOException {
+ resetBuffer();
+ super.sendError(status);
+ }
+
+ @Override
+ public void sendRedirect(String location) throws IOException {
+ resetBuffer();
+ super.sendRedirect(location);
+ }
+
+ @Override
+ public ServletOutputStream getOutputStream() throws IOException {
+ if (os == null) {
+ if (!response.isCommitted() && compress) {
+ os = (ServletOutputStream)new GZIPResponseStream(response);
+ } else {
+ os = response.getOutputStream();
+ }
+ }
+ return os;
+ }
+
+ @Override
+ public PrintWriter getWriter() throws IOException {
+ if (writer == null) {
+ writer = new PrintWriter(getOutputStream());
+ }
+ return writer;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
new file mode 100755
index 00000000..4ba9eca3
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java
@@ -0,0 +1,85 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Set;
+import java.util.StringTokenizer;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class GzipFilter implements Filter {
+ private Set<String> mimeTypes = new HashSet<>();
+
+ @Override
+ public void init(FilterConfig filterConfig) {
+ String s = filterConfig.getInitParameter("mimeTypes");
+ if (s != null) {
+ StringTokenizer tok = new StringTokenizer(s, ",", false);
+ while (tok.hasMoreTokens()) {
+ mimeTypes.add(tok.nextToken());
+ }
+ }
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ @Override
+ public void doFilter(ServletRequest req, ServletResponse rsp,
+ FilterChain chain) throws IOException, ServletException {
+ HttpServletRequest request = (HttpServletRequest)req;
+ HttpServletResponse response = (HttpServletResponse)rsp;
+ String contentEncoding = request.getHeader("content-encoding");
+ String acceptEncoding = request.getHeader("accept-encoding");
+ String contentType = request.getHeader("content-type");
+ if ((contentEncoding != null) &&
+ (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) {
+ request = new GZIPRequestWrapper(request);
+ }
+ if (((acceptEncoding != null) &&
+ (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) ||
+ ((contentType != null) && mimeTypes.contains(contentType))) {
+ response = new GZIPResponseWrapper(response);
+ }
+ chain.doFilter(request, response);
+ if (response instanceof GZIPResponseWrapper) {
+ OutputStream os = response.getOutputStream();
+ if (os instanceof GZIPResponseStream) {
+ ((GZIPResponseStream)os).finish();
+ }
+ }
+ }
+}
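A minimal wiring sketch for the filter in an embedded Jetty context, assuming the servlet container setup used elsewhere in this module; the path spec and mimeTypes value are illustrative:

import java.util.EnumSet;
import javax.servlet.DispatcherType;
import org.apache.hadoop.hbase.rest.filter.GzipFilter;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class GzipFilterWiring {
  static void addGzip(ServletContextHandler context) {
    FilterHolder holder =
        context.addFilter(GzipFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST));
    // Requests whose Content-Type matches one of these also get a gzipped
    // response even without an Accept-Encoding: gzip header (see doFilter above).
    holder.setInitParameter("mimeTypes", "text/xml,application/json");
  }
}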
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
new file mode 100755
index 00000000..94eb314e
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This filter provides protection against cross site request forgery (CSRF)
+ * attacks for REST APIs. Enabling this filter on an endpoint requires all
+ * clients to send a particular (configurable) HTTP header with every
+ * request. In the absence of this header the filter rejects the request as
+ * a bad request.
+ */
+@InterfaceAudience.Public
+public class RestCsrfPreventionFilter implements Filter {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RestCsrfPreventionFilter.class);
+
+ public static final String HEADER_USER_AGENT = "User-Agent";
+ public static final String BROWSER_USER_AGENT_PARAM =
+ "browser-useragents-regex";
+ public static final String CUSTOM_HEADER_PARAM = "custom-header";
+ public static final String CUSTOM_METHODS_TO_IGNORE_PARAM =
+ "methods-to-ignore";
+ static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*";
+ public static final String HEADER_DEFAULT = "X-XSRF-HEADER";
+ static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
+ private String headerName = HEADER_DEFAULT;
+ private Set<String> methodsToIgnore = null;
+ private Set<Pattern> browserUserAgents;
+
+ @Override
+ public void init(FilterConfig filterConfig) {
+ String customHeader = filterConfig.getInitParameter(CUSTOM_HEADER_PARAM);
+ if (customHeader != null) {
+ headerName = customHeader;
+ }
+ String customMethodsToIgnore =
+ filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM);
+ if (customMethodsToIgnore != null) {
+ parseMethodsToIgnore(customMethodsToIgnore);
+ } else {
+ parseMethodsToIgnore(METHODS_TO_IGNORE_DEFAULT);
+ }
+
+ String agents = filterConfig.getInitParameter(BROWSER_USER_AGENT_PARAM);
+ if (agents == null) {
+ agents = BROWSER_USER_AGENTS_DEFAULT;
+ }
+ parseBrowserUserAgents(agents);
+ LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, "
+ + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s",
+ headerName, methodsToIgnore, browserUserAgents));
+ }
+
+ void parseBrowserUserAgents(String userAgents) {
+ String[] agentsArray = userAgents.split(",");
+ browserUserAgents = new HashSet<>();
+ for (String patternString : agentsArray) {
+ browserUserAgents.add(Pattern.compile(patternString));
+ }
+ }
+
+ void parseMethodsToIgnore(String mti) {
+ String[] methods = mti.split(",");
+ methodsToIgnore = new HashSet<>();
+ Collections.addAll(methodsToIgnore, methods);
+ }
+
+ /**
+ * This method interrogates the User-Agent String and returns whether it
+ * refers to a browser. If it is not a browser, then the requirement for the
+ * CSRF header will not be enforced; if it is a browser, the requirement will
+ * be enforced.
+ *
+ * A User-Agent String is considered to be a browser if it matches
+ * any of the regex patterns in the browser-useragents-regex parameter; by
+ * default, anything matching "^Mozilla.*" or "^Opera.*" is treated as a
+ * browser. Subclasses can optionally override this method to use different
+ * behavior.
+ *
+ * @param userAgent The User-Agent String, or null if there isn't one
+ * @return true if the User-Agent String refers to a browser, false if not
+ */
+ protected boolean isBrowser(String userAgent) {
+ if (userAgent == null) {
+ return false;
+ }
+ for (Pattern pattern : browserUserAgents) {
+ Matcher matcher = pattern.matcher(userAgent);
+ if (matcher.matches()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Defines the minimal API requirements for the filter to execute its
+ * filtering logic. This interface exists to facilitate integration in
+ * components that do not run within a servlet container and therefore cannot
+ * rely on a servlet container to dispatch to the {@link #doFilter} method.
+ * Applications that do run inside a servlet container will not need to write
+ * code that uses this interface. Instead, they can use typical servlet
+ * container configuration mechanisms to insert the filter.
+ */
+ public interface HttpInteraction {
+ /**
+ * Returns the value of a header.
+ *
+ * @param header name of header
+ * @return value of header
+ */
+ String getHeader(String header);
+
+ /**
+ * Returns the method.
+ *
+ * @return method
+ */
+ String getMethod();
+
+ /**
+ * Called by the filter after it decides that the request may proceed.
+ *
+ * @throws IOException if there is an I/O error
+ * @throws ServletException if the implementation relies on the servlet API
+ * and a servlet API call has failed
+ */
+ void proceed() throws IOException, ServletException;
+
+ /**
+ * Called by the filter after it decides that the request is a potential
+ * CSRF attack and therefore must be rejected.
+ *
+ * @param code status code to send
+ * @param message response message
+ * @throws IOException if there is an I/O error
+ */
+ void sendError(int code, String message) throws IOException;
+ }
+
+ /**
+ * Handles an {@link HttpInteraction} by applying the filtering logic.
+ *
+ * @param httpInteraction caller's HTTP interaction
+ * @throws IOException if there is an I/O error
+ * @throws ServletException if the implementation relies on the servlet API
+ * and a servlet API call has failed
+ */
+ public void handleHttpInteraction(HttpInteraction httpInteraction)
+ throws IOException, ServletException {
+ if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) ||
+ methodsToIgnore.contains(httpInteraction.getMethod()) ||
+ httpInteraction.getHeader(headerName) != null) {
+ httpInteraction.proceed();
+ } else {
+ httpInteraction.sendError(HttpServletResponse.SC_BAD_REQUEST,
+ "Missing Required Header for CSRF Vulnerability Protection");
+ }
+ }
+
+ @Override
+ public void doFilter(ServletRequest request, ServletResponse response,
+ final FilterChain chain) throws IOException, ServletException {
+ final HttpServletRequest httpRequest = (HttpServletRequest)request;
+ final HttpServletResponse httpResponse = (HttpServletResponse)response;
+ handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest,
+ httpResponse, chain));
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ /**
+ * Constructs a mapping of configuration properties to be used for filter
+ * initialization. The mapping includes all properties that start with the
+ * specified configuration prefix. Property names in the mapping are trimmed
+ * to remove the configuration prefix.
+ *
+ * @param conf configuration to read
+ * @param confPrefix configuration prefix
+ * @return mapping of configuration properties to be used for filter
+ * initialization
+ */
+ public static Map<String, String> getFilterParams(Configuration conf, String confPrefix) {
+ Map<String, String> filterConfigMap = new HashMap<>();
+ for (Map.Entry<String, String> entry : conf) {
+ String name = entry.getKey();
+ if (name.startsWith(confPrefix)) {
+ String value = conf.get(name);
+ name = name.substring(confPrefix.length());
+ filterConfigMap.put(name, value);
+ }
+ }
+ return filterConfigMap;
+ }
+
+ /**
+ * {@link HttpInteraction} implementation for use in the servlet filter.
+ */
+ private static final class ServletFilterHttpInteraction implements HttpInteraction {
+ private final FilterChain chain;
+ private final HttpServletRequest httpRequest;
+ private final HttpServletResponse httpResponse;
+
+ /**
+ * Creates a new ServletFilterHttpInteraction.
+ *
+ * @param httpRequest request to process
+ * @param httpResponse response to process
+ * @param chain filter chain to forward to if HTTP interaction is allowed
+ */
+ public ServletFilterHttpInteraction(HttpServletRequest httpRequest,
+ HttpServletResponse httpResponse, FilterChain chain) {
+ this.httpRequest = httpRequest;
+ this.httpResponse = httpResponse;
+ this.chain = chain;
+ }
+
+ @Override
+ public String getHeader(String header) {
+ return httpRequest.getHeader(header);
+ }
+
+ @Override
+ public String getMethod() {
+ return httpRequest.getMethod();
+ }
+
+ @Override
+ public void proceed() throws IOException, ServletException {
+ chain.doFilter(httpRequest, httpResponse);
+ }
+
+ @Override
+ public void sendError(int code, String message) throws IOException {
+ httpResponse.sendError(code, message);
+ }
+ }
+}
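Since HttpInteraction exists precisely for callers outside a servlet container, here is a hedged sketch of a map-backed implementation that could be passed to handleHttpInteraction(); note the filter's init(FilterConfig) must have run first so methodsToIgnore and browserUserAgents are populated:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;

public class MapHttpInteraction implements RestCsrfPreventionFilter.HttpInteraction {
  private final String method;
  private final Map<String, String> headers;

  public MapHttpInteraction(String method, Map<String, String> headers) {
    this.method = method;
    this.headers = headers;
  }

  @Override
  public String getHeader(String header) {
    return headers.get(header);
  }

  @Override
  public String getMethod() {
    return method;
  }

  @Override
  public void proceed() {
    // In a real integration this would dispatch to the protected operation.
  }

  @Override
  public void sendError(int code, String message) throws IOException {
    throw new IOException(code + ": " + message);
  }
}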
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/AdminAuthorizedFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/AdminAuthorizedFilter.java
new file mode 100755
index 00000000..9f248241
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/AdminAuthorizedFilter.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+
+@InterfaceAudience.Private
+public class AdminAuthorizedFilter implements Filter {
+
+ private Configuration conf;
+ private AccessControlList adminsAcl;
+
+ @Override public void init(FilterConfig filterConfig) throws ServletException {
+ adminsAcl = (AccessControlList) filterConfig.getServletContext().getAttribute(
+ HttpServer.ADMINS_ACL);
+ conf = (Configuration) filterConfig.getServletContext().getAttribute(
+ HttpServer.CONF_CONTEXT_ATTRIBUTE);
+ }
+
+ @Override
+ public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
+ throws IOException, ServletException {
+ if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse)) {
+ throw new UnsupportedOperationException("Only accepts HTTP");
+ }
+ HttpServletRequest httpReq = (HttpServletRequest) request;
+ HttpServletResponse httpResp = (HttpServletResponse) response;
+
+ if (!HttpServer.hasAdministratorAccess(conf, adminsAcl, httpReq, httpResp)) {
+ return;
+ }
+
+ chain.doFilter(request, response);
+ }
+
+ @Override public void destroy() {}
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ClickjackingPreventionFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ClickjackingPreventionFilter.java
new file mode 100755
index 00000000..0db32b2e
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ClickjackingPreventionFilter.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class ClickjackingPreventionFilter implements Filter {
+ private FilterConfig filterConfig;
+ private static final String DEFAULT_XFRAMEOPTIONS = "DENY";
+
+ @Override
+ public void init(FilterConfig filterConfig) throws ServletException {
+ this.filterConfig = filterConfig;
+ }
+
+ @Override
+ public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
+ throws IOException, ServletException {
+ HttpServletResponse httpRes = (HttpServletResponse) res;
+ httpRes.addHeader("X-Frame-Options", filterConfig.getInitParameter("xframeoptions"));
+ chain.doFilter(req, res);
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ public static Map<String, String> getDefaultParameters(Configuration conf) {
+ Map<String, String> params = new HashMap<>();
+ params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode",
+ DEFAULT_XFRAMEOPTIONS));
+ return params;
+ }
+}
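A small sketch of how the default X-Frame-Options value is resolved from configuration; the SAMEORIGIN override is illustrative:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.http.ClickjackingPreventionFilter;

public class ClickjackingDefaultsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.http.filter.xframeoptions.mode", "SAMEORIGIN"); // illustrative override
    Map<String, String> params = ClickjackingPreventionFilter.getDefaultParameters(conf);
    System.out.println(params.get("xframeoptions")); // SAMEORIGIN; DENY if unset
  }
}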
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HtmlQuoting.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HtmlQuoting.java
new file mode 100755
index 00000000..b47357a3
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HtmlQuoting.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * This class is responsible for quoting HTML characters.
+ */
+@InterfaceAudience.Private
+public final class HtmlQuoting {
+ private static final byte[] ampBytes = Bytes.toBytes("&amp;");
+ private static final byte[] aposBytes = Bytes.toBytes("&apos;");
+ private static final byte[] gtBytes = Bytes.toBytes("&gt;");
+ private static final byte[] ltBytes = Bytes.toBytes("&lt;");
+ private static final byte[] quotBytes = Bytes.toBytes("&quot;");
+
+ /**
+ * Does the given string need to be quoted?
+ * @param data the string to check
+ * @param off the starting position
+ * @param len the number of bytes to check
+ * @return does the string contain any of the active html characters?
+ */
+ public static boolean needsQuoting(byte[] data, int off, int len) {
+ if (off+len > data.length) {
+ throw new IllegalStateException("off+len=" + (off + len) + " should be lower"
+ + " than data length=" + data.length);
+ }
+ for(int i=off; i< off+len; ++i) {
+ switch(data[i]) {
+ case '&':
+ case '<':
+ case '>':
+ case '\'':
+ case '"':
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Does the given string need to be quoted?
+ * @param str the string to check
+ * @return does the string contain any of the active html characters?
+ */
+ public static boolean needsQuoting(String str) {
+ if (str == null) {
+ return false;
+ }
+ byte[] bytes = Bytes.toBytes(str);
+ return needsQuoting(bytes, 0 , bytes.length);
+ }
+
+ /**
+ * Quote all of the active HTML characters in the given string as they
+ * are added to the buffer.
+ * @param output the stream to write the output to
+ * @param buffer the byte array to take the characters from
+ * @param off the index of the first byte to quote
+ * @param len the number of bytes to quote
+ */
+ public static void quoteHtmlChars(OutputStream output, byte[] buffer, int off, int len)
+ throws IOException {
+ for(int i=off; i < off+len; i++) {
+ switch (buffer[i]) {
+ case '&':
+ output.write(ampBytes);
+ break;
+ case '<':
+ output.write(ltBytes);
+ break;
+ case '>':
+ output.write(gtBytes);
+ break;
+ case '\'':
+ output.write(aposBytes);
+ break;
+ case '"':
+ output.write(quotBytes);
+ break;
+ default:
+ output.write(buffer, i, 1);
+ break;
+ }
+ }
+ }
+
+ /**
+ * Quote the given item to make it html-safe.
+ * @param item the string to quote
+ * @return the quoted string
+ */
+ public static String quoteHtmlChars(String item) {
+ if (item == null) {
+ return null;
+ }
+ byte[] bytes = Bytes.toBytes(item);
+ if (needsQuoting(bytes, 0, bytes.length)) {
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ try {
+ quoteHtmlChars(buffer, bytes, 0, bytes.length);
+ } catch (IOException ioe) {
+ // Won't happen, since it is a ByteArrayOutputStream
+ }
+ return buffer.toString();
+ } else {
+ return item;
+ }
+ }
+
+ /**
+ * Return an output stream that quotes all of the output.
+ * @param out the stream to write the quoted output to
+ * @return a new stream that the application should write to
+ */
+ public static OutputStream quoteOutputStream(final OutputStream out) {
+ return new OutputStream() {
+ private byte[] data = new byte[1];
+ @Override
+ public void write(byte[] data, int off, int len) throws IOException {
+ quoteHtmlChars(out, data, off, len);
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ data[0] = (byte) b;
+ quoteHtmlChars(out, data, 0, 1);
+ }
+
+ @Override
+ public void flush() throws IOException {
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ out.close();
+ }
+ };
+ }
+
+ /**
+ * Remove HTML quoting from a string.
+ * @param item the string to unquote
+ * @return the unquoted string
+ */
+ public static String unquoteHtmlChars(String item) {
+ if (item == null) {
+ return null;
+ }
+ int next = item.indexOf('&');
+ // nothing was quoted
+ if (next == -1) {
+ return item;
+ }
+ int len = item.length();
+ int posn = 0;
+ StringBuilder buffer = new StringBuilder();
+ while (next != -1) {
+ buffer.append(item.substring(posn, next));
+ if (item.startsWith("&amp;", next)) {
+ buffer.append('&');
+ next += 5;
+ } else if (item.startsWith("&apos;", next)) {
+ buffer.append('\'');
+ next += 6;
+ } else if (item.startsWith("&gt;", next)) {
+ buffer.append('>');
+ next += 4;
+ } else if (item.startsWith("&lt;", next)) {
+ buffer.append('<');
+ next += 4;
+ } else if (item.startsWith("&quot;", next)) {
+ buffer.append('"');
+ next += 6;
+ } else {
+ int end = item.indexOf(';', next)+1;
+ if (end == 0) {
+ end = len;
+ }
+ throw new IllegalArgumentException("Bad HTML quoting for " +
+ item.substring(next,end));
+ }
+ posn = next;
+ next = item.indexOf('&', posn);
+ }
+ buffer.append(item.substring(posn, len));
+ return buffer.toString();
+ }
+
+ public static void main(String[] args) {
+ if (args.length == 0) {
+ throw new IllegalArgumentException("Please provide some arguments");
+ }
+ for(String arg:args) {
+ System.out.println("Original: " + arg);
+ String quoted = quoteHtmlChars(arg);
+ System.out.println("Quoted: "+ quoted);
+ String unquoted = unquoteHtmlChars(quoted);
+ System.out.println("Unquoted: " + unquoted);
+ System.out.println();
+ }
+ }
+
+ private HtmlQuoting() {}
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpConfig.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpConfig.java
new file mode 100755
index 00000000..e27b0900
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpConfig.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+/**
+ * Statics to get access to Http related configuration.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class HttpConfig {
+ private Policy policy;
+ public enum Policy {
+ HTTP_ONLY,
+ HTTPS_ONLY,
+ HTTP_AND_HTTPS;
+
+ public Policy fromString(String value) {
+ if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
+ return HTTPS_ONLY;
+ } else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
+ return HTTP_AND_HTTPS;
+ }
+ return HTTP_ONLY;
+ }
+
+ public boolean isHttpEnabled() {
+ return this == HTTP_ONLY || this == HTTP_AND_HTTPS;
+ }
+
+ public boolean isHttpsEnabled() {
+ return this == HTTPS_ONLY || this == HTTP_AND_HTTPS;
+ }
+ }
+
+ public HttpConfig(final Configuration conf) {
+ boolean sslEnabled = conf.getBoolean(
+ ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY,
+ ServerConfigurationKeys.HBASE_SSL_ENABLED_DEFAULT);
+ policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY;
+ if (sslEnabled) {
+ conf.addResource("ssl-server.xml");
+ conf.addResource("ssl-client.xml");
+ }
+ }
+
+ public void setPolicy(Policy policy) {
+ this.policy = policy;
+ }
+
+ public boolean isSecure() {
+ return policy == Policy.HTTPS_ONLY;
+ }
+
+ public String getSchemePrefix() {
+ return (isSecure()) ? "https://" : "http://";
+ }
+
+ public String getScheme(Policy policy) {
+ return policy == Policy.HTTPS_ONLY ? "https://" : "http://";
+ }
+}
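A brief sketch of deriving the scheme from configuration; the hbase.ssl.enabled key is an assumption about what ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY resolves to:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.http.HttpConfig;

public class HttpConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.ssl.enabled", true); // assumed key behind HBASE_SSL_ENABLED_KEY
    HttpConfig httpConfig = new HttpConfig(conf);
    System.out.println(httpConfig.getSchemePrefix()); // "https://" when SSL is enabled
  }
}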
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServer.java
new file mode 100755
index 00000000..61c3e76d
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServer.java
@@ -0,0 +1,1508 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.rest.http.conf.ConfServlet;
+import org.apache.hadoop.hbase.rest.http.jmx.JMXJsonServlet;
+import org.apache.hadoop.hbase.rest.http.log.LogLevel;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.http.AdminAuthorizedServlet;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.http.FilterInitializer;
+import org.apache.hadoop.http.HttpRequestLog;
+import org.apache.hadoop.http.NoCacheFilter;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.Shell;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+import org.eclipse.jetty.http.HttpVersion;
+import org.eclipse.jetty.server.Handler;
+import org.eclipse.jetty.server.HttpConfiguration;
+import org.eclipse.jetty.server.HttpConnectionFactory;
+import org.eclipse.jetty.server.RequestLog;
+import org.eclipse.jetty.server.SecureRequestCustomizer;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.server.SslConnectionFactory;
+import org.eclipse.jetty.server.handler.ContextHandlerCollection;
+import org.eclipse.jetty.server.handler.HandlerCollection;
+import org.eclipse.jetty.server.handler.RequestLogHandler;
+import org.eclipse.jetty.servlet.DefaultServlet;
+import org.eclipse.jetty.servlet.FilterHolder;
+import org.eclipse.jetty.servlet.FilterMapping;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.util.MultiException;
+import org.eclipse.jetty.util.ssl.SslContextFactory;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.eclipse.jetty.webapp.WebAppContext;
+import org.glassfish.jersey.server.ResourceConfig;
+import org.glassfish.jersey.servlet.ServletContainer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import javax.servlet.http.HttpServletResponse;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.io.PrintStream;
+import java.net.BindException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Create a Jetty embedded server to answer http requests. The primary goal
+ * is to serve up status information for the server.
+ * There are three contexts:
+ * "/logs/" -> points to the log directory
+ * "/static/" -> points to common static files (src/webapps/static)
+ * "/" -> the jsp server code from (src/webapps/<name>)
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class HttpServer implements FilterContainer {
+ private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class);
+ private static final String EMPTY_STRING = "";
+
+ private static final int DEFAULT_MAX_HEADER_SIZE = 64 * 1024; // 64K
+
+ static final String FILTER_INITIALIZERS_PROPERTY
+ = "hbase.http.filter.initializers";
+ static final String HTTP_MAX_THREADS = "hbase.http.max.threads";
+
+ public static final String HTTP_UI_AUTHENTICATION = "hbase.security.authentication.ui";
+ static final String HTTP_AUTHENTICATION_PREFIX = "hbase.security.authentication.";
+ static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX
+ + "spnego.";
+ static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX = "kerberos.principal";
+ public static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY =
+ HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX;
+ static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX = "kerberos.keytab";
+ public static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY =
+ HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX;
+ static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules";
+ public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY =
+ HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX;
+ static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable";
+ public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY =
+ HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX;
+ public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false;
+ static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX =
+ "signature.secret.file";
+ public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY =
+ HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX;
+ public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY =
+ HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.users";
+ public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY =
+ HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.groups";
+ public static final String HTTP_PRIVILEGED_CONF_KEY =
+ "hbase.security.authentication.ui.config.protected";
+ public static final boolean HTTP_PRIVILEGED_CONF_DEFAULT = false;
+
+ // The ServletContext attribute where the daemon Configuration
+ // gets stored.
+ public static final String CONF_CONTEXT_ATTRIBUTE = "hbase.conf";
+ public static final String ADMINS_ACL = "admins.acl";
+ public static final String BIND_ADDRESS = "bind.address";
+ public static final String SPNEGO_FILTER = "SpnegoFilter";
+ public static final String SPNEGO_PROXYUSER_FILTER = "SpnegoProxyUserFilter";
+ public static final String NO_CACHE_FILTER = "NoCacheFilter";
+ public static final String APP_DIR = "webapps";
+
+ private final AccessControlList adminsAcl;
+
+ protected final Server webServer;
+ protected String appDir;
+ protected String logDir;
+
+ private static final class ListenerInfo {
+ /**
+ * Boolean flag to determine whether the HTTP server should clean up the
+ * listener in stop().
+ */
+ private final boolean isManaged;
+ private final ServerConnector listener;
+ private ListenerInfo(boolean isManaged, ServerConnector listener) {
+ this.isManaged = isManaged;
+ this.listener = listener;
+ }
+ }
+
+ private final List<ListenerInfo> listeners = Lists.newArrayList();
+
+ @VisibleForTesting
+ public List<ServerConnector> getServerConnectors() {
+ return listeners.stream().map(info -> info.listener).collect(Collectors.toList());
+ }
+
+ protected final WebAppContext webAppContext;
+ protected final boolean findPort;
+ protected final Map<ServletContextHandler, Boolean> defaultContexts = new HashMap<>();
+ protected final List<String> filterNames = new ArrayList<>();
+ protected final boolean authenticationEnabled;
+ static final String STATE_DESCRIPTION_ALIVE = " - alive";
+ static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
+
+ /**
+ * Class to construct instances of HTTP server with specific options.
+ */
+ public static class Builder {
+ private ArrayList<URI> endpoints = Lists.newArrayList();
+ private Configuration conf;
+ private String[] pathSpecs;
+ private AccessControlList adminsAcl;
+ private boolean securityEnabled = false;
+ private String usernameConfKey;
+ private String keytabConfKey;
+ private boolean needsClientAuth;
+
+ private String hostName;
+ private String appDir = APP_DIR;
+ private String logDir;
+ private boolean findPort;
+
+ private String trustStore;
+ private String trustStorePassword;
+ private String trustStoreType;
+
+ private String keyStore;
+ private String keyStorePassword;
+ private String keyStoreType;
+
+ // The -keypass option in keytool
+ private String keyPassword;
+
+ private String kerberosNameRulesKey;
+ private String signatureSecretFileKey;
+
+ /**
+ * @see #setAppDir(String)
+ * @deprecated Since 0.99.0. Use builder pattern via {@link #setAppDir(String)} instead.
+ */
+ @Deprecated
+ private String name;
+ /**
+ * @see #addEndpoint(URI)
+ * @deprecated Since 0.99.0. Use builder pattern via {@link #addEndpoint(URI)} instead.
+ */
+ @Deprecated
+ private String bindAddress;
+ /**
+ * @see #addEndpoint(URI)
+ * @deprecated Since 0.99.0. Use builder pattern via {@link #addEndpoint(URI)} instead.
+ */
+ @Deprecated
+ private int port = -1;
+
+ /**
+ * Add an endpoint that the HTTP server should listen to.
+ *
+ * @param endpoint
+ * the endpoint that the HTTP server should listen to. The
+ * scheme specifies the protocol (i.e. HTTP / HTTPS), the host
+ * specifies the binding address, and the port specifies the
+ * listening port. Unspecified or zero port means that the server
+ * can listen to any port.
+ */
+ public Builder addEndpoint(URI endpoint) {
+ endpoints.add(endpoint);
+ return this;
+ }
+
+ /**
+ * Set the hostname of the http server. The host name is used to resolve the
+ * _HOST field in Kerberos principals. The hostname of the first listener
+ * will be used if the name is unspecified.
+ */
+ public Builder hostName(String hostName) {
+ this.hostName = hostName;
+ return this;
+ }
+
+ public Builder trustStore(String location, String password, String type) {
+ this.trustStore = location;
+ this.trustStorePassword = password;
+ this.trustStoreType = type;
+ return this;
+ }
+
+ public Builder keyStore(String location, String password, String type) {
+ this.keyStore = location;
+ this.keyStorePassword = password;
+ this.keyStoreType = type;
+ return this;
+ }
+
+ public Builder keyPassword(String password) {
+ this.keyPassword = password;
+ return this;
+ }
+
+ /**
+ * Specify whether the server should authorize the client in SSL
+ * connections.
+ */
+ public Builder needsClientAuth(boolean value) {
+ this.needsClientAuth = value;
+ return this;
+ }
+
+ /**
+ * @see #setAppDir(String)
+ * @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead.
+ */
+ @Deprecated
+ public Builder setName(String name){
+ this.name = name;
+ return this;
+ }
+
+ /**
+ * @see #addEndpoint(URI)
+ * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead.
+ */
+ @Deprecated
+ public Builder setBindAddress(String bindAddress){
+ this.bindAddress = bindAddress;
+ return this;
+ }
+
+ /**
+ * @see #addEndpoint(URI)
+ * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead.
+ */
+ @Deprecated
+ public Builder setPort(int port) {
+ this.port = port;
+ return this;
+ }
+
+ public Builder setFindPort(boolean findPort) {
+ this.findPort = findPort;
+ return this;
+ }
+
+ public Builder setConf(Configuration conf) {
+ this.conf = conf;
+ return this;
+ }
+
+ public Builder setPathSpec(String[] pathSpec) {
+ this.pathSpecs = pathSpec;
+ return this;
+ }
+
+ public Builder setACL(AccessControlList acl) {
+ this.adminsAcl = acl;
+ return this;
+ }
+
+ public Builder setSecurityEnabled(boolean securityEnabled) {
+ this.securityEnabled = securityEnabled;
+ return this;
+ }
+
+ public Builder setUsernameConfKey(String usernameConfKey) {
+ this.usernameConfKey = usernameConfKey;
+ return this;
+ }
+
+ public Builder setKeytabConfKey(String keytabConfKey) {
+ this.keytabConfKey = keytabConfKey;
+ return this;
+ }
+
+ public Builder setKerberosNameRulesKey(String kerberosNameRulesKey) {
+ this.kerberosNameRulesKey = kerberosNameRulesKey;
+ return this;
+ }
+
+ public Builder setSignatureSecretFileKey(String signatureSecretFileKey) {
+ this.signatureSecretFileKey = signatureSecretFileKey;
+ return this;
+ }
+
+ public Builder setAppDir(String appDir) {
+ this.appDir = appDir;
+ return this;
+ }
+
+ public Builder setLogDir(String logDir) {
+ this.logDir = logDir;
+ return this;
+ }
+
+ public HttpServer build() throws IOException {
+
+ // Do we still need to assert this non null name if it is deprecated?
+ if (this.name == null) {
+ throw new HadoopIllegalArgumentException("name is not set");
+ }
+
+ // Make the behavior compatible with deprecated interfaces
+ if (bindAddress != null && port != -1) {
+ try {
+ endpoints.add(0, new URI("http", "", bindAddress, port, "", "", ""));
+ } catch (URISyntaxException e) {
+ throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e);
+ }
+ }
+
+ if (endpoints.isEmpty()) {
+ throw new HadoopIllegalArgumentException("No endpoints specified");
+ }
+
+ if (hostName == null) {
+ hostName = endpoints.get(0).getHost();
+ }
+
+ if (this.conf == null) {
+ conf = new Configuration();
+ }
+
+ HttpServer server = new HttpServer(this);
+
+ for (URI ep : endpoints) {
+ ServerConnector listener = null;
+ String scheme = ep.getScheme();
+ HttpConfiguration httpConfig = new HttpConfiguration();
+ httpConfig.setSecureScheme("https");
+ httpConfig.setHeaderCacheSize(DEFAULT_MAX_HEADER_SIZE);
+ httpConfig.setResponseHeaderSize(DEFAULT_MAX_HEADER_SIZE);
+ httpConfig.setRequestHeaderSize(DEFAULT_MAX_HEADER_SIZE);
+
+ if ("http".equals(scheme)) {
+ listener = new ServerConnector(server.webServer, new HttpConnectionFactory(httpConfig));
+ } else if ("https".equals(scheme)) {
+ HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig);
+ httpsConfig.addCustomizer(new SecureRequestCustomizer());
+ SslContextFactory sslCtxFactory = new SslContextFactory();
+ sslCtxFactory.setNeedClientAuth(needsClientAuth);
+ sslCtxFactory.setKeyManagerPassword(keyPassword);
+
+ if (keyStore != null) {
+ sslCtxFactory.setKeyStorePath(keyStore);
+ sslCtxFactory.setKeyStoreType(keyStoreType);
+ sslCtxFactory.setKeyStorePassword(keyStorePassword);
+ }
+
+ if (trustStore != null) {
+ sslCtxFactory.setTrustStorePath(trustStore);
+ sslCtxFactory.setTrustStoreType(trustStoreType);
+ sslCtxFactory.setTrustStorePassword(trustStorePassword);
+
+ }
+ listener = new ServerConnector(server.webServer, new SslConnectionFactory(sslCtxFactory,
+ HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig));
+ } else {
+ throw new HadoopIllegalArgumentException(
+ "unknown scheme for endpoint:" + ep);
+ }
+
+ // default settings for connector
+ listener.setAcceptQueueSize(128);
+ if (Shell.WINDOWS) {
+ // result of setting the SO_REUSEADDR flag is different on Windows
+ // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
+ // without this 2 NN's can start on the same machine and listen on
+ // the same port with indeterminate routing of incoming requests to them
+ listener.setReuseAddress(false);
+ }
+
+ listener.setHost(ep.getHost());
+ listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
+ server.addManagedListener(listener);
+ }
+
+ server.loadListeners();
+ return server;
+
+ }
+
+ }
+
+ /**
+ * @see #HttpServer(String, String, int, boolean, Configuration)
+ * @deprecated Since 0.99.0
+ */
+ @Deprecated
+ public HttpServer(String name, String bindAddress, int port, boolean findPort)
+ throws IOException {
+ this(name, bindAddress, port, findPort, new Configuration());
+ }
+
+ /**
+ * Create a status server on the given port. Allows you to specify the
+ * path specifications that this server will be serving so that they will be
+ * added to the filters properly.
+ *
+ * @param name The name of the server
+ * @param bindAddress The address for this server
+ * @param port The port to use on the server
+ * @param findPort whether the server should start at the given port and
+ * increment by 1 until it finds a free port.
+ * @param conf Configuration
+ * @param pathSpecs Path specifications that this httpserver will be serving.
+ * These will be added to any filters.
+ * @deprecated Since 0.99.0
+ */
+ @Deprecated
+ public HttpServer(String name, String bindAddress, int port,
+ boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
+ this(name, bindAddress, port, findPort, conf, null, pathSpecs);
+ }
+
+ /**
+ * Create a status server on the given port.
+ * The jsp scripts are taken from src/webapps/<name>.
+ * @param name The name of the server
+ * @param port The port to use on the server
+ * @param findPort whether the server should start at the given port and
+ * increment by 1 until it finds a free port.
+ * @param conf Configuration
+ * @deprecated Since 0.99.0
+ */
+ @Deprecated
+ public HttpServer(String name, String bindAddress, int port,
+ boolean findPort, Configuration conf) throws IOException {
+ this(name, bindAddress, port, findPort, conf, null, null);
+ }
+
+ /**
+ * Creates a status server on the given port. The JSP scripts are taken
+ * from src/webapps/<name>.
+ *
+ * @param name the name of the server
+ * @param bindAddress the address for this server
+ * @param port the port to use on the server
+ * @param findPort whether the server should start at the given port and increment by 1 until it
+ * finds a free port
+ * @param conf the configuration to use
+ * @param adminsAcl {@link AccessControlList} of the admins
+ * @throws IOException when creating the server fails
+ * @deprecated Since 0.99.0
+ */
+ @Deprecated
+ public HttpServer(String name, String bindAddress, int port,
+ boolean findPort, Configuration conf, AccessControlList adminsAcl)
+ throws IOException {
+ this(name, bindAddress, port, findPort, conf, adminsAcl, null);
+ }
+
+ /**
+ * Create a status server on the given port.
+ * The jsp scripts are taken from src/webapps/<name>.
+ * @param name The name of the server
+ * @param bindAddress The address for this server
+ * @param port The port to use on the server
+ * @param findPort whether the server should start at the given port and
+ * increment by 1 until it finds a free port.
+ * @param conf Configuration
+ * @param adminsAcl {@link AccessControlList} of the admins
+ * @param pathSpecs Path specifications that this httpserver will be serving.
+ * These will be added to any filters.
+ * @deprecated Since 0.99.0
+ */
+ @Deprecated
+ public HttpServer(String name, String bindAddress, int port,
+ boolean findPort, Configuration conf, AccessControlList adminsAcl,
+ String[] pathSpecs) throws IOException {
+ this(new Builder().setName(name)
+ .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
+ .setFindPort(findPort).setConf(conf).setACL(adminsAcl)
+ .setPathSpec(pathSpecs));
+ }
+
+ private HttpServer(final Builder b) throws IOException {
+ this.appDir = b.appDir;
+ this.logDir = b.logDir;
+ final String appDir = getWebAppsPath(b.name);
+
+
+ int maxThreads = b.conf.getInt(HTTP_MAX_THREADS, 16);
+ // If HTTP_MAX_THREADS is less than or equal to 0, QueuedThreadPool() will use the
+ // default value (currently 200).
+ QueuedThreadPool threadPool = maxThreads <= 0 ? new QueuedThreadPool()
+ : new QueuedThreadPool(maxThreads);
+ threadPool.setDaemon(true);
+ this.webServer = new Server(threadPool);
+
+ this.adminsAcl = b.adminsAcl;
+ this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
+ this.findPort = b.findPort;
+ this.authenticationEnabled = b.securityEnabled;
+ initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs, b);
+ }
+
+ private void initializeWebServer(String name, String hostName,
+ Configuration conf, String[] pathSpecs, HttpServer.Builder b)
+ throws FileNotFoundException, IOException {
+
+ Preconditions.checkNotNull(webAppContext);
+
+ HandlerCollection handlerCollection = new HandlerCollection();
+
+ ContextHandlerCollection contexts = new ContextHandlerCollection();
+ RequestLog requestLog = HttpRequestLog.getRequestLog(name);
+
+ if (requestLog != null) {
+ RequestLogHandler requestLogHandler = new RequestLogHandler();
+ requestLogHandler.setRequestLog(requestLog);
+ handlerCollection.addHandler(requestLogHandler);
+ }
+
+ final String appDir = getWebAppsPath(name);
+
+ handlerCollection.addHandler(contexts);
+ handlerCollection.addHandler(webAppContext);
+
+ webServer.setHandler(handlerCollection);
+
+ webAppContext.setAttribute(ADMINS_ACL, adminsAcl);
+
+ // Default apps need to be set first, so that all filters are applied to them.
+ // Because they're added to defaultContexts, we need them there before we start
+ // adding filters
+ addDefaultApps(contexts, appDir, conf);
+
+ addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
+
+ addGlobalFilter("clickjackingprevention",
+ ClickjackingPreventionFilter.class.getName(),
+ ClickjackingPreventionFilter.getDefaultParameters(conf));
+
+ addGlobalFilter("securityheaders",
+ SecurityHeadersFilter.class.getName(),
+ SecurityHeadersFilter.getDefaultParameters(conf));
+
+ // But security needs to be enabled prior to adding the other servlets
+ if (authenticationEnabled) {
+ initSpnego(conf, hostName, b.usernameConfKey, b.keytabConfKey, b.kerberosNameRulesKey,
+ b.signatureSecretFileKey);
+ }
+
+ final FilterInitializer[] initializers = getFilterInitializers(conf);
+ if (initializers != null) {
+ conf = new Configuration(conf);
+ conf.set(BIND_ADDRESS, hostName);
+ for (FilterInitializer c : initializers) {
+ c.initFilter(this, conf);
+ }
+ }
+
+ addDefaultServlets(contexts, conf);
+
+ if (pathSpecs != null) {
+ for (String path : pathSpecs) {
+ LOG.info("adding path spec: " + path);
+ addFilterPathMapping(path, webAppContext);
+ }
+ }
+ }
+
+ private void addManagedListener(ServerConnector connector) {
+ listeners.add(new ListenerInfo(true, connector));
+ }
+
+ private static WebAppContext createWebAppContext(String name,
+ Configuration conf, AccessControlList adminsAcl, final String appDir) {
+ WebAppContext ctx = new WebAppContext();
+ ctx.setDisplayName(name);
+ ctx.setContextPath("/");
+ ctx.setWar(appDir + "/" + name);
+ ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
+ // for org.apache.hadoop.metrics.MetricsServlet
+ ctx.getServletContext().setAttribute(
+ org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
+ ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
+ addNoCacheFilter(ctx);
+ return ctx;
+ }
+
+ private static void addNoCacheFilter(WebAppContext ctxt) {
+ defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
+ Collections.<String, String> emptyMap(), new String[] { "/*" });
+ }
+
+ /** Get an array of FilterConfiguration specified in the conf */
+ private static FilterInitializer[] getFilterInitializers(Configuration conf) {
+ if (conf == null) {
+ return null;
+ }
+
+ Class<?>[] classes = conf.getClasses(FILTER_INITIALIZERS_PROPERTY);
+ if (classes == null) {
+ return null;
+ }
+
+ FilterInitializer[] initializers = new FilterInitializer[classes.length];
+ for(int i = 0; i < classes.length; i++) {
+ initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(classes[i]);
+ }
+ return initializers;
+ }
+
+ /**
+ * Add default apps.
+ * @param appDir The application directory
+ */
+ protected void addDefaultApps(ContextHandlerCollection parent,
+ final String appDir, Configuration conf) {
+ // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
+ String logDir = this.logDir;
+ if (logDir == null) {
+ logDir = System.getProperty("hadoop.log.dir");
+ }
+ if (logDir != null) {
+ ServletContextHandler logContext = new ServletContextHandler(parent, "/logs");
+ logContext.addServlet(AdminAuthorizedServlet.class, "/*");
+ logContext.setResourceBase(logDir);
+
+ if (conf.getBoolean(
+ ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES,
+ ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) {
+ Map<String, String> params = logContext.getInitParams();
+ params.put(
+ "org.mortbay.jetty.servlet.Default.aliases", "true");
+ }
+ logContext.setDisplayName("logs");
+ setContextAttributes(logContext, conf);
+ defaultContexts.put(logContext, true);
+ }
+ // set up the context for "/static/*"
+ ServletContextHandler staticContext = new ServletContextHandler(parent, "/static");
+ staticContext.setResourceBase(appDir + "/static");
+ staticContext.addServlet(DefaultServlet.class, "/*");
+ staticContext.setDisplayName("static");
+ setContextAttributes(staticContext, conf);
+ defaultContexts.put(staticContext, true);
+ }
+
+ private void setContextAttributes(ServletContextHandler context, Configuration conf) {
+ context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
+ context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
+ }
+
+ /**
+ * Add default servlets.
+ */
+ protected void addDefaultServlets(
+ ContextHandlerCollection contexts, Configuration conf) throws IOException {
+ // set up default servlets
+ addPrivilegedServlet("stacks", "/stacks", StackServlet.class);
+ addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
+ // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's
+ // MetricsServlet (see HADOOP-12504). We use reflection to load it when running against hadoop2.
+ // Remove when we drop support for hbase on hadoop2.x.
+ try {
+ Class<?> clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet");
+ addPrivilegedServlet("metrics", "/metrics", clz.asSubclass(HttpServlet.class));
+ } catch (Exception e) {
+ // do nothing
+ }
+ addPrivilegedServlet("jmx", "/jmx", JMXJsonServlet.class);
+ // While we don't expect users to have sensitive information in their configuration, they
+ // might. Give them an option to not expose the service configuration to all users.
+ if (conf.getBoolean(HTTP_PRIVILEGED_CONF_KEY, HTTP_PRIVILEGED_CONF_DEFAULT)) {
+ addPrivilegedServlet("conf", "/conf", ConfServlet.class);
+ } else {
+ addUnprivilegedServlet("conf", "/conf", ConfServlet.class);
+ }
+ final String asyncProfilerHome = ProfileServlet.getAsyncProfilerHome();
+ if (asyncProfilerHome != null && !asyncProfilerHome.trim().isEmpty()) {
+ addPrivilegedServlet("prof", "/prof", ProfileServlet.class);
+ Path tmpDir = Paths.get(ProfileServlet.OUTPUT_DIR);
+ if (Files.notExists(tmpDir)) {
+ Files.createDirectories(tmpDir);
+ }
+ ServletContextHandler genCtx = new ServletContextHandler(contexts, "/prof-output");
+ genCtx.addServlet(ProfileOutputServlet.class, "/*");
+ genCtx.setResourceBase(tmpDir.toAbsolutePath().toString());
+ genCtx.setDisplayName("prof-output");
+ } else {
+ addUnprivilegedServlet("prof", "/prof", ProfileServlet.DisabledServlet.class);
+ LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " +
+ "not specified. Disabling /prof endpoint.");
+ }
+ }
+
+ /**
+ * Set a value in the webapp context. These values are available to the jsp
+ * pages as "application.getAttribute(name)".
+ * @param name The name of the attribute
+ * @param value The value of the attribute
+ */
+ public void setAttribute(String name, Object value) {
+ webAppContext.setAttribute(name, value);
+ }
+
+ /**
+ * Add a Jersey resource package.
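+ *
+ * <p>A usage sketch (the package name and path spec are hypothetical):
+ * <pre>
+ *   server.addJerseyResourcePackage("org.example.rest.resources", "/api/*");
+ * </pre>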
+ * @param packageName The Java package name containing the Jersey resource.
+ * @param pathSpec The path spec for the servlet
+ */
+ public void addJerseyResourcePackage(final String packageName,
+ final String pathSpec) {
+ LOG.info("addJerseyResourcePackage: packageName=" + packageName
+ + ", pathSpec=" + pathSpec);
+
+ ResourceConfig application = new ResourceConfig().packages(packageName);
+ final ServletHolder sh = new ServletHolder(new ServletContainer(application));
+ webAppContext.addServlet(sh, pathSpec);
+ }
+
+ /**
+ * Adds a servlet in the server that any user can access. This method differs from
+ * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user
+ * can interact with the servlet added by this method.
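+ *
+ * <p>For illustration only (the servlet classes and paths below are hypothetical):
+ * <pre>
+ *   // reachable by any authenticated user
+ *   server.addUnprivilegedServlet("status", "/status", StatusServlet.class);
+ *   // reachable only by users in the admin ACL
+ *   server.addPrivilegedServlet("dump", "/dump", DumpServlet.class);
+ * </pre>
+ *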
+ * @param name The name of the servlet (can be passed as null)
+ * @param pathSpec The path spec for the servlet
+ * @param clazz The servlet class
+ */
+ public void addUnprivilegedServlet(String name, String pathSpec,
+ Class<? extends HttpServlet> clazz) {
+ addServletWithAuth(name, pathSpec, clazz, false);
+ }
+
+ /**
+ * Adds a servlet in the server that only administrators can access. This method differs from
+ * {@link #addUnprivilegedServlet(String, String, Class)} in that only those authenticated users
+ * who are identified as administrators can interact with the servlet added by this method.
+ */
+ public void addPrivilegedServlet(String name, String pathSpec,
+ Class<? extends HttpServlet> clazz) {
+ addServletWithAuth(name, pathSpec, clazz, true);
+ }
+
+ /**
+ * Internal method to add a servlet to the HTTP server. Developers should not call this method
+ * directly, but invoke it via {@link #addUnprivilegedServlet(String, String, Class)} or
+ * {@link #addPrivilegedServlet(String, String, Class)}.
+ */
+ void addServletWithAuth(String name, String pathSpec,
+ Class<? extends HttpServlet> clazz, boolean requireAuthz) {
+ addInternalServlet(name, pathSpec, clazz, requireAuthz);
+ addFilterPathMapping(pathSpec, webAppContext);
+ }
+
+ /**
+ * Add an internal servlet in the server, specifying whether or not to
+ * protect with Kerberos authentication.
+ * Note: This method is to be used for adding servlets that facilitate
+ * internal communication and not for user facing functionality. For
+ * servlets added using this method, filters (except internal Kerberos
+ * filters) are not enabled.
+ *
+ * @param name The name of the servlet (can be passed as null)
+ * @param pathSpec The path spec for the servlet
+ * @param clazz The servlet class
+ * @param requireAuthz Require Kerberos authentication to access the servlet
+ */
+ void addInternalServlet(String name, String pathSpec,
+ Class<? extends HttpServlet> clazz, boolean requireAuthz) {
+ ServletHolder holder = new ServletHolder(clazz);
+ if (name != null) {
+ holder.setName(name);
+ }
+ if (authenticationEnabled && requireAuthz) {
+ FilterHolder filter = new FilterHolder(AdminAuthorizedFilter.class);
+ filter.setName(AdminAuthorizedFilter.class.getSimpleName());
+ FilterMapping fmap = new FilterMapping();
+ fmap.setPathSpec(pathSpec);
+ fmap.setDispatches(FilterMapping.ALL);
+ fmap.setFilterName(AdminAuthorizedFilter.class.getSimpleName());
+ webAppContext.getServletHandler().addFilter(filter, fmap);
+ }
+ webAppContext.addServlet(holder, pathSpec);
+ }
+
+ @Override
+ public void addFilter(String name, String classname, Map<String, String> parameters) {
+ final String[] USER_FACING_URLS = { "*.html", "*.jsp" };
+ defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS);
+ LOG.info("Added filter " + name + " (class=" + classname
+ + ") to context " + webAppContext.getDisplayName());
+ final String[] ALL_URLS = { "/*" };
+ for (Map.Entry<ServletContextHandler, Boolean> e : defaultContexts.entrySet()) {
+ if (e.getValue()) {
+ ServletContextHandler handler = e.getKey();
+ defineFilter(handler, name, classname, parameters, ALL_URLS);
+ LOG.info("Added filter " + name + " (class=" + classname
+ + ") to context " + handler.getDisplayName());
+ }
+ }
+ filterNames.add(name);
+ }
+
+ @Override
+ public void addGlobalFilter(String name, String classname, Map<String, String> parameters) {
+ final String[] ALL_URLS = { "/*" };
+ defineFilter(webAppContext, name, classname, parameters, ALL_URLS);
+ for (ServletContextHandler ctx : defaultContexts.keySet()) {
+ defineFilter(ctx, name, classname, parameters, ALL_URLS);
+ }
+ LOG.info("Added global filter '" + name + "' (class=" + classname + ")");
+ }
+
+ /**
+ * Define a filter for a context and set up default url mappings.
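+ *
+ * <p>For example, the no-cache filter in this class is attached to every path roughly as:
+ * <pre>{@code
+ *   defineFilter(ctx, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
+ *       Collections.<String, String> emptyMap(), new String[] { "/*" });
+ * }</pre>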
+ */
+ public static void defineFilter(ServletContextHandler handler, String name,
+ String classname, Map<String, String> parameters, String[] urls) {
+ FilterHolder holder = new FilterHolder();
+ holder.setName(name);
+ holder.setClassName(classname);
+ if (parameters != null) {
+ holder.setInitParameters(parameters);
+ }
+ FilterMapping fmap = new FilterMapping();
+ fmap.setPathSpecs(urls);
+ fmap.setDispatches(FilterMapping.ALL);
+ fmap.setFilterName(name);
+ handler.getServletHandler().addFilter(holder, fmap);
+ }
+
+ /**
+ * Add the path spec to the filter path mapping.
+ * @param pathSpec The path spec
+ * @param webAppCtx The WebApplicationContext to add to
+ */
+ protected void addFilterPathMapping(String pathSpec,
+ WebAppContext webAppCtx) {
+ for(String name : filterNames) {
+ FilterMapping fmap = new FilterMapping();
+ fmap.setPathSpec(pathSpec);
+ fmap.setFilterName(name);
+ fmap.setDispatches(FilterMapping.ALL);
+ webAppCtx.getServletHandler().addFilterMapping(fmap);
+ }
+ }
+
+ /**
+ * Get the value in the webapp context.
+ * @param name The name of the attribute
+ * @return The value of the attribute
+ */
+ public Object getAttribute(String name) {
+ return webAppContext.getAttribute(name);
+ }
+
+ public WebAppContext getWebAppContext(){
+ return this.webAppContext;
+ }
+
+ public String getWebAppsPath(String appName) throws FileNotFoundException {
+ return getWebAppsPath(this.appDir, appName);
+ }
+
+ /**
+ * Get the pathname to the webapps files.
+ * @param appName eg "secondary" or "datanode"
+ * @return the pathname as a URL
+ * @throws FileNotFoundException if 'webapps' directory cannot be found on CLASSPATH.
+ */
+ protected String getWebAppsPath(String webapps, String appName) throws FileNotFoundException {
+ URL url = getClass().getClassLoader().getResource(webapps + "/" + appName);
+
+ if (url == null) {
+ throw new FileNotFoundException(webapps + "/" + appName
+ + " not found in CLASSPATH");
+ }
+
+ String urlString = url.toString();
+ return urlString.substring(0, urlString.lastIndexOf('/'));
+ }
+
+ /**
+ * Get the port that the server is on
+ * @return the port
+ * @deprecated Since 0.99.0
+ */
+ @Deprecated
+ public int getPort() {
+ return ((ServerConnector)webServer.getConnectors()[0]).getLocalPort();
+ }
+
+ /**
+ * Get the address that corresponds to a particular connector.
+ *
+ * @return the corresponding address for the connector, or null if there's no
+ * such connector or the connector is not bounded.
+ */
+ public InetSocketAddress getConnectorAddress(int index) {
+ Preconditions.checkArgument(index >= 0);
+
+ if (index >= webServer.getConnectors().length) {
+ return null;
+ }
+
+ ServerConnector c = (ServerConnector)webServer.getConnectors()[index];
+ if (c.getLocalPort() == -1 || c.getLocalPort() == -2) {
+ // -1 if the connector has not been opened
+ // -2 if it has been closed
+ return null;
+ }
+
+ return new InetSocketAddress(c.getHost(), c.getLocalPort());
+ }
+
+ /**
+ * Set the min, max number of worker threads (simultaneous connections).
+ */
+ public void setThreads(int min, int max) {
+ QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool();
+ pool.setMinThreads(min);
+ pool.setMaxThreads(max);
+ }
+
+ private void initSpnego(Configuration conf, String hostName,
+ String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey,
+ String signatureSecretKeyFileKey) throws IOException {
+ Map<String, String> params = new HashMap<>();
+ String principalInConf = getOrEmptyString(conf, usernameConfKey);
+ if (!principalInConf.isEmpty()) {
+ params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal(
+ principalInConf, hostName));
+ }
+ String httpKeytab = getOrEmptyString(conf, keytabConfKey);
+ if (!httpKeytab.isEmpty()) {
+ params.put(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX, httpKeytab);
+ }
+ String kerberosNameRule = getOrEmptyString(conf, kerberosNameRuleKey);
+ if (!kerberosNameRule.isEmpty()) {
+ params.put(HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX, kerberosNameRule);
+ }
+ String signatureSecretKeyFile = getOrEmptyString(conf, signatureSecretKeyFileKey);
+ if (!signatureSecretKeyFile.isEmpty()) {
+ params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX,
+ signatureSecretKeyFile);
+ }
+ params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+
+ // Verify that the required options were provided
+ if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) ||
+ isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) {
+ throw new IllegalArgumentException(usernameConfKey + " and "
+ + keytabConfKey + " are both required in the configuration "
+ + "to enable SPNEGO/Kerberos authentication for the Web UI");
+ }
+
+ if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY,
+ HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) {
+ //Copy/rename standard hadoop proxyuser settings to filter
+ for (Map.Entry<String, String> proxyEntry :
+ conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+ params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(),
+ proxyEntry.getValue());
+ }
+ addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), params);
+ } else {
+ addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+ }
+ }
+
+ /**
+ * Returns true if the argument is null or consists only of whitespace.
+ */
+ private boolean isMissing(String value) {
+ if (null == value) {
+ return true;
+ }
+ return value.trim().isEmpty();
+ }
+
+ /**
+ * Extracts the value for the given key from the configuration, or returns the
+ * empty string if the key is null or unset.
+ */
+ private String getOrEmptyString(Configuration conf, String key) {
+ if (null == key) {
+ return EMPTY_STRING;
+ }
+ final String value = conf.get(key.trim());
+ return null == value ? EMPTY_STRING : value;
+ }
+
+ /**
+ * Start the server. Does not wait for the server to start.
+ */
+ public void start() throws IOException {
+ try {
+ try {
+ openListeners();
+ webServer.start();
+ } catch (IOException ex) {
+ LOG.info("HttpServer.start() threw a non Bind IOException", ex);
+ throw ex;
+ } catch (MultiException ex) {
+ LOG.info("HttpServer.start() threw a MultiException", ex);
+ throw ex;
+ }
+ // Make sure there are no handler failures.
+ Handler[] handlers = webServer.getHandlers();
+ for (int i = 0; i < handlers.length; i++) {
+ if (handlers[i].isFailed()) {
+ throw new IOException(
+ "Problem in starting http server. Server handlers failed");
+ }
+ }
+ // Make sure there are no errors initializing the context.
+ Throwable unavailableException = webAppContext.getUnavailableException();
+ if (unavailableException != null) {
+ // Have to stop the webserver, or else its non-daemon threads
+ // will hang forever.
+ webServer.stop();
+ throw new IOException("Unable to initialize WebAppContext",
+ unavailableException);
+ }
+ } catch (IOException e) {
+ throw e;
+ } catch (InterruptedException e) {
+ throw (IOException) new InterruptedIOException(
+ "Interrupted while starting HTTP server").initCause(e);
+ } catch (Exception e) {
+ throw new IOException("Problem starting http server", e);
+ }
+ }
+
+ private void loadListeners() {
+ for (ListenerInfo li : listeners) {
+ webServer.addConnector(li.listener);
+ }
+ }
+
+ /**
+ * Open the main listener for the server
+ * @throws Exception if the listener cannot be opened or the appropriate port is already in use
+ */
+ @VisibleForTesting
+ void openListeners() throws Exception {
+ for (ListenerInfo li : listeners) {
+ ServerConnector listener = li.listener;
+ if (!li.isManaged || (li.listener.getLocalPort() != -1 && li.listener.getLocalPort() != -2)) {
+ // This listener is either started externally or is already open; nothing to do here
+ continue;
+ }
+ int port = listener.getPort();
+ while (true) {
+ // jetty has a bug where you can't reopen a listener that previously
+ // failed to open w/o issuing a close first, even if the port is changed
+ try {
+ listener.close();
+ listener.open();
+ LOG.info("Jetty bound to port " + listener.getLocalPort());
+ break;
+ } catch (IOException ex) {
+ if(!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) {
+ throw ex;
+ }
+ if (port == 0 || !findPort) {
+ BindException be = new BindException("Port in use: "
+ + listener.getHost() + ":" + listener.getPort());
+ be.initCause(ex);
+ throw be;
+ }
+ }
+ // try the next port number
+ listener.setPort(++port);
+ Thread.sleep(100);
+ }
+ }
+ }
+
+ /**
+ * stop the server
+ */
+ public void stop() throws Exception {
+ MultiException exception = null;
+ for (ListenerInfo li : listeners) {
+ if (!li.isManaged) {
+ continue;
+ }
+
+ try {
+ li.listener.close();
+ } catch (Exception e) {
+ LOG.error(
+ "Error while stopping listener for webapp"
+ + webAppContext.getDisplayName(), e);
+ exception = addMultiException(exception, e);
+ }
+ }
+
+ try {
+ // clear & stop webAppContext attributes to avoid memory leaks.
+ webAppContext.clearAttributes();
+ webAppContext.stop();
+ } catch (Exception e) {
+ LOG.error("Error while stopping web app context for webapp "
+ + webAppContext.getDisplayName(), e);
+ exception = addMultiException(exception, e);
+ }
+
+ try {
+ webServer.stop();
+ } catch (Exception e) {
+ LOG.error("Error while stopping web server for webapp "
+ + webAppContext.getDisplayName(), e);
+ exception = addMultiException(exception, e);
+ }
+
+ if (exception != null) {
+ exception.ifExceptionThrow();
+ }
+
+ }
+
+ private MultiException addMultiException(MultiException exception, Exception e) {
+ if(exception == null){
+ exception = new MultiException();
+ }
+ exception.add(e);
+ return exception;
+ }
+
+ public void join() throws InterruptedException {
+ webServer.join();
+ }
+
+ /**
+ * Test for the availability of the web server
+ * @return true if the web server is started, false otherwise
+ */
+ public boolean isAlive() {
+ return webServer != null && webServer.isStarted();
+ }
+
+ /**
+ * Return the host and port of the HttpServer, if live
+ * @return the classname and any HTTP URL
+ */
+ @Override
+ public String toString() {
+ if (listeners.isEmpty()) {
+ return "Inactive HttpServer";
+ } else {
+ StringBuilder sb = new StringBuilder("HttpServer (")
+ .append(isAlive() ? STATE_DESCRIPTION_ALIVE :
+ STATE_DESCRIPTION_NOT_LIVE).append("), listening at:");
+ for (ListenerInfo li : listeners) {
+ ServerConnector l = li.listener;
+ sb.append(l.getHost()).append(":").append(l.getPort()).append("/,");
+ }
+ return sb.toString();
+ }
+ }
+
+ /**
+ * Checks whether the user has privileges to access the instrumentation servlets.
+ *
+ * If hadoop.security.instrumentation.requires.admin is set to FALSE
+ * (default value) it always returns TRUE.
+ *
+ * If hadoop.security.instrumentation.requires.admin is set to TRUE
+ * it checks whether the current user is in the admin ACLs. If the user is
+ * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+ *
+ *
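+ * <p>For example, to require admin access to the instrumentation servlets
+ * (a configuration sketch; the flag defaults to false):
+ * <pre>
+ *   conf.setBoolean("hadoop.security.instrumentation.requires.admin", true);
+ * </pre>
+ *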
+ * @param servletContext the servlet context.
+ * @param request the servlet request.
+ * @param response the servlet response.
+ * @return TRUE/FALSE based on the logic described above.
+ */
+ public static boolean isInstrumentationAccessAllowed(
+ ServletContext servletContext, HttpServletRequest request,
+ HttpServletResponse response) throws IOException {
+ Configuration conf =
+ (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+
+ boolean access = true;
+ boolean adminAccess = conf.getBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+ false);
+ if (adminAccess) {
+ access = hasAdministratorAccess(servletContext, request, response);
+ }
+ return access;
+ }
+
+ /**
+ * Does the user sending the HttpServletRequest have the administrator ACLs? If
+ * not, the response will be modified to send an error to the user.
+ *
+ * @param servletContext the {@link ServletContext} to use
+ * @param request the {@link HttpServletRequest} to check
+ * @param response used to send the error response if user does not have admin access.
+ * @return true if admin-authorized, false otherwise
+ * @throws IOException if an unauthenticated or unauthorized user tries to access the page
+ */
+ public static boolean hasAdministratorAccess(
+ ServletContext servletContext, HttpServletRequest request,
+ HttpServletResponse response) throws IOException {
+ Configuration conf =
+ (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+ AccessControlList acl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL);
+
+ return hasAdministratorAccess(conf, acl, request, response);
+ }
+
+ public static boolean hasAdministratorAccess(Configuration conf, AccessControlList acl,
+ HttpServletRequest request, HttpServletResponse response) throws IOException {
+ // If there is no authorization, anybody has administrator access.
+ if (!conf.getBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
+ return true;
+ }
+
+ String remoteUser = request.getRemoteUser();
+ if (remoteUser == null) {
+ response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
+ "Unauthenticated users are not " +
+ "authorized to access this page.");
+ return false;
+ }
+
+ if (acl != null && !userHasAdministratorAccess(acl, remoteUser)) {
+ response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
+ + remoteUser + " is unauthorized to access this page.");
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Get the admin ACLs from the given ServletContext and check if the given
+ * user is in the ACL.
+ *
+ * @param servletContext the context containing the admin ACL.
+ * @param remoteUser the remote user to check for.
+ * @return true if the user is present in the ACL, false if no ACL is set or
+ * the user is not present
+ */
+ public static boolean userHasAdministratorAccess(ServletContext servletContext,
+ String remoteUser) {
+ AccessControlList adminsAcl = (AccessControlList) servletContext
+ .getAttribute(ADMINS_ACL);
+ return userHasAdministratorAccess(adminsAcl, remoteUser);
+ }
+
+ public static boolean userHasAdministratorAccess(AccessControlList acl, String remoteUser) {
+ UserGroupInformation remoteUserUGI =
+ UserGroupInformation.createRemoteUser(remoteUser);
+ return acl != null && acl.isUserAllowed(remoteUserUGI);
+ }
+
+ /**
+ * A very simple servlet to serve up a text representation of the current
+ * stack traces. It both returns the stacks to the caller and logs them.
+ * Because the two dumps are produced sequentially, the returned and logged output
+ * may not contain exactly the same data.
+ */
+ public static class StackServlet extends HttpServlet {
+ private static final long serialVersionUID = -6284183679759467039L;
+
+ @Override
+ public void doGet(HttpServletRequest request, HttpServletResponse response)
+ throws ServletException, IOException {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
+ return;
+ }
+ response.setContentType("text/plain; charset=UTF-8");
+ try (PrintStream out = new PrintStream(
+ response.getOutputStream(), false, "UTF-8")) {
+ Threads.printThreadInfo(out, "");
+ out.flush();
+ }
+ ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);
+ }
+ }
+
+ /**
+ * A Servlet input filter that quotes all HTML active characters in the
+ * parameter names and values. The goal is to quote the characters to make
+ * all of the servlets resistant to cross-site scripting attacks.
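+ *
+ * <p>For example, a request parameter value of {@code <script>} is observed by the
+ * protected servlets as {@code &lt;script&gt;} after quoting.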
+ */
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+ public static class QuotingInputFilter implements Filter {
+ private FilterConfig config;
+
+ public static class RequestQuoter extends HttpServletRequestWrapper {
+ private final HttpServletRequest rawRequest;
+ public RequestQuoter(HttpServletRequest rawRequest) {
+ super(rawRequest);
+ this.rawRequest = rawRequest;
+ }
+
+ /**
+ * Return the set of parameter names, quoting each name.
+ */
+ @Override
+ public Enumeration<String> getParameterNames() {
+ return new Enumeration<String>() {
+ private Enumeration<String> rawIterator =
+ rawRequest.getParameterNames();
+ @Override
+ public boolean hasMoreElements() {
+ return rawIterator.hasMoreElements();
+ }
+
+ @Override
+ public String nextElement() {
+ return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement());
+ }
+ };
+ }
+
+ /**
+ * Unquote the name and quote the value.
+ */
+ @Override
+ public String getParameter(String name) {
+ return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter(
+ HtmlQuoting.unquoteHtmlChars(name)));
+ }
+
+ @Override
+ public String[] getParameterValues(String name) {
+ String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
+ String[] unquoteValue = rawRequest.getParameterValues(unquoteName);
+ if (unquoteValue == null) {
+ return null;
+ }
+ String[] result = new String[unquoteValue.length];
+ for(int i=0; i < result.length; ++i) {
+ result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]);
+ }
+ return result;
+ }
+
+ @Override
+ public Map<String, String[]> getParameterMap() {
+ Map<String, String[]> result = new HashMap<>();
+ Map<String, String[]> raw = rawRequest.getParameterMap();
+ for (Map.Entry<String, String[]> item: raw.entrySet()) {
+ String[] rawValue = item.getValue();
+ String[] cookedValue = new String[rawValue.length];
+ for(int i=0; i< rawValue.length; ++i) {
+ cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]);
+ }
+ result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue);
+ }
+ return result;
+ }
+
+ /**
+ * Quote the url so that users specifying the HOST HTTP header
+ * can't inject attacks.
+ */
+ @Override
+ public StringBuffer getRequestURL(){
+ String url = rawRequest.getRequestURL().toString();
+ return new StringBuffer(HtmlQuoting.quoteHtmlChars(url));
+ }
+
+ /**
+ * Quote the server name so that users specifying the HOST HTTP header
+ * can't inject attacks.
+ */
+ @Override
+ public String getServerName() {
+ return HtmlQuoting.quoteHtmlChars(rawRequest.getServerName());
+ }
+ }
+
+ @Override
+ public void init(FilterConfig config) throws ServletException {
+ this.config = config;
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ @Override
+ public void doFilter(ServletRequest request,
+ ServletResponse response,
+ FilterChain chain
+ ) throws IOException, ServletException {
+ HttpServletRequestWrapper quoted =
+ new RequestQuoter((HttpServletRequest) request);
+ HttpServletResponse httpResponse = (HttpServletResponse) response;
+
+ String mime = inferMimeType(request);
+ if (mime == null) {
+ httpResponse.setContentType("text/plain; charset=utf-8");
+ } else if (mime.startsWith("text/html")) {
+ // HTML with unspecified encoding, we want to
+ // force HTML with utf-8 encoding
+ // This is to avoid the following security issue:
+ // http://openmya.hacker.jp/hasegawa/security/utf7cs.html
+ httpResponse.setContentType("text/html; charset=utf-8");
+ } else if (mime.startsWith("application/xml")) {
+ httpResponse.setContentType("text/xml; charset=utf-8");
+ }
+ chain.doFilter(quoted, httpResponse);
+ }
+
+ /**
+ * Infer the mime type for the response based on the extension of the request
+ * URI. Returns null if unknown.
+ */
+ private String inferMimeType(ServletRequest request) {
+ String path = ((HttpServletRequest)request).getRequestURI();
+ ServletContext context = config.getServletContext();
+ return context.getMimeType(path);
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServerUtil.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServerUtil.java
new file mode 100755
index 00000000..a043ba6c
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServerUtil.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.eclipse.jetty.security.ConstraintMapping;
+import org.eclipse.jetty.security.ConstraintSecurityHandler;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.util.security.Constraint;
+
+/**
+ * HttpServer utility.
+ */
+@InterfaceAudience.Private
+public final class HttpServerUtil {
+ /**
+ * Add constraints to a Jetty Context to disallow undesirable Http methods.
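+ *
+ * <p>A usage sketch (the context handler is assumed to come from the caller):
+ * <pre>
+ *   // disallow both TRACE and OPTIONS on every path of this context
+ *   HttpServerUtil.constrainHttpMethods(ctxHandler, false);
+ * </pre>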
+ * @param ctxHandler The context to modify
+ * @param allowOptionsMethod if true then OPTIONS method will not be set in constraint mapping
+ */
+ public static void constrainHttpMethods(ServletContextHandler ctxHandler,
+ boolean allowOptionsMethod) {
+ Constraint c = new Constraint();
+ c.setAuthenticate(true);
+
+ ConstraintMapping cmt = new ConstraintMapping();
+ cmt.setConstraint(c);
+ cmt.setMethod("TRACE");
+ cmt.setPathSpec("/*");
+
+ ConstraintSecurityHandler securityHandler = new ConstraintSecurityHandler();
+
+ if (!allowOptionsMethod) {
+ ConstraintMapping cmo = new ConstraintMapping();
+ cmo.setConstraint(c);
+ cmo.setMethod("OPTIONS");
+ cmo.setPathSpec("/*");
+ securityHandler.setConstraintMappings(new ConstraintMapping[] { cmt, cmo });
+ } else {
+ securityHandler.setConstraintMappings(new ConstraintMapping[] { cmt });
+ }
+
+ ctxHandler.setSecurityHandler(securityHandler);
+ }
+
+ private HttpServerUtil() {}
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/InfoServer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/InfoServer.java
new file mode 100755
index 00000000..33079740
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/InfoServer.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+import java.net.URI;
+
+/**
+ * Create a Jetty embedded server to answer http requests. The primary goal
+ * is to serve up status information for the server.
+ * There are three contexts:
+ * "/stacks/" -> points to stack trace
+ * "/static/" -> points to common static files (src/hbase-webapps/static)
+ * "/" -> the jsp server code from (src/hbase-webapps/<name>)
+ */
+@InterfaceAudience.Private
+public class InfoServer {
+ private static final String HBASE_APP_DIR = "hbase-webapps";
+ private final HttpServer httpServer;
+
+ /**
+ * Create a status server on the given port.
+ * The jsp scripts are taken from src/hbase-webapps/<name>.
+ * @param name The name of the server
+ * @param bindAddress address to bind to
+ * @param port The port to use on the server
+ * @param findPort whether the server should start at the given port and increment by 1 until it
+ * finds a free port.
+ * @param c the {@link Configuration} to build the server
+ * @throws IOException if getting one of the password fails or the server cannot be created
+ */
+ public InfoServer(String name, String bindAddress, int port, boolean findPort,
+ final Configuration c) throws IOException {
+ HttpConfig httpConfig = new HttpConfig(c);
+ HttpServer.Builder builder =
+ new HttpServer.Builder();
+
+ builder.setName(name).addEndpoint(URI.create(httpConfig.getSchemePrefix() +
+ bindAddress + ":" +
+ port)).setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c);
+ String logDir = System.getProperty("hbase.log.dir");
+ if (logDir != null) {
+ builder.setLogDir(logDir);
+ }
+ if (httpConfig.isSecure()) {
+ builder.keyPassword(HBaseConfiguration
+ .getPassword(c, "ssl.server.keystore.keypassword", null))
+ .keyStore(c.get("ssl.server.keystore.location"),
+ HBaseConfiguration.getPassword(c,"ssl.server.keystore.password", null),
+ c.get("ssl.server.keystore.type", "jks"))
+ .trustStore(c.get("ssl.server.truststore.location"),
+ HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null),
+ c.get("ssl.server.truststore.type", "jks"));
+ }
+ // Enable SPNEGO authentication
+ if ("kerberos".equalsIgnoreCase(c.get(HttpServer.HTTP_UI_AUTHENTICATION, null))) {
+ builder.setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY)
+ .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY)
+ .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY)
+ .setSignatureSecretFileKey(
+ HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY)
+ .setSecurityEnabled(true);
+
+ // Set an admin ACL on sensitive webUI endpoints
+ AccessControlList acl = buildAdminAcl(c);
+ builder.setACL(acl);
+ }
+ this.httpServer = builder.build();
+ }
+
+ /**
+ * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI
+ * which are meant only for administrators.
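+ *
+ * <p>A configuration sketch (the user and group names are hypothetical):
+ * <pre>
+ *   conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, "admin1,admin2");
+ *   conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, "ops");
+ * </pre>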
+ */
+ AccessControlList buildAdminAcl(Configuration conf) {
+ final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null);
+ final String adminGroups = conf.get(
+ HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null);
+ if (userGroups == null && adminGroups == null) {
+ // Backwards compatibility - if the user doesn't have anything set, allow all users in.
+ return new AccessControlList("*", null);
+ }
+ return new AccessControlList(userGroups, adminGroups);
+ }
+
+ /**
+ * Explicitly invoke {@link #addPrivilegedServlet(String, String, Class)} or
+ * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method.
+ * This method will add a servlet which any authenticated user can access.
+ *
+ * @deprecated Use {@link #addUnprivilegedServlet(String, String, Class)} or
+ * {@link #addPrivilegedServlet(String, String, Class)} instead of this
+ * method which does not state outwardly what kind of authz rules will
+ * be applied to this servlet.
+ */
+ @Deprecated
+ public void addServlet(String name, String pathSpec,
+ Class<? extends HttpServlet> clazz) {
+ addUnprivilegedServlet(name, pathSpec, clazz);
+ }
+
+ /**
+ * @see HttpServer#addUnprivilegedServlet(String, String, Class)
+ */
+ public void addUnprivilegedServlet(String name, String pathSpec,
+ Class<? extends HttpServlet> clazz) {
+ this.httpServer.addUnprivilegedServlet(name, pathSpec, clazz);
+ }
+
+ /**
+ * @see HttpServer#addPrivilegedServlet(String, String, Class)
+ */
+ public void addPrivilegedServlet(String name, String pathSpec,
+ Class<? extends HttpServlet> clazz) {
+ this.httpServer.addPrivilegedServlet(name, pathSpec, clazz);
+ }
+
+ public void setAttribute(String name, Object value) {
+ this.httpServer.setAttribute(name, value);
+ }
+
+ public void start() throws IOException {
+ this.httpServer.start();
+ }
+
+ /**
+ * @return the port of the info server
+ * @deprecated Since 0.99.0
+ */
+ @Deprecated
+ public int getPort() {
+ return this.httpServer.getPort();
+ }
+
+ public void stop() throws Exception {
+ this.httpServer.stop();
+ }
+
+
+ /**
+ * Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled,
+ * and the requesting user is defined as an administrator. If the UI is set to readonly, this
+ * method always returns false.
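+ *
+ * <p>A usage sketch from a servlet or JSP (names are illustrative):
+ * <pre>
+ *   if (InfoServer.canUserModifyUI(request, getServletContext(), conf)) {
+ *     // render controls that mutate state
+ *   }
+ * </pre>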
+ */
+ public static boolean canUserModifyUI(
+ HttpServletRequest req, ServletContext ctx, Configuration conf) {
+ if (conf.getBoolean("hbase.master.ui.readonly", false)) {
+ return false;
+ }
+ String remoteUser = req.getRemoteUser();
+ if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) &&
+ conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) &&
+ remoteUser != null) {
+ return HttpServer.userHasAdministratorAccess(ctx, remoteUser);
+ }
+ return false;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileOutputServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileOutputServlet.java
new file mode 100755
index 00000000..c867b727
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileOutputServlet.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.eclipse.jetty.servlet.DefaultServlet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.IOException;
+import java.util.regex.Pattern;
+
+/**
+ * Servlet to serve files generated by {@link ProfileServlet}
+ */
+@InterfaceAudience.Private
+public class ProfileOutputServlet extends DefaultServlet {
+ private static final long serialVersionUID = 1L;
+ private static final Logger LOG = LoggerFactory.getLogger(ProfileOutputServlet.class);
+ private static final int REFRESH_PERIOD = 2;
+ // Alphanumeric characters, plus percent (url-encoding), equals, ampersand, dot and hyphen
+ private static final Pattern ALPHA_NUMERIC = Pattern.compile("[a-zA-Z0-9%=&.\\-]*");
+
+ @Override
+ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
+ throws ServletException, IOException {
+ String absoluteDiskPath = getServletContext().getRealPath(req.getPathInfo());
+ File requestedFile = new File(absoluteDiskPath);
+ // async-profiler version 1.4 writes 'Started [cpu] profiling' to output file when profiler is
+ // running which gets replaced by final output. If final output is not ready yet, the file size
+ // will be <100 bytes (in all modes).
+ if (requestedFile.length() < 100) {
+ LOG.info(requestedFile + " is incomplete. Sending auto-refresh header.");
+ String refreshUrl = req.getRequestURI();
+ // Rebuild the query string (if we have one)
+ if (req.getQueryString() != null) {
+ refreshUrl += "?" + sanitize(req.getQueryString());
+ }
+ ProfileServlet.setResponseHeader(resp);
+ resp.setHeader("Refresh", REFRESH_PERIOD + ";" + refreshUrl);
+ resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD +
+ " seconds until the output file is ready. Redirecting to " + refreshUrl);
+ } else {
+ super.doGet(req, resp);
+ }
+ }
+
+ static String sanitize(String input) {
+ // Basic test to try to avoid any XSS attacks or HTML content showing up.
+ // Duplicates HtmlQuoting a little, but avoid destroying ampersand.
+ if (ALPHA_NUMERIC.matcher(input).matches()) {
+ return input;
+ }
+ throw new RuntimeException("Non-alphanumeric data found in input, aborting.");
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileServlet.java
new file mode 100755
index 00000000..1cb3d68a
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileServlet.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.hbase.rest.util.ProcessUtils;
+import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * Servlet that runs async-profiler as web-endpoint.
+ * The following async-profiler options can be specified as query parameters.
+ * // -e event profiling event: cpu|alloc|lock|cache-misses etc.
+ * // -d duration run profiling for 'duration' seconds (integer)
+ * // -i interval sampling interval in nanoseconds (long)
+ * // -j jstackdepth maximum Java stack depth (integer)
+ * // -b bufsize frame buffer size (long)
+ * // -t profile different threads separately
+ * // -s simple class names instead of FQN
+ * // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr
+ * // --width px SVG width pixels (integer)
+ * // --height px SVG frame height pixels (integer)
+ * // --minwidth px skip frames smaller than px (double)
+ * // --reverse generate stack-reversed FlameGraph / Call tree
+ * Example:
+ * - To collect 30 second CPU profile of current process (returns FlameGraph svg)
+ * curl "http://localhost:10002/prof"
+ * - To collect 1 minute CPU profile of current process and output in tree format (html)
+ * curl "http://localhost:10002/prof?output=tree&duration=60"
+ * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg)
+ * curl "http://localhost:10002/prof?event=alloc"
+ * - To collect lock contention profile of current process (returns FlameGraph svg)
+ * curl "http://localhost:10002/prof?event=lock"
+ * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events)
+ * // Perf events:
+ * // cpu
+ * // page-faults
+ * // context-switches
+ * // cycles
+ * // instructions
+ * // cache-references
+ * // cache-misses
+ * // branches
+ * // branch-misses
+ * // bus-cycles
+ * // L1-dcache-load-misses
+ * // LLC-load-misses
+ * // dTLB-load-misses
+ * // mem:breakpoint
+ * // trace:tracepoint
+ * // Java events:
+ * // alloc
+ * // lock
+ */
+@InterfaceAudience.Private
+public class ProfileServlet extends HttpServlet {
+
+ private static final long serialVersionUID = 1L;
+ private static final Logger LOG = LoggerFactory.getLogger(ProfileServlet.class);
+
+ private static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods";
+ private static final String ALLOWED_METHODS = "GET";
+ private static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin";
+ private static final String CONTENT_TYPE_TEXT = "text/plain; charset=utf-8";
+ private static final String ASYNC_PROFILER_HOME_ENV = "ASYNC_PROFILER_HOME";
+ private static final String ASYNC_PROFILER_HOME_SYSTEM_PROPERTY = "async.profiler.home";
+ private static final String PROFILER_SCRIPT = "/profiler.sh";
+ private static final int DEFAULT_DURATION_SECONDS = 10;
+ private static final AtomicInteger ID_GEN = new AtomicInteger(0);
+ static final String OUTPUT_DIR = System.getProperty("java.io.tmpdir") + "/prof-output";
+
+ enum Event {
+ CPU("cpu"),
+ ALLOC("alloc"),
+ LOCK("lock"),
+ PAGE_FAULTS("page-faults"),
+ CONTEXT_SWITCHES("context-switches"),
+ CYCLES("cycles"),
+ INSTRUCTIONS("instructions"),
+ CACHE_REFERENCES("cache-references"),
+ CACHE_MISSES("cache-misses"),
+ BRANCHES("branches"),
+ BRANCH_MISSES("branch-misses"),
+ BUS_CYCLES("bus-cycles"),
+ L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"),
+ LLC_LOAD_MISSES("LLC-load-misses"),
+ DTLB_LOAD_MISSES("dTLB-load-misses"),
+ MEM_BREAKPOINT("mem:breakpoint"),
+ TRACE_TRACEPOINT("trace:tracepoint"),;
+
+ private final String internalName;
+
+ Event(final String internalName) {
+ this.internalName = internalName;
+ }
+
+ public String getInternalName() {
+ return internalName;
+ }
+
+ public static Event fromInternalName(final String name) {
+ for (Event event : values()) {
+ if (event.getInternalName().equalsIgnoreCase(name)) {
+ return event;
+ }
+ }
+
+ return null;
+ }
+ }
+
+ enum Output {
+ SUMMARY,
+ TRACES,
+ FLAT,
+ COLLAPSED,
+ SVG,
+ TREE,
+ JFR
+ }
+
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED",
+ justification = "This class is never serialized nor restored.")
+ private transient Lock profilerLock = new ReentrantLock();
+ private transient volatile Process process;
+ private String asyncProfilerHome;
+ private Integer pid;
+
+ public ProfileServlet() {
+ this.asyncProfilerHome = getAsyncProfilerHome();
+ this.pid = ProcessUtils.getPid();
+ LOG.info("Servlet process PID: " + pid + " asyncProfilerHome: " + asyncProfilerHome);
+ }
+
+ @Override
+ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
+ throws IOException {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), req, resp)) {
+ resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+ setResponseHeader(resp);
+ resp.getWriter().write("Unauthorized: Instrumentation access is not allowed!");
+ return;
+ }
+
+ // make sure async profiler home is set
+ if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ setResponseHeader(resp);
+ resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" +
+ "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" +
+ "environment is properly configured. For more information please see\n" +
+ "http://hbase.apache.org/book.html#profiler\n");
+ return;
+ }
+
+ // if pid is explicitly specified, use it else default to current process
+ pid = getInteger(req, "pid", pid);
+
+ // if pid is not specified in query param and if current process pid cannot be determined
+ if (pid == null) {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ setResponseHeader(resp);
+ resp.getWriter().write(
+ "'pid' query parameter unspecified or unable to determine PID of current process.");
+ return;
+ }
+
+ final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS);
+ final Output output = getOutput(req);
+ final Event event = getEvent(req);
+ final Long interval = getLong(req, "interval");
+ final Integer jstackDepth = getInteger(req, "jstackdepth", null);
+ final Long bufsize = getLong(req, "bufsize");
+ final boolean thread = req.getParameterMap().containsKey("thread");
+ final boolean simple = req.getParameterMap().containsKey("simple");
+ final Integer width = getInteger(req, "width", null);
+ final Integer height = getInteger(req, "height", null);
+ final Double minwidth = getMinWidth(req);
+ final boolean reverse = req.getParameterMap().containsKey("reverse");
+
+ if (process == null || !process.isAlive()) {
+ try {
+ int lockTimeoutSecs = 3;
+ if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) {
+ try {
+ File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" +
+ event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() + "." +
+ output.name().toLowerCase());
+ List<String> cmd = new ArrayList<>();
+ cmd.add(asyncProfilerHome + PROFILER_SCRIPT);
+ cmd.add("-e");
+ cmd.add(event.getInternalName());
+ cmd.add("-d");
+ cmd.add("" + duration);
+ cmd.add("-o");
+ cmd.add(output.name().toLowerCase());
+ cmd.add("-f");
+ cmd.add(outputFile.getAbsolutePath());
+ if (interval != null) {
+ cmd.add("-i");
+ cmd.add(interval.toString());
+ }
+ if (jstackDepth != null) {
+ cmd.add("-j");
+ cmd.add(jstackDepth.toString());
+ }
+ if (bufsize != null) {
+ cmd.add("-b");
+ cmd.add(bufsize.toString());
+ }
+ if (thread) {
+ cmd.add("-t");
+ }
+ if (simple) {
+ cmd.add("-s");
+ }
+ if (width != null) {
+ cmd.add("--width");
+ cmd.add(width.toString());
+ }
+ if (height != null) {
+ cmd.add("--height");
+ cmd.add(height.toString());
+ }
+ if (minwidth != null) {
+ cmd.add("--minwidth");
+ cmd.add(minwidth.toString());
+ }
+ if (reverse) {
+ cmd.add("--reverse");
+ }
+ cmd.add(pid.toString());
+ process = ProcessUtils.runCmdAsync(cmd);
+
+ // set response and set refresh header to output location
+ setResponseHeader(resp);
+ resp.setStatus(HttpServletResponse.SC_ACCEPTED);
+ String relativeUrl = "/prof-output/" + outputFile.getName();
+ resp.getWriter().write(
+ "Started [" + event.getInternalName() +
+ "] profiling. This page will automatically redirect to " +
+ relativeUrl + " after " + duration + " seconds. " +
+ "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " +
+ "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." +
+ "\n\nCommand:\n" +
+ Joiner.on(" ").join(cmd));
+
+ // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified
+ // via url param
+ int refreshDelay = getInteger(req, "refreshDelay", 0);
+
+ // instead of sending redirect, set auto-refresh so that browsers will refresh
+ // with redirected url
+ resp.setHeader("Refresh", (duration + refreshDelay) + ";" + relativeUrl);
+ resp.getWriter().flush();
+ } finally {
+ profilerLock.unlock();
+ }
+ } else {
+ setResponseHeader(resp);
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ resp.getWriter().write(
+ "Unable to acquire lock. Another instance of profiler might be running.");
+ LOG.warn("Unable to acquire lock in " + lockTimeoutSecs +
+ " seconds. Another instance of profiler might be running.");
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted while acquiring profile lock.", e);
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ }
+ } else {
+ setResponseHeader(resp);
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ resp.getWriter().write("Another instance of profiler is already running.");
+ }
+ }
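+
+ // Illustrative only (not part of the original change): for a request such as
+ // /prof?event=alloc&duration=30 against pid 1234, the command assembled above is roughly
+ //   $ASYNC_PROFILER_HOME/profiler.sh -e alloc -d 30 -o svg -f <tmpdir>/prof-output/async-prof-pid-1234-alloc-1.svg 1234
+ // and the response auto-refreshes to /prof-output/async-prof-pid-1234-alloc-1.svg once ready.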
+
+ private Integer getInteger(final HttpServletRequest req, final String param,
+ final Integer defaultValue) {
+ final String value = req.getParameter(param);
+ if (value != null) {
+ try {
+ return Integer.valueOf(value);
+ } catch (NumberFormatException e) {
+ return defaultValue;
+ }
+ }
+ return defaultValue;
+ }
+
+ private Long getLong(final HttpServletRequest req, final String param) {
+ final String value = req.getParameter(param);
+ if (value != null) {
+ try {
+ return Long.valueOf(value);
+ } catch (NumberFormatException e) {
+ return null;
+ }
+ }
+ return null;
+ }
+
+ private Double getMinWidth(final HttpServletRequest req) {
+ final String value = req.getParameter("minwidth");
+ if (value != null) {
+ try {
+ return Double.valueOf(value);
+ } catch (NumberFormatException e) {
+ return null;
+ }
+ }
+ return null;
+ }
+
+ private Event getEvent(final HttpServletRequest req) {
+ final String eventArg = req.getParameter("event");
+ if (eventArg != null) {
+ Event event = Event.fromInternalName(eventArg);
+ return event == null ? Event.CPU : event;
+ }
+ return Event.CPU;
+ }
+
+ private Output getOutput(final HttpServletRequest req) {
+ final String outputArg = req.getParameter("output");
+ if (req.getParameter("output") != null) {
+ try {
+ return Output.valueOf(outputArg.trim().toUpperCase());
+ } catch (IllegalArgumentException e) {
+ return Output.SVG;
+ }
+ }
+ return Output.SVG;
+ }
+
+ static void setResponseHeader(final HttpServletResponse response) {
+ response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS);
+ response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
+ response.setContentType(CONTENT_TYPE_TEXT);
+ }
+
+ static String getAsyncProfilerHome() {
+ String asyncProfilerHome = System.getenv(ASYNC_PROFILER_HOME_ENV);
+ // if ENV is not set, see if -Dasync.profiler.home=/path/to/async/profiler/home is set
+ if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) {
+ asyncProfilerHome = System.getProperty(ASYNC_PROFILER_HOME_SYSTEM_PROPERTY);
+ }
+
+ return asyncProfilerHome;
+ }
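+
+ // Illustrative usage (paths are hypothetical): export ASYNC_PROFILER_HOME=/opt/async-profiler
+ // before starting the REST server, or pass -Dasync.profiler.home=/opt/async-profiler to the JVM;
+ // per the lookup order above, the environment variable wins when both are set.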
+
+ public static class DisabledServlet extends HttpServlet {
+
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
+ throws IOException {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ setResponseHeader(resp);
+ resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" +
+ "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" +
+ "environment is properly configured. For more information please see\n" +
+ "http://hbase.apache.org/book.html#profiler\n");
+ return;
+ }
+
+ }
+
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProxyUserAuthenticationFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProxyUserAuthenticationFilter.java
new file mode 100755
index 00000000..55e774a2
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProxyUserAuthenticationFilter.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.security.Principal;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * This file has been copied directly (changing only the package name and the ASF license
+ * text format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase
+ * depends on doesn't have it yet
+ * (as of 2020 Apr 24, there is no Hadoop release that has it either).
+ *
+ * Hadoop version:
+ * unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49
+ *
+ * Hadoop path:
+ * hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/
+ * server/ProxyUserAuthenticationFilter.java
+ *
+ * AuthenticationFilter which adds support to perform operations
+ * using end user instead of proxy user. Fetches the end user from
+ * doAs Query Parameter.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ProxyUserAuthenticationFilter extends AuthenticationFilter {
+
+ private static final Logger LOG = LoggerFactory.getLogger(
+ ProxyUserAuthenticationFilter.class);
+
+ private static final String DO_AS = "doas";
+ public static final String PROXYUSER_PREFIX = "proxyuser";
+
+ @Override
+ public void init(FilterConfig filterConfig) throws ServletException {
+ Configuration conf = getProxyuserConfiguration(filterConfig);
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
+ super.init(filterConfig);
+ }
+
+ @Override
+ protected void doFilter(FilterChain filterChain, HttpServletRequest request,
+ HttpServletResponse response) throws IOException, ServletException {
+ final HttpServletRequest lowerCaseRequest = toLowerCase(request);
+ String doAsUser = lowerCaseRequest.getParameter(DO_AS);
+
+ if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) {
+ LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ",
+ doAsUser, request.getRemoteUser(), request.getRemoteAddr());
+ UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ?
+ UserGroupInformation.createRemoteUser(request.getRemoteUser())
+ : null;
+ if (requestUgi != null) {
+ requestUgi = UserGroupInformation.createProxyUser(doAsUser,
+ requestUgi);
+ try {
+ ProxyUsers.authorize(requestUgi, request.getRemoteAddr());
+
+ final UserGroupInformation ugiF = requestUgi;
+ request = new HttpServletRequestWrapper(request) {
+ @Override
+ public String getRemoteUser() {
+ return ugiF.getShortUserName();
+ }
+
+ @Override
+ public Principal getUserPrincipal() {
+ return new Principal() {
+ @Override
+ public String getName() {
+ return ugiF.getUserName();
+ }
+ };
+ }
+ };
+ LOG.debug("Proxy user Authentication successful");
+ } catch (AuthorizationException ex) {
+ HttpExceptionUtils.createServletExceptionResponse(response,
+ HttpServletResponse.SC_FORBIDDEN, ex);
+ LOG.warn("Proxy user Authentication exception", ex);
+ return;
+ }
+ }
+ }
+ super.doFilter(filterChain, request, response);
+ }
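+
+ // Illustrative only (user names are examples): a superuser "rest" proxying for "alice" issues
+ //   GET /version/cluster?doas=alice   (authenticated as "rest"),
+ // which is authorized only if this filter's init parameters, e.g. proxyuser.rest.hosts and
+ // proxyuser.rest.groups, allow "rest" to impersonate from that host/group.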
+
+ protected Configuration getProxyuserConfiguration(FilterConfig filterConfig)
+ throws ServletException {
+ Configuration conf = new Configuration(false);
+ Enumeration<?> names = filterConfig.getInitParameterNames();
+ while (names.hasMoreElements()) {
+ String name = (String) names.nextElement();
+ if (name.startsWith(PROXYUSER_PREFIX + ".")) {
+ String value = filterConfig.getInitParameter(name);
+ conf.set(name, value);
+ }
+ }
+ return conf;
+ }
+
+ static boolean containsUpperCase(final Iterable<String> strings) {
+ for(String s : strings) {
+ for(int i = 0; i < s.length(); i++) {
+ if (Character.isUpperCase(s.charAt(i))) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public static HttpServletRequest toLowerCase(
+ final HttpServletRequest request) {
+ @SuppressWarnings("unchecked")
+ final Map<String, String[]> original = (Map<String, String[]>)
+ request.getParameterMap();
+ if (!containsUpperCase(original.keySet())) {
+ return request;
+ }
+
+ final Map<String, List<String>> m = new HashMap<String, List<String>>();
+ for (Map.Entry<String, String[]> entry : original.entrySet()) {
+ final String key = StringUtils.toLowerCase(entry.getKey());
+ List<String> strings = m.get(key);
+ if (strings == null) {
+ strings = new ArrayList<String>();
+ m.put(key, strings);
+ }
+ for (String v : entry.getValue()) {
+ strings.add(v);
+ }
+ }
+
+ return new HttpServletRequestWrapper(request) {
+ private Map<String, String[]> parameters = null;
+
+ @Override
+ public Map<String, String[]> getParameterMap() {
+ if (parameters == null) {
+ parameters = new HashMap<String, String[]>();
+ for (Map.Entry<String, List<String>> entry : m.entrySet()) {
+ final List<String> a = entry.getValue();
+ parameters.put(entry.getKey(), a.toArray(new String[a.size()]));
+ }
+ }
+ return parameters;
+ }
+
+ @Override
+ public String getParameter(String name) {
+ final List<String> a = m.get(name);
+ return a == null ? null : a.get(0);
+ }
+
+ @Override
+ public String[] getParameterValues(String name) {
+ return getParameterMap().get(name);
+ }
+
+ @Override
+ public Enumeration<String> getParameterNames() {
+ final Iterator<String> i = m.keySet().iterator();
+ return new Enumeration<String>() {
+ @Override
+ public boolean hasMoreElements() {
+ return i.hasNext();
+ }
+
+ @Override
+ public String nextElement() {
+ return i.next();
+ }
+ };
+ }
+ };
+ }
+
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/SecurityHeadersFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/SecurityHeadersFilter.java
new file mode 100755
index 00000000..b2cddc0c
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/SecurityHeadersFilter.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class SecurityHeadersFilter implements Filter {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SecurityHeadersFilter.class);
+ private static final String DEFAULT_HSTS = "";
+ private static final String DEFAULT_CSP = "";
+ private FilterConfig filterConfig;
+
+ @Override
+ public void init(FilterConfig filterConfig) throws ServletException {
+ this.filterConfig = filterConfig;
+ LOG.info("Added security headers filter");
+ }
+
+ @Override
+ public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
+ throws IOException, ServletException {
+ HttpServletResponse httpResponse = (HttpServletResponse) response;
+ httpResponse.addHeader("X-Content-Type-Options", "nosniff");
+ httpResponse.addHeader("X-XSS-Protection", "1; mode=block");
+ String hsts = filterConfig.getInitParameter("hsts");
+ if (StringUtils.isNotBlank(hsts)) {
+ httpResponse.addHeader("Strict-Transport-Security", hsts);
+ }
+ String csp = filterConfig.getInitParameter("csp");
+ if (StringUtils.isNotBlank(csp)) {
+ httpResponse.addHeader("Content-Security-Policy", csp);
+ }
+ chain.doFilter(request, response);
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ public static Map<String, String> getDefaultParameters(Configuration conf) {
+ Map params = new HashMap<>();
+ params.put("hsts", conf.get("hbase.http.filter.hsts.value",
+ DEFAULT_HSTS));
+ params.put("csp", conf.get("hbase.http.filter.csp.value",
+ DEFAULT_CSP));
+ return params;
+ }
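+
+ // Illustrative configuration (values are examples, not shipped defaults):
+ //   hbase.http.filter.hsts.value = max-age=63072000;includeSubDomains;preload
+ //   hbase.http.filter.csp.value  = default-src https: data: 'unsafe-inline' 'unsafe-eval'
+ // With these set, responses carry Strict-Transport-Security and Content-Security-Policy headers
+ // in addition to the X-Content-Type-Options/X-XSS-Protection headers added unconditionally above.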
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ServerConfigurationKeys.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ServerConfigurationKeys.java
new file mode 100755
index 00000000..8b8f9aeb
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ServerConfigurationKeys.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+/**
+ * This interface contains constants for configuration keys used
+ * in the hbase http server code.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface ServerConfigurationKeys {
+
+ /** Enable/Disable ssl for http server */
+ public static final String HBASE_SSL_ENABLED_KEY = "hbase.ssl.enabled";
+
+ public static final boolean HBASE_SSL_ENABLED_DEFAULT = false;
+
+ /** Enable/Disable aliases serving from jetty */
+ public static final String HBASE_JETTY_LOGS_SERVE_ALIASES =
+ "hbase.jetty.logs.serve.aliases";
+
+ public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES =
+ true;
+
+ public static final String HBASE_HTTP_STATIC_USER = "hbase.http.staticuser.user";
+
+ public static final String DEFAULT_HBASE_HTTP_STATIC_USER = "dr.stack";
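+
+ // Illustrative hbase-site.xml snippet (values chosen for the example, not defaults):
+ //   <property><name>hbase.ssl.enabled</name><value>true</value></property>
+ //   <property><name>hbase.jetty.logs.serve.aliases</name><value>false</value></property>
+ //   <property><name>hbase.http.staticuser.user</name><value>rest-user</value></property>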
+
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/conf/ConfServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/conf/ConfServlet.java
new file mode 100755
index 00000000..f3c69f3b
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/conf/ConfServlet.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.rest.http.HttpServer;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.Writer;
+
+/**
+ * A servlet to print out the running configuration data.
+ */
+@InterfaceAudience.LimitedPrivate({"HBase"})
+@InterfaceStability.Unstable
+public class ConfServlet extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+
+ private static final String FORMAT_JSON = "json";
+ private static final String FORMAT_XML = "xml";
+ private static final String FORMAT_PARAM = "format";
+
+ /**
+ * Return the Configuration of the daemon hosting this servlet.
+ * This is populated when the HttpServer starts.
+ */
+ private Configuration getConfFromContext() {
+ Configuration conf = (Configuration)getServletContext().getAttribute(
+ HttpServer.CONF_CONTEXT_ATTRIBUTE);
+ assert conf != null;
+ return conf;
+ }
+
+ @Override
+ public void doGet(HttpServletRequest request, HttpServletResponse response)
+ throws ServletException, IOException {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
+ return;
+ }
+
+ String format = request.getParameter(FORMAT_PARAM);
+ if (null == format) {
+ format = FORMAT_XML;
+ }
+
+ if (FORMAT_XML.equals(format)) {
+ response.setContentType("text/xml; charset=utf-8");
+ } else if (FORMAT_JSON.equals(format)) {
+ response.setContentType("application/json; charset=utf-8");
+ }
+
+ Writer out = response.getWriter();
+ try {
+ writeResponse(getConfFromContext(), out, format);
+ } catch (BadFormatException bfe) {
+ response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage());
+ }
+ out.close();
+ }
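+
+ // Illustrative requests (the servlet is typically mounted at /conf; host/port are placeholders):
+ //   curl http://localhost:8085/conf              -> XML dump of the active Configuration
+ //   curl http://localhost:8085/conf?format=json  -> JSON dump via Configuration.dumpConfiguration
+ // Any other format value yields HTTP 400 through BadFormatException in writeResponse below.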
+
+ /**
+ * Guts of the servlet - extracted for easy testing.
+ */
+ static void writeResponse(Configuration conf, Writer out, String format)
+ throws IOException, BadFormatException {
+ if (FORMAT_JSON.equals(format)) {
+ Configuration.dumpConfiguration(conf, out);
+ } else if (FORMAT_XML.equals(format)) {
+ conf.writeXml(out);
+ } else {
+ throw new BadFormatException("Bad format: " + format);
+ }
+ }
+
+ public static class BadFormatException extends Exception {
+ private static final long serialVersionUID = 1L;
+
+ public BadFormatException(String msg) {
+ super(msg);
+ }
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/jmx/JMXJsonServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/jmx/JMXJsonServlet.java
new file mode 100755
index 00000000..4b93ab5e
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/jmx/JMXJsonServlet.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.http.jmx;
+
+import org.apache.hadoop.hbase.rest.http.HttpServer;
+import org.apache.hadoop.hbase.rest.util.JSONBean;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.TabularData;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.lang.management.ManagementFactory;
+
+/*
+ * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has
+ * been rewritten to be read only and to output in a JSON format so it is not
+ * really that close to the original.
+ */
+
+/**
+ * Provides Read only web access to JMX.
+ *
+ * This servlet generally will be placed under the /jmx URL for each
+ * HttpServer. It provides read only
+ * access to JMX metrics. The optional qry parameter
+ * may be used to query only a subset of the JMX Beans. This query
+ * functionality is provided through the
+ * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
+ * method.
+ *
+ *
+ * For example http://.../jmx?qry=Hadoop:* will return
+ * all hadoop metrics exposed through JMX.
+ *
+ *
+ * The optional get parameter is used to query a specific
+ * attribute of a JMX bean. The format of the URL is
+ * http://.../jmx?get=MXBeanName::AttributeName
+ *
+ *
+ * For example
+ *
+ * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
+ * will return the cluster id of the namenode mxbean.
+ *
+ *
+ * If the qry or the get parameter is not formatted
+ * correctly then a 400 BAD REQUEST http response code will be returned.
+ *
+ *
+ * If a resource such as an mbean or attribute cannot be found,
+ * a 404 SC_NOT_FOUND http response code will be returned.
+ *
+ * The servlet attempts to convert the JMXBeans into JSON. Each
+ * bean's attributes will be converted to a JSON object member.
+ *
+ * If the attribute is a boolean, a number, a string, or an array
+ * it will be converted to the JSON equivalent.
+ *
+ * If the value is a {@link CompositeData} then it will be converted
+ * to a JSON object with the keys as the name of the JSON member and
+ * the value is converted following these same rules.
+ *
+ * If the value is a {@link TabularData} then it will be converted
+ * to an array of the {@link CompositeData} elements that it contains.
+ *
+ * All other objects will be converted to a string and output as such.
+ *
+ * The bean's name and modelerType will be returned for all beans.
+ *
+ * Optional parameter "callback" should be used to deliver a JSONP response.
+ *
+ *
+ */
+@InterfaceAudience.Private
+public class JMXJsonServlet extends HttpServlet {
+ private static final Logger LOG = LoggerFactory.getLogger(
+ JMXJsonServlet.class);
+
+ private static final long serialVersionUID = 1L;
+
+ private static final String CALLBACK_PARAM = "callback";
+ /**
+ * If query string includes 'description', then we will emit bean and attribute descriptions to
+ * output IFF they are not null and IFF the description is not the same as the attribute name:
+ * i.e. specify a URL like so: /jmx?description=true
+ */
+ private static final String INCLUDE_DESCRIPTION = "description";
+
+ /**
+ * MBean server.
+ */
+ protected transient MBeanServer mBeanServer;
+
+ protected transient JSONBean jsonBeanWriter;
+
+ /**
+ * Initialize this servlet.
+ */
+ @Override
+ public void init() throws ServletException {
+ // Retrieve the MBean server
+ mBeanServer = ManagementFactory.getPlatformMBeanServer();
+ this.jsonBeanWriter = new JSONBean();
+ }
+
+ /**
+ * Process a GET request for the specified resource.
+ *
+ * @param request
+ * The servlet request we are processing
+ * @param response
+ * The servlet response we are creating
+ */
+ @Override
+ public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
+ try {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) {
+ return;
+ }
+ String jsonpcb = null;
+ PrintWriter writer = null;
+ JSONBean.Writer beanWriter = null;
+ try {
+ jsonpcb = checkCallbackName(request.getParameter(CALLBACK_PARAM));
+ writer = response.getWriter();
+
+ // "callback" parameter implies JSONP outpout
+ if (jsonpcb != null) {
+ response.setContentType("application/javascript; charset=utf8");
+ writer.write(jsonpcb + "(");
+ } else {
+ response.setContentType("application/json; charset=utf8");
+ }
+ beanWriter = this.jsonBeanWriter.open(writer);
+ // Should we output description on each attribute and bean?
+ String tmpStr = request.getParameter(INCLUDE_DESCRIPTION);
+ boolean description = tmpStr != null && tmpStr.length() > 0;
+
+ // query per mbean attribute
+ String getmethod = request.getParameter("get");
+ if (getmethod != null) {
+ String[] splitStrings = getmethod.split("\\:\\:");
+ if (splitStrings.length != 2) {
+ beanWriter.write("result", "ERROR");
+ beanWriter.write("message", "query format is not as expected.");
+ beanWriter.flush();
+ response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+ return;
+ }
+ if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]),
+ splitStrings[1], description) != 0) {
+ beanWriter.flush();
+ response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+ }
+ return;
+ }
+
+ // query per mbean
+ String qry = request.getParameter("qry");
+ if (qry == null) {
+ qry = "*:*";
+ }
+ if (beanWriter.write(this.mBeanServer, new ObjectName(qry), null, description) != 0) {
+ beanWriter.flush();
+ response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+ }
+ } finally {
+ if (beanWriter != null) {
+ beanWriter.close();
+ }
+ if (jsonpcb != null) {
+ writer.write(");");
+ }
+ if (writer != null) {
+ writer.close();
+ }
+ }
+ } catch (IOException e) {
+ LOG.error("Caught an exception while processing JMX request", e);
+ response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ } catch (MalformedObjectNameException e) {
+ LOG.error("Caught an exception while processing JMX request", e);
+ response.sendError(HttpServletResponse.SC_BAD_REQUEST);
+ }
+ }
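+
+ // Illustrative requests (port is a placeholder; the servlet is typically mounted at /jmx):
+ //   curl http://localhost:8085/jmx?qry=java.lang:type=Memory
+ //   curl http://localhost:8085/jmx?get=java.lang:type=Memory::HeapMemoryUsage
+ //   curl "http://localhost:8085/jmx?qry=java.lang:*&callback=render"   (JSONP-wrapped output)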
+
+ /**
+ * Verifies that the callback property, if provided, is purely alphanumeric.
+ * This prevents a malicious callback name (that is javascript code) from being
+ * returned by the UI to an unsuspecting user.
+ *
+ * @param callbackName The callback name, can be null.
+ * @return The callback name
+ * @throws IOException If the name is disallowed.
+ */
+ private String checkCallbackName(String callbackName) throws IOException {
+ if (null == callbackName) {
+ return null;
+ }
+ if (callbackName.matches("[A-Za-z0-9_]+")) {
+ return callbackName;
+ }
+ throw new IOException("'callback' must be alphanumeric");
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/Log4jUtils.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/Log4jUtils.java
new file mode 100755
index 00000000..368167e1
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/Log4jUtils.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http.log;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Set;
+
+/**
+ * A bridge class for operating on log4j, such as changing log level, etc.
+ *
+ * Will call the methods in {@link InternalLog4jUtils} to actually operate on the log4j stuff.
+ */
+@InterfaceAudience.Private
+public final class Log4jUtils {
+
+ private static final String INTERNAL_UTILS_CLASS_NAME =
+ "org.apache.hadoop.hbase.logging.InternalLog4jUtils";
+
+ private Log4jUtils() {
+ }
+
+ // load class when calling to avoid introducing class not found exception on log4j when loading
+ // this class even without calling any of the methods below.
+ private static Method getMethod(String methodName, Class<?>... args) {
+ try {
+ Class<?> clazz = Class.forName(INTERNAL_UTILS_CLASS_NAME);
+ return clazz.getDeclaredMethod(methodName, args);
+ } catch (ClassNotFoundException | NoSuchMethodException e) {
+ throw new AssertionError("should not happen", e);
+ }
+ }
+
+ private static void throwUnchecked(Throwable throwable) {
+ if (throwable instanceof RuntimeException) {
+ throw (RuntimeException) throwable;
+ }
+ if (throwable instanceof Error) {
+ throw (Error) throwable;
+ }
+ }
+
+ public static void setLogLevel(String loggerName, String levelName) {
+ Method method = getMethod("setLogLevel", String.class, String.class);
+ try {
+ method.invoke(null, loggerName, levelName);
+ } catch (IllegalAccessException e) {
+ throw new AssertionError("should not happen", e);
+ } catch (InvocationTargetException e) {
+ throwUnchecked(e.getCause());
+ throw new AssertionError("should not happen", e.getCause());
+ }
+ }
+
+ public static String getEffectiveLevel(String loggerName) {
+ Method method = getMethod("getEffectiveLevel", String.class);
+ try {
+ return (String) method.invoke(null, loggerName);
+ } catch (IllegalAccessException e) {
+ throw new AssertionError("should not happen", e);
+ } catch (InvocationTargetException e) {
+ throwUnchecked(e.getCause());
+ throw new AssertionError("should not happen", e.getCause());
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public static Set<File> getActiveLogFiles() throws IOException {
+ Method method = getMethod("getActiveLogFiles");
+ try {
+ return (Set<File>) method.invoke(null);
+ } catch (IllegalAccessException e) {
+ throw new AssertionError("should not happen", e);
+ } catch (InvocationTargetException e) {
+ Throwable cause = e.getCause();
+ throwUnchecked(cause);
+ if (cause instanceof IOException) {
+ throw (IOException) cause;
+ }
+ throw new AssertionError("should not happen", cause);
+ }
+ }
+
+ /**
+ * Disables Zk- and HBase client logging
+ */
+ public static void disableZkAndClientLoggers() {
+ // disable zookeeper logging to avoid it messing up the command output
+ setLogLevel("org.apache.zookeeper", "OFF");
+ // disable hbase zookeeper tool logging to avoid it messing up the command output
+ setLogLevel("org.apache.hadoop.hbase.zookeeper", "OFF");
+ // disable hbase client logging to avoid it messing up the command output
+ setLogLevel("org.apache.hadoop.hbase.client", "OFF");
+ }
+
+ /**
+ * Switches the logger for the given class to DEBUG level.
+ * @param clazz The class for which to switch to debug logging.
+ */
+ public static void enableDebug(Class<?> clazz) {
+ setLogLevel(clazz.getName(), "DEBUG");
+ }
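+
+ // Illustrative usage (names are examples): Log4jUtils.setLogLevel("org.apache.hadoop.hbase.rest",
+ // "DEBUG") or Log4jUtils.enableDebug(SomeRestClass.class). Both resolve InternalLog4jUtils
+ // reflectively, so this bridge class loads even when no log4j backend is on the classpath.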
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/LogLevel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/LogLevel.java
new file mode 100755
index 00000000..26a0c72d
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/LogLevel.java
@@ -0,0 +1,401 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http.log;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.rest.http.HttpServer;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.util.ServletUtil;
+import org.apache.hadoop.util.Tool;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.base.Charsets;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Objects;
+import java.util.regex.Pattern;
+
+/**
+ * Change log level in runtime.
+ */
+@InterfaceAudience.Private
+public final class LogLevel {
+ private static final String USAGES = "\nUsage: General options are:\n"
+ + "\t[-getlevel <host:port> <classname> [-protocol (http|https)]\n"
+ + "\t[-setlevel <host:port> <classname> <level> [-protocol (http|https)]";
+
+ public static final String PROTOCOL_HTTP = "http";
+ public static final String PROTOCOL_HTTPS = "https";
+
+ /**
+ * A command line implementation
+ */
+ public static void main(String[] args) throws Exception {
+ CLI cli = new CLI(new Configuration());
+ System.exit(cli.run(args));
+ }
+
+ /**
+ * Valid command line options.
+ */
+ private enum Operations {
+ GETLEVEL,
+ SETLEVEL,
+ UNKNOWN
+ }
+
+ private static void printUsage() {
+ System.err.println(USAGES);
+ System.exit(-1);
+ }
+
+ public static boolean isValidProtocol(String protocol) {
+ return ((protocol.equals(PROTOCOL_HTTP) ||
+ protocol.equals(PROTOCOL_HTTPS)));
+ }
+
+ @VisibleForTesting
+ static class CLI extends Configured implements Tool {
+ private Operations operation = Operations.UNKNOWN;
+ private String protocol;
+ private String hostName;
+ private String className;
+ private String level;
+
+ CLI(Configuration conf) {
+ setConf(conf);
+ }
+
+ @Override
+ public int run(String[] args) throws Exception {
+ try {
+ parseArguments(args);
+ sendLogLevelRequest();
+ } catch (HadoopIllegalArgumentException e) {
+ printUsage();
+ }
+ return 0;
+ }
+
+ /**
+ * Send HTTP request to the daemon.
+ * @throws HadoopIllegalArgumentException if arguments are invalid.
+ * @throws Exception if unable to connect
+ */
+ private void sendLogLevelRequest()
+ throws HadoopIllegalArgumentException, Exception {
+ switch (operation) {
+ case GETLEVEL:
+ doGetLevel();
+ break;
+ case SETLEVEL:
+ doSetLevel();
+ break;
+ default:
+ throw new HadoopIllegalArgumentException(
+ "Expect either -getlevel or -setlevel");
+ }
+ }
+
+ public void parseArguments(String[] args) throws HadoopIllegalArgumentException {
+ if (args.length == 0) {
+ throw new HadoopIllegalArgumentException("No arguments specified");
+ }
+ int nextArgIndex = 0;
+ while (nextArgIndex < args.length) {
+ switch (args[nextArgIndex]) {
+ case "-getlevel":
+ nextArgIndex = parseGetLevelArgs(args, nextArgIndex);
+ break;
+ case "-setlevel":
+ nextArgIndex = parseSetLevelArgs(args, nextArgIndex);
+ break;
+ case "-protocol":
+ nextArgIndex = parseProtocolArgs(args, nextArgIndex);
+ break;
+ default:
+ throw new HadoopIllegalArgumentException(
+ "Unexpected argument " + args[nextArgIndex]);
+ }
+ }
+
+ // if operation is never specified in the arguments
+ if (operation == Operations.UNKNOWN) {
+ throw new HadoopIllegalArgumentException(
+ "Must specify either -getlevel or -setlevel");
+ }
+
+ // if protocol is unspecified, set it as http.
+ if (protocol == null) {
+ protocol = PROTOCOL_HTTP;
+ }
+ }
+
+ private int parseGetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException {
+ // fail if multiple operations are specified in the arguments
+ if (operation != Operations.UNKNOWN) {
+ throw new HadoopIllegalArgumentException("Redundant -getlevel command");
+ }
+ // check number of arguments is sufficient
+ if (index + 2 >= args.length) {
+ throw new HadoopIllegalArgumentException("-getlevel needs two parameters");
+ }
+ operation = Operations.GETLEVEL;
+ hostName = args[index + 1];
+ className = args[index + 2];
+ return index + 3;
+ }
+
+ private int parseSetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException {
+ // fail if multiple operations are specified in the arguments
+ if (operation != Operations.UNKNOWN) {
+ throw new HadoopIllegalArgumentException("Redundant -setlevel command");
+ }
+ // check number of arguments is sufficient
+ if (index + 3 >= args.length) {
+ throw new HadoopIllegalArgumentException("-setlevel needs three parameters");
+ }
+ operation = Operations.SETLEVEL;
+ hostName = args[index + 1];
+ className = args[index + 2];
+ level = args[index + 3];
+ return index + 4;
+ }
+
+ private int parseProtocolArgs(String[] args, int index) throws HadoopIllegalArgumentException {
+ // make sure only -protocol is specified
+ if (protocol != null) {
+ throw new HadoopIllegalArgumentException(
+ "Redundant -protocol command");
+ }
+ // check number of arguments is sufficient
+ if (index + 1 >= args.length) {
+ throw new HadoopIllegalArgumentException(
+ "-protocol needs one parameter");
+ }
+ // check protocol is valid
+ protocol = args[index + 1];
+ if (!isValidProtocol(protocol)) {
+ throw new HadoopIllegalArgumentException(
+ "Invalid protocol: " + protocol);
+ }
+ return index + 2;
+ }
+
+ /**
+ * Send HTTP request to get log level.
+ *
+ * @throws HadoopIllegalArgumentException if arguments are invalid.
+ * @throws Exception if unable to connect
+ */
+ private void doGetLevel() throws Exception {
+ process(protocol + "://" + hostName + "/logLevel?log=" + className);
+ }
+
+ /**
+ * Send HTTP request to set log level.
+ *
+ * @throws HadoopIllegalArgumentException if arguments are invalid.
+ * @throws Exception if unable to connect
+ */
+ private void doSetLevel() throws Exception {
+ process(protocol + "://" + hostName + "/logLevel?log=" + className
+ + "&level=" + level);
+ }
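+
+ // Illustrative invocations (host/port and logger names are examples):
+ //   -getlevel rest-host:8085 org.apache.hadoop.hbase.rest
+ //   -setlevel rest-host:8085 org.apache.hadoop.hbase.rest DEBUG -protocol https
+ // Each results in a GET against /logLevel on the target daemon's info server, as built above.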
+
+ /**
+ * Connect to the URL. Supports HTTP and supports SPNEGO
+ * authentication. It falls back to simple authentication if it fails to
+ * initiate SPNEGO.
+ *
+ * @param url the URL address of the daemon servlet
+ * @return a connected connection
+ * @throws Exception if it can not establish a connection.
+ */
+ private URLConnection connect(URL url) throws Exception {
+ AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+ AuthenticatedURL aUrl;
+ SSLFactory clientSslFactory;
+ URLConnection connection;
+ // If https is chosen, configures SSL client.
+ if (PROTOCOL_HTTPS.equals(url.getProtocol())) {
+ clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, this.getConf());
+ clientSslFactory.init();
+ SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
+
+ aUrl = new AuthenticatedURL(new KerberosAuthenticator(), clientSslFactory);
+ connection = aUrl.openConnection(url, token);
+ HttpsURLConnection httpsConn = (HttpsURLConnection) connection;
+ httpsConn.setSSLSocketFactory(sslSocketF);
+ } else {
+ aUrl = new AuthenticatedURL(new KerberosAuthenticator());
+ connection = aUrl.openConnection(url, token);
+ }
+ connection.connect();
+ return connection;
+ }
+
+ /**
+ * Configures the client to send HTTP request to the URL.
+ * Supports SPNEGO for authentication.
+ * @param urlString URL and query string to the daemon's web UI
+ * @throws Exception if unable to connect
+ */
+ private void process(String urlString) throws Exception {
+ URL url = new URL(urlString);
+ System.out.println("Connecting to " + url);
+
+ URLConnection connection = connect(url);
+
+ // read from the servlet
+
+ try (InputStreamReader streamReader =
+ new InputStreamReader(connection.getInputStream(), Charsets.UTF_8);
+ BufferedReader bufferedReader = new BufferedReader(streamReader)) {
+ bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER))
+ .forEach(line -> System.out.println(TAG.matcher(line).replaceAll("")));
+ } catch (IOException ioe) {
+ System.err.println("" + ioe);
+ }
+ }
+ }
+
+ private static final String MARKER = "<!-- OUTPUT -->";
+ private static final Pattern TAG = Pattern.compile("<[^>]*>");
+
+ /**
+ * A servlet implementation
+ */
+ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+ @InterfaceStability.Unstable
+ public static class Servlet extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public void doGet(HttpServletRequest request, HttpServletResponse response)
+ throws ServletException, IOException {
+ // Do the authorization
+ if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
+ response)) {
+ return;
+ }
+ // Disallow modification of the LogLevel if explicitly set to readonly
+ Configuration conf = (Configuration) getServletContext().getAttribute(
+ HttpServer.CONF_CONTEXT_ATTRIBUTE);
+ if (conf.getBoolean("hbase.master.ui.readonly", false)) {
+ response.sendError(HttpServletResponse.SC_FORBIDDEN, "Modification of HBase via"
+ + " the UI is disallowed in configuration.");
+ return;
+ }
+ response.setContentType("text/html");
+ PrintWriter out;
+ try {
+ String headerPath = "header.jsp?pageTitle=Log Level";
+ request.getRequestDispatcher(headerPath).include(request, response);
+ out = response.getWriter();
+ } catch (FileNotFoundException e) {
+ // in case file is not found fall back to old design
+ out = ServletUtil.initHTML(response, "Log Level");
+ }
+ out.println(FORMS);
+
+ String logName = ServletUtil.getParameter(request, "log");
+ String level = ServletUtil.getParameter(request, "level");
+
+ if (logName != null) {
+ out.println("<p>Results:</p>");
+ out.println(MARKER + "Submitted Log Name: <b>" + logName + "</b><br />");
+ Logger logger = LoggerFactory.getLogger(logName);
+ out.println(MARKER + "Log Class: <b>" + logger.getClass().getName() + "</b><br />");
+ if (level != null) {
+ out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
+ Log4jUtils.setLogLevel(logger.getName(), level);
+ }
+ out.println(MARKER + "Effective level: <b>" +
+ Log4jUtils.getEffectiveLevel(logger.getName()) + "</b><br />");
+ }
+ }
+ }
+
+ private LogLevel() {}
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
new file mode 100755
index 00000000..128be02b
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java
@@ -0,0 +1,248 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.io.IOException;
+import java.io.Serializable;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlValue;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell;
+/**
+ * Representation of a cell. A cell is a single value associated with a column and
+ * optional qualifier, and either the timestamp when it was stored or the user-
+ * provided timestamp if one was explicitly supplied.
+ *
+ *
+ */
+@XmlRootElement(name="CellSet")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class CellSetModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+
+ @XmlElement(name="Row")
+ private List<RowModel> rows;
+
+ /**
+ * Constructor
+ */
+ public CellSetModel() {
+ this.rows = new ArrayList<>();
+ }
+
+ /**
+ * @param rows the rows
+ */
+ public CellSetModel(List<RowModel> rows) {
+ super();
+ this.rows = rows;
+ }
+
+ /**
+ * Add a row to this cell set
+ * @param row the row
+ */
+ public void addRow(RowModel row) {
+ rows.add(row);
+ }
+
+ /**
+ * @return the rows
+ */
+ public List<RowModel> getRows() {
+ return rows;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ CellSet.Builder builder = CellSet.newBuilder();
+ for (RowModel row : getRows()) {
+ CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
+ rowBuilder.setKey(UnsafeByteOperations.unsafeWrap(row.getKey()));
+ for (CellModel cell : row.getCells()) {
+ Cell.Builder cellBuilder = Cell.newBuilder();
+ cellBuilder.setColumn(UnsafeByteOperations.unsafeWrap(cell.getColumn()));
+ cellBuilder.setData(UnsafeByteOperations.unsafeWrap(cell.getValue()));
+ if (cell.hasUserTimestamp()) {
+ cellBuilder.setTimestamp(cell.getTimestamp());
+ }
+ rowBuilder.addValues(cellBuilder);
+ }
+ builder.addRows(rowBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ CellSet.Builder builder = CellSet.newBuilder();
+ ProtobufUtil.mergeFrom(builder, message);
+ for (CellSet.Row row : builder.getRowsList()) {
+ RowModel rowModel = new RowModel(row.getKey().toByteArray());
+ for (Cell cell : row.getValuesList()) {
+ long timestamp = HConstants.LATEST_TIMESTAMP;
+ if (cell.hasTimestamp()) {
+ timestamp = cell.getTimestamp();
+ }
+ rowModel.addCell(
+ new CellModel(cell.getColumn().toByteArray(), timestamp,
+ cell.getData().toByteArray()));
+ }
+ addRow(rowModel);
+ }
+ return this;
+ }
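+
+ // Illustrative round trip (Bytes is org.apache.hadoop.hbase.util.Bytes; values are examples):
+ //   CellSetModel model = new CellSetModel();
+ //   RowModel row = new RowModel(Bytes.toBytes("row1"));
+ //   row.addCell(new CellModel(Bytes.toBytes("cf:a"), HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value")));
+ //   model.addRow(row);
+ //   byte[] pb = model.createProtobufOutput();
+ //   CellSetModel copy = (CellSetModel) new CellSetModel().getObjectFromMessage(pb);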
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
new file mode 100755
index 00000000..967f6ba2
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java
@@ -0,0 +1,242 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.namespace.QName;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+
+import com.fasterxml.jackson.annotation.JsonAnyGetter;
+import com.fasterxml.jackson.annotation.JsonAnySetter;
+
+/**
+ * Representation of a column family schema.
+ */
+@XmlRootElement(name="ColumnSchema")
+@InterfaceAudience.Private
+public class ColumnSchemaModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
+ private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
+ private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
+ private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
+ private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
+ private static QName TTL = new QName(HColumnDescriptor.TTL);
+ private static QName VERSIONS = new QName(HConstants.VERSIONS);
+
+ private String name;
+ private Map<QName, Object> attrs = new LinkedHashMap<>();
+
+ /**
+ * Default constructor
+ */
+ public ColumnSchemaModel() {}
+
+ /**
+ * Add an attribute to the column family schema
+ * @param name the attribute name
+ * @param value the attribute value
+ */
+ @JsonAnySetter
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+ /**
+ * @param name the attribute name
+ * @return the attribute value
+ */
+ public String getAttribute(String name) {
+ Object o = attrs.get(new QName(name));
+ return o != null ? o.toString(): null;
+ }
+
+ /**
+ * @return the column name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+ @JsonAnyGetter
+ public Map<QName, Object> getAny() {
+ return attrs;
+ }
+
+ /**
+ * @param name the column family name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME => '");
+ sb.append(name);
+ sb.append('\'');
+ for (Map.Entry<QName, Object> e: attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(" }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ /**
+ * @return true if the BLOCKCACHE attribute is present and true
+ */
+ public boolean __getBlockcache() {
+ Object o = attrs.get(BLOCKCACHE);
+ return o != null ?
+ Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
+ }
+
+ /**
+ * @return the value of the BLOCKSIZE attribute or its default if it is unset
+ */
+ public int __getBlocksize() {
+ Object o = attrs.get(BLOCKSIZE);
+ return o != null ?
+ Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
+ }
+
+ /**
+ * @return the value of the BLOOMFILTER attribute or its default if unset
+ */
+ public String __getBloomfilter() {
+ Object o = attrs.get(BLOOMFILTER);
+ return o != null ? o.toString() : HColumnDescriptor.DEFAULT_BLOOMFILTER;
+ }
+
+ /**
+ * @return the value of the COMPRESSION attribute or its default if unset
+ */
+ public String __getCompression() {
+ Object o = attrs.get(COMPRESSION);
+ return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
+ }
+
+ /**
+ * @return true if the IN_MEMORY attribute is present and true
+ */
+ public boolean __getInMemory() {
+ Object o = attrs.get(IN_MEMORY);
+ return o != null ?
+ Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
+ }
+
+ /**
+ * @return the value of the TTL attribute or its default if it is unset
+ */
+ public int __getTTL() {
+ Object o = attrs.get(TTL);
+ return o != null ?
+ Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
+ }
+
+ /**
+ * @return the value of the VERSIONS attribute or its default if it is unset
+ */
+ public int __getVersions() {
+ Object o = attrs.get(VERSIONS);
+ return o != null ?
+ Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
+ }
+
+ /**
+ * @param value the desired value of the BLOCKSIZE attribute
+ */
+ public void __setBlocksize(int value) {
+ attrs.put(BLOCKSIZE, Integer.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the BLOCKCACHE attribute
+ */
+ public void __setBlockcache(boolean value) {
+ attrs.put(BLOCKCACHE, Boolean.toString(value));
+ }
+
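+ /**
+ * @param value the desired value of the BLOOMFILTER attribute
+ */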
+ public void __setBloomfilter(String value) {
+ attrs.put(BLOOMFILTER, value);
+ }
+
+ /**
+ * @param value the desired value of the COMPRESSION attribute
+ */
+ public void __setCompression(String value) {
+ attrs.put(COMPRESSION, value);
+ }
+
+ /**
+ * @param value the desired value of the IN_MEMORY attribute
+ */
+ public void __setInMemory(boolean value) {
+ attrs.put(IN_MEMORY, Boolean.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the TTL attribute
+ */
+ public void __setTTL(int value) {
+ attrs.put(TTL, Integer.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the VERSIONS attribute
+ */
+ public void __setVersions(int value) {
+ attrs.put(VERSIONS, Integer.toString(value));
+ }
+}
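
A brief, hypothetical example of the double-underscore accessors above; they read and
write the same untyped attribute map that JAXB and Jackson expose through getAny(),
falling back to the HColumnDescriptor defaults when an attribute is absent:

    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("cf");
    family.__setVersions(3);
    family.__setCompression("GZ");
    family.__setInMemory(true);

    int versions = family.__getVersions();          // 3
    boolean blockcache = family.__getBlockcache();  // default, attribute never set
    // family.toString() -> { NAME => 'cf', VERSIONS => '3', COMPRESSION => 'GZ', ... }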
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
new file mode 100755
index 00000000..aa7df1e9
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
@@ -0,0 +1,171 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
+
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+
+import org.apache.hadoop.hbase.shaded.rest.protobuf
+ .generated.NamespacePropertiesMessage.NamespaceProperties;
+
+/**
+ * Lists an HBase namespace's key/value properties.
+ * <ul>
+ * <li>NamespaceProperties: outer element</li>
+ * <li>properties: sequence of property elements</li>
+ * <li>entry</li>
+ * <li>key: property key</li>
+ * <li>value: property value</li>
+ * </ul>
+ */
+@XmlRootElement(name="NamespaceProperties")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class NamespacesInstanceModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ // JAX-RS automatically converts Map to XMLAnyElement.
+ private Map<String, String> properties = null;
+
+ @XmlTransient
+ private String namespaceName;
+
+ /**
+ * Default constructor. Do not use.
+ */
+ public NamespacesInstanceModel() {}
+
+ /**
+ * Constructor to use if the namespace does not exist in HBase.
+ * @param namespaceName the namespace name.
+ * @throws IOException
+ */
+ public NamespacesInstanceModel(String namespaceName) throws IOException {
+ this(null, namespaceName);
+ }
+
+ /**
+ * Constructor
+ * @param admin the administrative API
+ * @param namespaceName the namespace name.
+ * @throws IOException
+ */
+ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException {
+ this.namespaceName = namespaceName;
+ if(admin == null) { return; }
+
+ NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName);
+
+ // For properly formed JSON, if no properties, field has to be null (not just no elements).
+ if(nd.getConfiguration().isEmpty()){ return; }
+
+ properties = new HashMap<>();
+ properties.putAll(nd.getConfiguration());
+ }
+
+ /**
+ * Add property to the namespace.
+ * @param key attribute name
+ * @param value attribute value
+ */
+ public void addProperty(String key, String value) {
+ if(properties == null){
+ properties = new HashMap<>();
+ }
+ properties.put(key, value);
+ }
+
+ /**
+ * @return The map of uncategorized namespace properties.
+ */
+ public Map<String, String> getProperties() {
+ if(properties == null){
+ properties = new HashMap<>();
+ }
+ return properties;
+ }
+
+ public String getNamespaceName(){
+ return namespaceName;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{NAME => \'");
+ sb.append(namespaceName);
+ sb.append("\'");
+ if(properties != null){
+ for (Map.Entry<String, String> entry : properties.entrySet()) {
+ sb.append(", ");
+ sb.append(entry.getKey());
+ sb.append(" => '");
+ sb.append(entry.getValue());
+ sb.append("\'");
+ }
+ }
+ sb.append("}");
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ NamespaceProperties.Builder builder = NamespaceProperties.newBuilder();
+ if(properties != null){
+ for (Map.Entry<String, String> entry : properties.entrySet()) {
+ String key = entry.getKey();
+ NamespaceProperties.Property.Builder property = NamespaceProperties.Property.newBuilder();
+ property.setKey(key);
+ property.setValue(entry.getValue());
+ builder.addProps(property);
+ }
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException {
+ NamespaceProperties.Builder builder = NamespaceProperties.newBuilder();
+ builder.mergeFrom(message);
+ List<NamespaceProperties.Property> properties = builder.getPropsList();
+ for(NamespaceProperties.Property property: properties){
+ addProperty(property.getKey(), property.getValue());
+ }
+ return this;
+ }
+
+}
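
A hypothetical property round trip for the model above; the namespace name itself is
@XmlTransient and is not carried in the protobuf message, and the quota key shown is
purely illustrative:

    NamespacesInstanceModel model = new NamespacesInstanceModel("ns1"); // may throw IOException
    model.addProperty("hbase.namespace.quota.maxtables", "10");

    byte[] wire = model.createProtobufOutput();
    NamespacesInstanceModel parsed =
        (NamespacesInstanceModel) new NamespacesInstanceModel().getObjectFromMessage(wire);
    // parsed.getProperties() holds the property; parsed.getNamespaceName() is null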
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
new file mode 100755
index 00000000..0be558d2
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
@@ -0,0 +1,118 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+
+import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacesMessage.Namespaces;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+
+/**
+ * A list of HBase namespaces.
+ * <ul>
+ * <li>Namespace: namespace name</li>
+ * </ul>
+ */
+@XmlRootElement(name="Namespaces")
+@XmlAccessorType(XmlAccessType.FIELD)
+@InterfaceAudience.Private
+public class NamespacesModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ @JsonProperty("Namespace")
+ @XmlElement(name="Namespace")
+ private List<String> namespaces = new ArrayList<>();
+
+ /**
+ * Default constructor. Do not use.
+ */
+ public NamespacesModel() {}
+
+ /**
+ * Constructor
+ * @param admin the administrative API
+ * @throws IOException
+ */
+ public NamespacesModel(Admin admin) throws IOException {
+ NamespaceDescriptor[] nds = admin.listNamespaceDescriptors();
+ namespaces = new ArrayList<>(nds.length);
+ for (NamespaceDescriptor nd : nds) {
+ namespaces.add(nd.getName());
+ }
+ }
+
+ /**
+ * @return all namespaces
+ */
+ public List<String> getNamespaces() {
+ return namespaces;
+ }
+
+ /**
+ * @param namespaces the list of namespace names
+ */
+ public void setNamespaces(List<String> namespaces) {
+ this.namespaces = namespaces;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (String namespace : namespaces) {
+ sb.append(namespace);
+ sb.append("\n");
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Namespaces.Builder builder = Namespaces.newBuilder();
+ builder.addAllNamespace(namespaces);
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException {
+ Namespaces.Builder builder = Namespaces.newBuilder();
+ builder.mergeFrom(message);
+ namespaces = builder.getNamespaceList();
+ return this;
+ }
+}
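
A small sketch of the same round trip for the namespace list, built by hand here rather
than from an Admin instance (names are made up):

    NamespacesModel model = new NamespacesModel();
    model.setNamespaces(java.util.Arrays.asList("default", "hbase", "ns1"));

    byte[] wire = model.createProtobufOutput();
    NamespacesModel parsed = (NamespacesModel) new NamespacesModel().getObjectFromMessage(wire);
    // parsed.getNamespaces() -> [default, hbase, ns1]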
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
new file mode 100755
index 00000000..b560f697
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java
@@ -0,0 +1,190 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Representation of a row. A row is a related set of cells, grouped by common
+ * row key. RowModels do not appear in results by themselves. They are always
+ * encapsulated within CellSetModels.
+ */
+@XmlRootElement(name="ClusterStatus")
+@InterfaceAudience.Private
+public class StorageClusterStatusModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Represents a region server.
+ */
+ public static class Node implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Represents a region hosted on a region server.
+ */
+ public static class Region implements Serializable {
+ private static final long serialVersionUID = -1326683840086398193L;
+
+ private byte[] name;
+ private int stores;
+ private int storefiles;
+ private int storefileSizeMB;
+ private int memstoreSizeMB;
+ private long storefileIndexSizeKB;
+ private long readRequestsCount;
+ private long writeRequestsCount;
+ private int rootIndexSizeKB;
+ private int totalStaticIndexSizeKB;
+ private int totalStaticBloomSizeKB;
+ private long totalCompactingKVs;
+ private long currentCompactedKVs;
+
+ /**
+ * Default constructor
+ */
+ public Region() {
+ }
+
+ /**
+ * Constructor
+ * @param name the region name
+ */
+ public Region(byte[] name) {
+ this.name = name;
+ }
+
+ /**
+ * Constructor
+ * @param name the region name
+ * @param stores the number of stores
+ * @param storefiles the number of store files
+ * @param storefileSizeMB total size of store files, in MB
+ * @param memstoreSizeMB total size of memstore, in MB
+ * @param storefileIndexSizeKB total size of store file indexes, in KB
+ */
+ public Region(byte[] name, int stores, int storefiles,
+ int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB,
+ long readRequestsCount, long writeRequestsCount,
+ int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
+ long totalCompactingKVs, long currentCompactedKVs) {
+ this.name = name;
+ this.stores = stores;
+ this.storefiles = storefiles;
+ this.storefileSizeMB = storefileSizeMB;
+ this.memstoreSizeMB = memstoreSizeMB;
+ this.storefileIndexSizeKB = storefileIndexSizeKB;
+ this.readRequestsCount = readRequestsCount;
+ this.writeRequestsCount = writeRequestsCount;
+ this.rootIndexSizeKB = rootIndexSizeKB;
+ this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
+ this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
+ this.totalCompactingKVs = totalCompactingKVs;
+ this.currentCompactedKVs = currentCompactedKVs;
+ }
+
+ /**
+ * @return the region name
+ */
+ @XmlAttribute
+ public byte[] getName() {
+ return name;
+ }
+
+ /**
+ * @return the number of stores
+ */
+ @XmlAttribute
+ public int getStores() {
+ return stores;
+ }
+
+ /**
+ * @return the number of store files
+ */
+ @XmlAttribute
+ public int getStorefiles() {
+ return storefiles;
+ }
+
+ /**
+ * @return the total size of store files, in MB
+ */
+ @XmlAttribute
+ public int getStorefileSizeMB() {
+ return storefileSizeMB;
+ }
+
+ /**
+ * @return memstore size, in MB
+ */
+ @XmlAttribute
+ public int getMemStoreSizeMB() {
+ return memstoreSizeMB;
+ }
+
+ /**
+ * @return the total size of store file indexes, in KB
+ */
+ @XmlAttribute
+ public long getStorefileIndexSizeKB() {
+ return storefileIndexSizeKB;
+ }
+
+ /**
+ * @return the current total read requests made to region
+ */
+ @XmlAttribute
+ public long getReadRequestsCount() {
+ return readRequestsCount;
+ }
+
+
+ /**
+ * @return the current total write requests made to region
+ */
+ @XmlAttribute
+ public long getWriteRequestsCount() {
+ return writeRequestsCount;
+ }
+
+ /**
+ * @return The current total size of root-level indexes for the region, in KB.
+ */
+ @XmlAttribute
+ public int getRootIndexSizeKB() {
+ return rootIndexSizeKB;
+ }
+
+ /**
+ * @return The total size of static index, in KB
+ */
+ @XmlAttribute
+ public int getTotalStaticIndexSizeKB() {
+ return totalStaticIndexSizeKB;
+ }
+
+ /**
+ * @return The total size of static bloom, in KB
+ */
+ @XmlAttribute
+ public int getTotalStaticBloomSizeKB() {
+ return totalStaticBloomSizeKB;
+ }
+
+ /**
+ * @return The total number of compacting key-values
+ */
+ @XmlAttribute
+ public long getTotalCompactingKVs() {
+ return totalCompactingKVs;
+ }
+
+ /**
+ * @return The number of current compacted key-values
+ */
+ @XmlAttribute
+ public long getCurrentCompactedKVs() {
+ return currentCompactedKVs;
+ }
+
+ /**
+ * @param readRequestsCount The current total read requests made to region
+ */
+ public void setReadRequestsCount(long readRequestsCount) {
+ this.readRequestsCount = readRequestsCount;
+ }
+
+ /**
+ * @param rootIndexSizeKB The current total size of root-level indexes
+ * for the region, in KB
+ */
+ public void setRootIndexSizeKB(int rootIndexSizeKB) {
+ this.rootIndexSizeKB = rootIndexSizeKB;
+ }
+
+ /**
+ * @param writeRequestsCount The current total write requests made to region
+ */
+ public void setWriteRequestsCount(long writeRequestsCount) {
+ this.writeRequestsCount = writeRequestsCount;
+ }
+
+ /**
+ * @param currentCompactedKVs The completed count of key values
+ * in currently running compaction
+ */
+ public void setCurrentCompactedKVs(long currentCompactedKVs) {
+ this.currentCompactedKVs = currentCompactedKVs;
+ }
+
+ /**
+ * @param totalCompactingKVs The total compacting key values
+ * in currently running compaction
+ */
+ public void setTotalCompactingKVs(long totalCompactingKVs) {
+ this.totalCompactingKVs = totalCompactingKVs;
+ }
+
+ /**
+ * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks,
+ * not just loaded into the block cache, in KB.
+ */
+ public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) {
+ this.totalStaticBloomSizeKB = totalStaticBloomSizeKB;
+ }
+
+ /**
+ * @param totalStaticIndexSizeKB The total size of all index blocks,
+ * not just the root level, in KB.
+ */
+ public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) {
+ this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
+ }
+
+ /**
+ * @param name the region name
+ */
+ public void setName(byte[] name) {
+ this.name = name;
+ }
+
+ /**
+ * @param stores the number of stores
+ */
+ public void setStores(int stores) {
+ this.stores = stores;
+ }
+
+ /**
+ * @param storefiles the number of store files
+ */
+ public void setStorefiles(int storefiles) {
+ this.storefiles = storefiles;
+ }
+
+ /**
+ * @param storefileSizeMB total size of store files, in MB
+ */
+ public void setStorefileSizeMB(int storefileSizeMB) {
+ this.storefileSizeMB = storefileSizeMB;
+ }
+
+ /**
+ * @param memstoreSizeMB memstore size, in MB
+ */
+ public void setMemStoreSizeMB(int memstoreSizeMB) {
+ this.memstoreSizeMB = memstoreSizeMB;
+ }
+
+ /**
+ * @param storefileIndexSizeKB total size of store file indexes, in KB
+ */
+ public void setStorefileIndexSizeKB(long storefileIndexSizeKB) {
+ this.storefileIndexSizeKB = storefileIndexSizeKB;
+ }
+ }
+
+ private String name;
+ private long startCode;
+ private long requests;
+ private int heapSizeMB;
+ private int maxHeapSizeMB;
+ private List<Region> regions = new ArrayList<>();
+
+ /**
+ * Add a region to the list of regions served by this node.
+ * @param name the region name
+ */
+ public void addRegion(byte[] name, int stores, int storefiles,
+ int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB,
+ long readRequestsCount, long writeRequestsCount,
+ int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
+ long totalCompactingKVs, long currentCompactedKVs) {
+ regions.add(new Region(name, stores, storefiles, storefileSizeMB,
+ memstoreSizeMB, storefileIndexSizeKB, readRequestsCount,
+ writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
+ totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
+ }
+
+ /**
+ * @param index the index
+ * @return the region at the given index
+ */
+ public Region getRegion(int index) {
+ return regions.get(index);
+ }
+
+ /**
+ * Default constructor
+ */
+ public Node() {}
+
+ /**
+ * Constructor
+ * @param name the region server name
+ * @param startCode the region server's start code
+ */
+ public Node(String name, long startCode) {
+ this.name = name;
+ this.startCode = startCode;
+ }
+
+ /**
+ * @return the region server's name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the region server's start code
+ */
+ @XmlAttribute
+ public long getStartCode() {
+ return startCode;
+ }
+
+ /**
+ * @return the current heap size, in MB
+ */
+ @XmlAttribute
+ public int getHeapSizeMB() {
+ return heapSizeMB;
+ }
+
+ /**
+ * @return the maximum heap size, in MB
+ */
+ @XmlAttribute
+ public int getMaxHeapSizeMB() {
+ return maxHeapSizeMB;
+ }
+
+ /**
+ * @return the list of regions served by the region server
+ */
+ @XmlElement(name="Region")
+ public List<Region> getRegions() {
+ return regions;
+ }
+
+ /**
+ * @return the number of requests per second processed by the region server
+ */
+ @XmlAttribute
+ public long getRequests() {
+ return requests;
+ }
+
+ /**
+ * @param name the region server's hostname
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param startCode the region server's start code
+ */
+ public void setStartCode(long startCode) {
+ this.startCode = startCode;
+ }
+
+ /**
+ * @param heapSizeMB the current heap size, in MB
+ */
+ public void setHeapSizeMB(int heapSizeMB) {
+ this.heapSizeMB = heapSizeMB;
+ }
+
+ /**
+ * @param maxHeapSizeMB the maximum heap size, in MB
+ */
+ public void setMaxHeapSizeMB(int maxHeapSizeMB) {
+ this.maxHeapSizeMB = maxHeapSizeMB;
+ }
+
+ /**
+ * @param regions a list of regions served by the region server
+ */
+ public void setRegions(List<Region> regions) {
+ this.regions = regions;
+ }
+
+ /**
+ * @param requests the number of requests per second processed by the region server
+ */
+ public void setRequests(long requests) {
+ this.requests = requests;
+ }
+ }
+
+ private List<Node> liveNodes = new ArrayList<>();
+ private List<String> deadNodes = new ArrayList<>();
+ private int regions;
+ private long requests;
+ private double averageLoad;
+
+ /**
+ * Add a live node to the cluster representation.
+ * @param name the region server name
+ * @param startCode the region server's start code
+ * @param heapSizeMB the current heap size, in MB
+ * @param maxHeapSizeMB the maximum heap size, in MB
+ */
+ public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) {
+ Node node = new Node(name, startCode);
+ node.setHeapSizeMB(heapSizeMB);
+ node.setMaxHeapSizeMB(maxHeapSizeMB);
+ liveNodes.add(node);
+ return node;
+ }
+
+ /**
+ * @param index the index
+ * @return the region server model
+ */
+ public Node getLiveNode(int index) {
+ return liveNodes.get(index);
+ }
+
+ /**
+ * Add a dead node to the cluster representation.
+ * @param node the dead region server's name
+ */
+ public void addDeadNode(String node) {
+ deadNodes.add(node);
+ }
+
+ /**
+ * @param index the index
+ * @return the dead region server's name
+ */
+ public String getDeadNode(int index) {
+ return deadNodes.get(index);
+ }
+
+ /**
+ * Default constructor
+ */
+ public StorageClusterStatusModel() {
+ }
+
+ /**
+ * @return the list of live nodes
+ */
+ @XmlElement(name = "Node")
+ @XmlElementWrapper(name = "LiveNodes")
+ // workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192
+ @JsonProperty("LiveNodes")
+ public List<Node> getLiveNodes() {
+ return liveNodes;
+ }
+
+ /**
+ * @return the list of dead nodes
+ */
+ @XmlElement(name = "Node")
+ @XmlElementWrapper(name = "DeadNodes")
+ // workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192
+ @JsonProperty("DeadNodes")
+ public List<String> getDeadNodes() {
+ return deadNodes;
+ }
+
+ /**
+ * @return the total number of regions served by the cluster
+ */
+ @XmlAttribute
+ public int getRegions() {
+ return regions;
+ }
+
+ /**
+ * @return the total number of requests per second handled by the cluster in the last reporting
+ * interval
+ */
+ @XmlAttribute
+ public long getRequests() {
+ return requests;
+ }
+
+ /**
+ * @return the average load of the region servers in the cluster
+ */
+ @XmlAttribute
+ public double getAverageLoad() {
+ return averageLoad;
+ }
+
+ /**
+ * @param nodes the list of live node models
+ */
+ public void setLiveNodes(List<Node> nodes) {
+ this.liveNodes = nodes;
+ }
+
+ /**
+ * @param nodes the list of dead node names
+ */
+ public void setDeadNodes(List<String> nodes) {
+ this.deadNodes = nodes;
+ }
+
+ /**
+ * @param regions the total number of regions served by the cluster
+ */
+ public void setRegions(int regions) {
+ this.regions = regions;
+ }
+
+ /**
+ * @param requests the total number of requests per second handled by the cluster
+ */
+ public void setRequests(long requests) {
+ this.requests = requests;
+ }
+
+ /**
+ * @param averageLoad the average load of region servers in the cluster
+ */
+ public void setAverageLoad(double averageLoad) {
+ this.averageLoad = averageLoad;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(String.format("%d live servers, %d dead servers, " +
+ "%.4f average load%n%n", liveNodes.size(), deadNodes.size(),
+ averageLoad));
+ if (!liveNodes.isEmpty()) {
+ sb.append(liveNodes.size());
+ sb.append(" live servers\n");
+ for (Node node : liveNodes) {
+ sb.append(" ");
+ sb.append(node.name);
+ sb.append(' ');
+ sb.append(node.startCode);
+ sb.append("\n requests=");
+ sb.append(node.requests);
+ sb.append(", regions=");
+ sb.append(node.regions.size());
+ sb.append("\n heapSizeMB=");
+ sb.append(node.heapSizeMB);
+ sb.append("\n maxHeapSizeMB=");
+ sb.append(node.maxHeapSizeMB);
+ sb.append("\n\n");
+ for (Node.Region region : node.regions) {
+ sb.append(" ");
+ sb.append(Bytes.toString(region.name));
+ sb.append("\n stores=");
+ sb.append(region.stores);
+ sb.append("\n storefiles=");
+ sb.append(region.storefiles);
+ sb.append("\n storefileSizeMB=");
+ sb.append(region.storefileSizeMB);
+ sb.append("\n memstoreSizeMB=");
+ sb.append(region.memstoreSizeMB);
+ sb.append("\n storefileIndexSizeKB=");
+ sb.append(region.storefileIndexSizeKB);
+ sb.append("\n readRequestsCount=");
+ sb.append(region.readRequestsCount);
+ sb.append("\n writeRequestsCount=");
+ sb.append(region.writeRequestsCount);
+ sb.append("\n rootIndexSizeKB=");
+ sb.append(region.rootIndexSizeKB);
+ sb.append("\n totalStaticIndexSizeKB=");
+ sb.append(region.totalStaticIndexSizeKB);
+ sb.append("\n totalStaticBloomSizeKB=");
+ sb.append(region.totalStaticBloomSizeKB);
+ sb.append("\n totalCompactingKVs=");
+ sb.append(region.totalCompactingKVs);
+ sb.append("\n currentCompactedKVs=");
+ sb.append(region.currentCompactedKVs);
+ sb.append('\n');
+ }
+ sb.append('\n');
+ }
+ }
+ if (!deadNodes.isEmpty()) {
+ sb.append('\n');
+ sb.append(deadNodes.size());
+ sb.append(" dead servers\n");
+ for (String node : deadNodes) {
+ sb.append(" ");
+ sb.append(node);
+ sb.append('\n');
+ }
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ builder.setRegions(regions);
+ builder.setRequests(requests);
+ builder.setAverageLoad(averageLoad);
+ for (Node node : liveNodes) {
+ StorageClusterStatus.Node.Builder nodeBuilder =
+ StorageClusterStatus.Node.newBuilder();
+ nodeBuilder.setName(node.name);
+ nodeBuilder.setStartCode(node.startCode);
+ nodeBuilder.setRequests(node.requests);
+ nodeBuilder.setHeapSizeMB(node.heapSizeMB);
+ nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB);
+ for (Node.Region region : node.regions) {
+ StorageClusterStatus.Region.Builder regionBuilder =
+ StorageClusterStatus.Region.newBuilder();
+ regionBuilder.setName(UnsafeByteOperations.unsafeWrap(region.name));
+ regionBuilder.setStores(region.stores);
+ regionBuilder.setStorefiles(region.storefiles);
+ regionBuilder.setStorefileSizeMB(region.storefileSizeMB);
+ regionBuilder.setMemStoreSizeMB(region.memstoreSizeMB);
+ regionBuilder.setStorefileIndexSizeKB(region.storefileIndexSizeKB);
+ regionBuilder.setReadRequestsCount(region.readRequestsCount);
+ regionBuilder.setWriteRequestsCount(region.writeRequestsCount);
+ regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB);
+ regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB);
+ regionBuilder.setTotalStaticBloomSizeKB(region.totalStaticBloomSizeKB);
+ regionBuilder.setTotalCompactingKVs(region.totalCompactingKVs);
+ regionBuilder.setCurrentCompactedKVs(region.currentCompactedKVs);
+ nodeBuilder.addRegions(regionBuilder);
+ }
+ builder.addLiveNodes(nodeBuilder);
+ }
+ for (String node : deadNodes) {
+ builder.addDeadNodes(node);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ ProtobufUtil.mergeFrom(builder, message);
+ if (builder.hasRegions()) {
+ regions = builder.getRegions();
+ }
+ if (builder.hasRequests()) {
+ requests = builder.getRequests();
+ }
+ if (builder.hasAverageLoad()) {
+ averageLoad = builder.getAverageLoad();
+ }
+ for (StorageClusterStatus.Node node : builder.getLiveNodesList()) {
+ long startCode = node.hasStartCode() ? node.getStartCode() : -1;
+ Node nodeModel =
+ addLiveNode(node.getName(), startCode, node.getHeapSizeMB(),
+ node.getMaxHeapSizeMB());
+ long requests = node.hasRequests() ? node.getRequests() : 0;
+ nodeModel.setRequests(requests);
+ for (StorageClusterStatus.Region region : node.getRegionsList()) {
+ nodeModel.addRegion(
+ region.getName().toByteArray(),
+ region.getStores(),
+ region.getStorefiles(),
+ region.getStorefileSizeMB(),
+ region.getMemStoreSizeMB(),
+ region.getStorefileIndexSizeKB(),
+ region.getReadRequestsCount(),
+ region.getWriteRequestsCount(),
+ region.getRootIndexSizeKB(),
+ region.getTotalStaticIndexSizeKB(),
+ region.getTotalStaticBloomSizeKB(),
+ region.getTotalCompactingKVs(),
+ region.getCurrentCompactedKVs());
+ }
+ }
+ for (String node : builder.getDeadNodesList()) {
+ addDeadNode(node);
+ }
+ return this;
+ }
+}
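
A hand-assembled example of the cluster status model above; hostnames, timestamps and
counters are invented, and real callers would populate these values from live cluster
metrics:

    StorageClusterStatusModel status = new StorageClusterStatusModel();
    status.setRegions(1);
    status.setRequests(0);
    status.setAverageLoad(1.0);

    StorageClusterStatusModel.Node node =
        status.addLiveNode("rs1.example.com:16020", 1545609574227L, 128, 1024);
    node.setRequests(0);
    node.addRegion(Bytes.toBytes("t1,,1545609574227.abcdef."), 1, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0);
    status.addDeadNode("rs2.example.com:16020");

    byte[] wire = status.createProtobufOutput();  // StorageClusterStatus message bytes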
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
new file mode 100755
index 00000000..58409976
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java
@@ -0,0 +1,75 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Simple representation of the version of the storage cluster.
+ */
+@XmlRootElement(name="ClusterVersion")
+@InterfaceAudience.Private
+public class StorageClusterVersionModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private String version;
+
+ /**
+ * @return the storage cluster version
+ */
+ @XmlAttribute(name="Version")
+ public String getVersion() {
+ return version;
+ }
+
+ /**
+ * @param version the storage cluster version
+ */
+ public void setVersion(String version) {
+ this.version = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return version;
+ }
+
+ //needed for jackson deserialization
+ private static StorageClusterVersionModel valueOf(String value) {
+ StorageClusterVersionModel versionModel
+ = new StorageClusterVersionModel();
+ versionModel.setVersion(value);
+ return versionModel;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
new file mode 100755
index 00000000..32006251
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java
@@ -0,0 +1,162 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableInfoMessage.TableInfo;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+
+/**
+ * Representation of a list of table regions.
+ */
+@XmlRootElement(name="TableInfo")
+@InterfaceAudience.Private
+public class TableInfoModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+ private List<TableRegionModel> regions = new ArrayList<>();
+
+ /**
+ * Default constructor
+ */
+ public TableInfoModel() {}
+
+ /**
+ * Constructor
+ * @param name the table name
+ */
+ public TableInfoModel(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Add a region model to the list
+ * @param region the region
+ */
+ public void add(TableRegionModel region) {
+ regions.add(region);
+ }
+
+ /**
+ * @param index the index
+ * @return the region model
+ */
+ public TableRegionModel get(int index) {
+ return regions.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the regions
+ */
+ @XmlElement(name="Region")
+ public List<TableRegionModel> getRegions() {
+ return regions;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param regions the regions to set
+ */
+ public void setRegions(List<TableRegionModel> regions) {
+ this.regions = regions;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableRegionModel aRegion : regions) {
+ sb.append(aRegion.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ builder.setName(name);
+ for (TableRegionModel aRegion: regions) {
+ TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder();
+ regionBuilder.setName(aRegion.getName());
+ regionBuilder.setId(aRegion.getId());
+ regionBuilder.setStartKey(UnsafeByteOperations.unsafeWrap(aRegion.getStartKey()));
+ regionBuilder.setEndKey(UnsafeByteOperations.unsafeWrap(aRegion.getEndKey()));
+ regionBuilder.setLocation(aRegion.getLocation());
+ builder.addRegions(regionBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ ProtobufUtil.mergeFrom(builder, message);
+ setName(builder.getName());
+ for (TableInfo.Region region: builder.getRegionsList()) {
+ add(new TableRegionModel(builder.getName(), region.getId(),
+ region.getStartKey().toByteArray(),
+ region.getEndKey().toByteArray(),
+ region.getLocation()));
+ }
+ return this;
+ }
+}
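
An illustrative pairing of TableInfoModel with the TableRegionModel class added later in
this patch (table name, keys and location are invented):

    TableInfoModel info = new TableInfoModel("t1");
    info.add(new TableRegionModel("t1", 1545609574227L,
        Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), "rs1.example.com:16020"));

    byte[] wire = info.createProtobufOutput();
    TableInfoModel parsed = (TableInfoModel) new TableInfoModel().getObjectFromMessage(wire);
    // parsed.getName() -> "t1"; parsed.get(0) is the region added above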
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
new file mode 100755
index 00000000..8d3e1ab0
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java
@@ -0,0 +1,115 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlElementRef;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableListMessage.TableList;
+
+/**
+ * Simple representation of a list of table names.
+ */
+@XmlRootElement(name="TableList")
+@InterfaceAudience.Private
+public class TableListModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ private List<TableModel> tables = new ArrayList<>();
+
+ /**
+ * Default constructor
+ */
+ public TableListModel() {}
+
+ /**
+ * Add the table name model to the list
+ * @param table the table model
+ */
+ public void add(TableModel table) {
+ tables.add(table);
+ }
+
+ /**
+ * @param index the index
+ * @return the table model
+ */
+ public TableModel get(int index) {
+ return tables.get(index);
+ }
+
+ /**
+ * @return the tables
+ */
+ @XmlElementRef(name="table")
+ public List<TableModel> getTables() {
+ return tables;
+ }
+
+ /**
+ * @param tables the tables to set
+ */
+ public void setTables(List<TableModel> tables) {
+ this.tables = tables;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableModel aTable : tables) {
+ sb.append(aTable.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableList.Builder builder = TableList.newBuilder();
+ for (TableModel aTable : tables) {
+ builder.addName(aTable.getName());
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableList.Builder builder = TableList.newBuilder();
+ ProtobufUtil.mergeFrom(builder, message);
+ for (String table: builder.getNameList()) {
+ this.add(new TableModel(table));
+ }
+ return this;
+ }
+}
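
A short round-trip sketch for the table list (table names are invented):

    TableListModel list = new TableListModel();
    list.add(new TableModel("t1"));
    list.add(new TableModel("t2"));

    byte[] wire = list.createProtobufOutput();
    TableListModel parsed = (TableListModel) new TableListModel().getObjectFromMessage(wire);
    // parsed.get(0).getName() -> "t1"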
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
new file mode 100755
index 00000000..4628263e
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java
@@ -0,0 +1,84 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Simple representation of a table name.
+ */
+@XmlRootElement(name="table")
+@InterfaceAudience.Private
+public class TableModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+
+ /**
+ * Default constructor
+ */
+ public TableModel() {}
+
+ /**
+ * Constructor
+ * @param name the table name
+ */
+ public TableModel(String name) {
+ super();
+ this.name = name;
+ }
+
+ /**
+ * @return the name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @param name the name to set
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return this.name;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
new file mode 100755
index 00000000..2ed5d9d5
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java
@@ -0,0 +1,196 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Representation of a region of a table and its current location on the
+ * storage cluster.
+ */
+@XmlRootElement(name="Region")
+@InterfaceAudience.Private
+public class TableRegionModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String table;
+ private long id;
+ private byte[] startKey;
+ private byte[] endKey;
+ private String location;
+
+ /**
+ * Constructor
+ */
+ public TableRegionModel() {}
+
+ /**
+ * Constructor
+ * @param table the table name
+ * @param id the encoded id of the region
+ * @param startKey the start key of the region
+ * @param endKey the end key of the region
+ */
+ public TableRegionModel(String table, long id, byte[] startKey,
+ byte[] endKey) {
+ this(table, id, startKey, endKey, null);
+ }
+
+ /**
+ * Constructor
+ * @param table the table name
+ * @param id the encoded id of the region
+ * @param startKey the start key of the region
+ * @param endKey the end key of the region
+ * @param location the name and port of the region server hosting the region
+ */
+ public TableRegionModel(String table, long id, byte[] startKey,
+ byte[] endKey, String location) {
+ this.table = table;
+ this.id = id;
+ this.startKey = startKey;
+ this.endKey = endKey;
+ this.location = location;
+ }
+
+ /**
+ * @return the region name
+ */
+ @XmlAttribute
+ public String getName() {
+ byte [] tableNameAsBytes = Bytes.toBytes(this.table);
+ TableName tableName = TableName.valueOf(tableNameAsBytes);
+ byte [] nameAsBytes = HRegionInfo.createRegionName(
+ tableName, this.startKey, this.id, !tableName.isSystemTable());
+ return Bytes.toString(nameAsBytes);
+ }
+
+ /**
+ * @return the encoded region id
+ */
+ @XmlAttribute
+ public long getId() {
+ return id;
+ }
+
+ /**
+ * @return the start key
+ */
+ @XmlAttribute
+ public byte[] getStartKey() {
+ return startKey;
+ }
+
+ /**
+ * @return the end key
+ */
+ @XmlAttribute
+ public byte[] getEndKey() {
+ return endKey;
+ }
+
+ /**
+ * @return the name and port of the region server hosting the region
+ */
+ @XmlAttribute
+ public String getLocation() {
+ return location;
+ }
+
+ /**
+ * @param name region printable name
+ */
+ public void setName(String name) {
+ String[] split = name.split(",");
+ this.table = split[0];
+ this.startKey = Bytes.toBytes(split[1]);
+ String tail = split[2];
+ split = tail.split("\\.");
+ id = Long.parseLong(split[0]);
+ }
+
+ /**
+ * @param id the region's encoded id
+ */
+ public void setId(long id) {
+ this.id = id;
+ }
+
+ /**
+ * @param startKey the start key
+ */
+ public void setStartKey(byte[] startKey) {
+ this.startKey = startKey;
+ }
+
+ /**
+ * @param endKey the end key
+ */
+ public void setEndKey(byte[] endKey) {
+ this.endKey = endKey;
+ }
+
+ /**
+ * @param location the name and port of the region server hosting the region
+ */
+ public void setLocation(String location) {
+ this.location = location;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(getName());
+ sb.append(" [\n id=");
+ sb.append(id);
+ sb.append("\n startKey='");
+ sb.append(Bytes.toString(startKey));
+ sb.append("'\n endKey='");
+ sb.append(Bytes.toString(endKey));
+ if (location != null) {
+ sb.append("'\n location='");
+ sb.append(location);
+ }
+ sb.append("'\n]\n");
+ return sb.toString();
+ }
+}
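
A sketch of the name round trip above: getName() synthesizes the canonical region name
from the table, start key and id, and setName() parses such a name back into its parts
(values are invented):

    TableRegionModel region = new TableRegionModel("t1", 1545609574227L,
        Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), "rs1.example.com:16020");
    String name = region.getName();  // "t1,aaa,1545609574227.<encoded suffix>." for a user table

    TableRegionModel copy = new TableRegionModel();
    copy.setName(name);              // recovers table, start key and id from the name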
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
new file mode 100755
index 00000000..3d60490f
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java
@@ -0,0 +1,362 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.model;
+
+import com.fasterxml.jackson.annotation.JsonAnyGetter;
+import com.fasterxml.jackson.annotation.JsonAnySetter;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableSchemaMessage.TableSchema;
+import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
+
+/**
+ * A representation of HBase table descriptors.
+ */
+@XmlRootElement(name="TableSchema")
+@InterfaceAudience.Private
+public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+ private static final QName IS_META = new QName(HTableDescriptor.IS_META);
+ private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
+ private static final QName READONLY = new QName(HTableDescriptor.READONLY);
+ private static final QName TTL = new QName(HColumnDescriptor.TTL);
+ private static final QName VERSIONS = new QName(HConstants.VERSIONS);
+ private static final QName COMPRESSION =
+ new QName(HColumnDescriptor.COMPRESSION);
+
+ private String name;
+ private Map<QName, Object> attrs = new LinkedHashMap<>();
+ private List<ColumnSchemaModel> columns = new ArrayList<>();
+
+ /**
+ * Default constructor.
+ */
+ public TableSchemaModel() {}
+
+ /**
+ * Constructor
+ * @param tableDescriptor the table descriptor
+ */
+ public TableSchemaModel(TableDescriptor tableDescriptor) {
+ setName(tableDescriptor.getTableName().getNameAsString());
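+ // Copy the table-level values, then each column family's attributes, into the model.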
+ for (Map.Entry<Bytes, Bytes> e : tableDescriptor.getValues().entrySet()) {
+ addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ for (ColumnFamilyDescriptor hcd : tableDescriptor.getColumnFamilies()) {
+ ColumnSchemaModel columnModel = new ColumnSchemaModel();
+ columnModel.setName(hcd.getNameAsString());
+ for (Map.Entry<Bytes, Bytes> e :
+ hcd.getValues().entrySet()) {
+ columnModel.addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ addColumnFamily(columnModel);
+ }
+ }
+
+ /**
+ * Add an attribute to the table descriptor
+ * @param name attribute name
+ * @param value attribute value
+ */
+ @JsonAnySetter
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+ /**
+ * Return a table descriptor value as a string. Calls toString() on the
+ * object stored in the descriptor value map.
+ * @param name the attribute name
+ * @return the attribute value
+ */
+ public String getAttribute(String name) {
+ Object o = attrs.get(new QName(name));
+ return o != null ? o.toString() : null;
+ }
+
+ /**
+ * Add a column family to the table descriptor
+ * @param family the column family model
+ */
+ public void addColumnFamily(ColumnSchemaModel family) {
+ columns.add(family);
+ }
+
+ /**
+ * Retrieve the column family at the given index from the table descriptor
+ * @param index the index
+ * @return the column family model
+ */
+ public ColumnSchemaModel getColumnFamily(int index) {
+ return columns.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+ @JsonAnyGetter
+ public Map<QName, Object> getAny() {
+ return attrs;
+ }
+
+ /**
+ * @return the columns
+ */
+ @XmlElement(name="ColumnSchema")
+ public List<ColumnSchemaModel> getColumns() {
+ return columns;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param columns the columns to set
+ */
+ public void setColumns(List<ColumnSchemaModel> columns) {
+ this.columns = columns;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME=> '");
+ sb.append(name);
+ sb.append('\'');
+ for (Map.Entry<QName, Object> e : attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(", COLUMNS => [ ");
+ Iterator<ColumnSchemaModel> i = columns.iterator();
+ while (i.hasNext()) {
+ ColumnSchemaModel family = i.next();
+ sb.append(family.toString());
+ if (i.hasNext()) {
+ sb.append(',');
+ }
+ sb.append(' ');
+ }
+ sb.append("] }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ /**
+ * @return true if IS_META attribute exists and is true
+ */
+ public boolean __getIsMeta() {
+ Object o = attrs.get(IS_META);
+ return o != null && Boolean.parseBoolean(o.toString());
+ }
+
+ /**
+ * @return true if IS_ROOT attribute exists and is true
+ */
+ public boolean __getIsRoot() {
+ Object o = attrs.get(IS_ROOT);
+ return o != null && Boolean.parseBoolean(o.toString());
+ }
+
+ /**
+ * @return true if READONLY attribute exists and is true
+ */
+ public boolean __getReadOnly() {
+ Object o = attrs.get(READONLY);
+ return o != null ? Boolean.parseBoolean(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
+ }
+
+ /**
+ * @param value desired value of IS_META attribute
+ */
+ public void __setIsMeta(boolean value) {
+ attrs.put(IS_META, Boolean.toString(value));
+ }
+
+ /**
+ * @param value desired value of IS_ROOT attribute
+ */
+ public void __setIsRoot(boolean value) {
+ attrs.put(IS_ROOT, Boolean.toString(value));
+ }
+
+ /**
+ * @param value desired value of READONLY attribute
+ */
+ public void __setReadOnly(boolean value) {
+ attrs.put(READONLY, Boolean.toString(value));
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.setName(name);
+ for (Map.Entry<QName, Object> e : attrs.entrySet()) {
+ TableSchema.Attribute.Builder attrBuilder =
+ TableSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ builder.addAttrs(attrBuilder);
+ }
+ for (ColumnSchemaModel family : columns) {
+ Map<QName, Object> familyAttrs = family.getAny();
+ ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder();
+ familyBuilder.setName(family.getName());
+ for (Map.Entry<QName, Object> e : familyAttrs.entrySet()) {
+ ColumnSchema.Attribute.Builder attrBuilder =
+ ColumnSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ familyBuilder.addAttrs(attrBuilder);
+ }
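+ // Well-known family attributes (TTL, VERSIONS, COMPRESSION) also map onto dedicated protobuf fields.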
+ if (familyAttrs.containsKey(TTL)) {
+ familyBuilder.setTtl(Integer.parseInt(familyAttrs.get(TTL).toString()));
+ }
+ if (familyAttrs.containsKey(VERSIONS)) {
+ familyBuilder.setMaxVersions(Integer.parseInt(familyAttrs.get(VERSIONS).toString()));
+ }
+ if (familyAttrs.containsKey(COMPRESSION)) {
+ familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString());
+ }
+ builder.addColumns(familyBuilder);
+ }
+ if (attrs.containsKey(READONLY)) {
+ builder.setReadOnly(Boolean.parseBoolean(attrs.get(READONLY).toString()));
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ ProtobufUtil.mergeFrom(builder, message);
+ this.setName(builder.getName());
+ for (TableSchema.Attribute attr : builder.getAttrsList()) {
+ this.addAttribute(attr.getName(), attr.getValue());
+ }
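+ // Values carried in dedicated protobuf fields are copied back into the generic attribute maps.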
+ if (builder.hasReadOnly()) {
+ this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
+ }
+ for (ColumnSchema family : builder.getColumnsList()) {
+ ColumnSchemaModel familyModel = new ColumnSchemaModel();
+ familyModel.setName(family.getName());
+ for (ColumnSchema.Attribute attr : family.getAttrsList()) {
+ familyModel.addAttribute(attr.getName(), attr.getValue());
+ }
+ if (family.hasTtl()) {
+ familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl());
+ }
+ if (family.hasMaxVersions()) {
+ familyModel.addAttribute(HConstants.VERSIONS,
+ family.getMaxVersions());
+ }
+ if (family.hasCompression()) {
+ familyModel.addAttribute(HColumnDescriptor.COMPRESSION,
+ family.getCompression());
+ }
+ this.addColumnFamily(familyModel);
+ }
+ return this;
+ }
+
+ /**
+ * @return a table descriptor
+ */
+ @JsonIgnore
+ public TableDescriptor getTableDescriptor() {
+ TableDescriptorBuilder tableDescriptorBuilder =
+ TableDescriptorBuilder.newBuilder(TableName.valueOf(getName()));
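+ // Rebuild the descriptor from the generic attribute map and the column family models.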
+ for (Map.Entry<QName, Object> e : getAny().entrySet()) {
+ tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ for (ColumnSchemaModel column : getColumns()) {
+ ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder
+ .newBuilder(Bytes.toBytes(column.getName()));
+ for (Map.Entry<QName, Object> e : column.getAny().entrySet()) {
+ cfdb.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ tableDescriptorBuilder.setColumnFamily(cfdb.build());
+ }
+ return tableDescriptorBuilder.build();
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
new file mode 100755
index 00000000..0db793ac
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java
@@ -0,0 +1,211 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.servlet.ServletContext;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.rest.RESTServlet;
+import org.glassfish.jersey.servlet.ServletContainer;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.VersionMessage.Version;
+
+/**
+ * A representation of the collection of versions of the REST gateway software
+ * components.
+ * <ul>
+ * <li>restVersion: REST gateway revision</li>
+ * <li>jvmVersion: the JVM vendor and version information</li>
+ * <li>osVersion: the OS type, version, and hardware architecture</li>
+ * <li>serverVersion: the name and version of the servlet container</li>
+ * <li>jerseyVersion: the version of the embedded Jersey framework</li>
+ * </ul>
+ */
+@XmlRootElement(name="Version")
+@InterfaceAudience.Private
+public class VersionModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ private String restVersion;
+ private String jvmVersion;
+ private String osVersion;
+ private String serverVersion;
+ private String jerseyVersion;
+
+ /**
+ * Default constructor. Do not use.
+ */
+ public VersionModel() {}
+
+ /**
+ * Constructor
+ * @param context the servlet context
+ */
+ public VersionModel(ServletContext context) {
+ restVersion = RESTServlet.VERSION_STRING;
+ jvmVersion = System.getProperty("java.vm.vendor") + ' ' +
+ System.getProperty("java.version") + '-' +
+ System.getProperty("java.vm.version");
+ osVersion = System.getProperty("os.name") + ' ' +
+ System.getProperty("os.version") + ' ' +
+ System.getProperty("os.arch");
+ serverVersion = context.getServerInfo();
+ jerseyVersion = ServletContainer.class.getPackage().getImplementationVersion();
+ // Currently, this will always be null because the manifest doesn't have any useful information
+ if (jerseyVersion == null) jerseyVersion = "";
+ }
+
+ /**
+ * @return the REST gateway version
+ */
+ @XmlAttribute(name="REST")
+ public String getRESTVersion() {
+ return restVersion;
+ }
+
+ /**
+ * @return the JVM vendor and version
+ */
+ @XmlAttribute(name="JVM")
+ public String getJVMVersion() {
+ return jvmVersion;
+ }
+
+ /**
+ * @return the OS name, version, and hardware architecture
+ */
+ @XmlAttribute(name="OS")
+ public String getOSVersion() {
+ return osVersion;
+ }
+
+ /**
+ * @return the servlet container version
+ */
+ @XmlAttribute(name="Server")
+ public String getServerVersion() {
+ return serverVersion;
+ }
+
+ /**
+ * @return the version of the embedded Jersey framework
+ */
+ @XmlAttribute(name="Jersey")
+ public String getJerseyVersion() {
+ return jerseyVersion;
+ }
+
+ /**
+ * @param version the REST gateway version string
+ */
+ public void setRESTVersion(String version) {
+ this.restVersion = version;
+ }
+
+ /**
+ * @param version the OS version string
+ */
+ public void setOSVersion(String version) {
+ this.osVersion = version;
+ }
+
+ /**
+ * @param version the JVM version string
+ */
+ public void setJVMVersion(String version) {
+ this.jvmVersion = version;
+ }
+
+ /**
+ * @param version the servlet container version string
+ */
+ public void setServerVersion(String version) {
+ this.serverVersion = version;
+ }
+
+ /**
+ * @param version the Jersey framework version string
+ */
+ public void setJerseyVersion(String version) {
+ this.jerseyVersion = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("rest ");
+ sb.append(restVersion);
+ sb.append(" [JVM: ");
+ sb.append(jvmVersion);
+ sb.append("] [OS: ");
+ sb.append(osVersion);
+ sb.append("] [Server: ");
+ sb.append(serverVersion);
+ sb.append("] [Jersey: ");
+ sb.append(jerseyVersion);
+ sb.append("]\n");
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Version.Builder builder = Version.newBuilder();
+ builder.setRestVersion(restVersion);
+ builder.setJvmVersion(jvmVersion);
+ builder.setOsVersion(osVersion);
+ builder.setServerVersion(serverVersion);
+ builder.setJerseyVersion(jerseyVersion);
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ Version.Builder builder = Version.newBuilder();
+ ProtobufUtil.mergeFrom(builder, message);
+ if (builder.hasRestVersion()) {
+ restVersion = builder.getRestVersion();
+ }
+ if (builder.hasJvmVersion()) {
+ jvmVersion = builder.getJvmVersion();
+ }
+ if (builder.hasOsVersion()) {
+ osVersion = builder.getOsVersion();
+ }
+ if (builder.hasServerVersion()) {
+ serverVersion = builder.getServerVersion();
+ }
+ if (builder.hasJerseyVersion()) {
+ jerseyVersion = builder.getJerseyVersion();
+ }
+ return this;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html
new file mode 100755
index 00000000..1129023b
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html
@@ -0,0 +1,31 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements. See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership. The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<html>
+<body>
+<h2>HBase REST</h2>
+<p>This package provides a RESTful Web service front end for HBase.</p>
+<p>
+The documentation that used to live in this file has moved to the HBase Reference Guide.
+</p>
+</body>
+</html>
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
new file mode 100755
index 00000000..fda2b7de
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java
@@ -0,0 +1,89 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.provider;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.rest.model.CellModel;
+import org.apache.hadoop.hbase.rest.model.CellSetModel;
+import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel;
+import org.apache.hadoop.hbase.rest.model.NamespacesModel;
+import org.apache.hadoop.hbase.rest.model.RowModel;
+import org.apache.hadoop.hbase.rest.model.ScannerModel;
+import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.rest.model.TableInfoModel;
+import org.apache.hadoop.hbase.rest.model.TableListModel;
+import org.apache.hadoop.hbase.rest.model.TableModel;
+import org.apache.hadoop.hbase.rest.model.TableRegionModel;
+import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.hadoop.hbase.rest.model.VersionModel;
+
+/**
+ * Plumbing for hooking up Jersey's JSON entity body encoding and decoding
+ * support to JAXB. Modify how the context is created (by using e.g. a
+ * different configuration builder) to control how JSON is processed and
+ * created.
+ */
+@Provider
+@InterfaceAudience.Private
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
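+ // A single JAXBContext is built once and shared for every model class registered in cTypes below.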
+ private final JAXBContext context;
+
+ private final Set<Class<?>> types;
+
+ private final Class<?>[] cTypes = {
+ CellModel.class,
+ CellSetModel.class,
+ ColumnSchemaModel.class,
+ NamespacesModel.class,
+ NamespacesInstanceModel.class,
+ RowModel.class,
+ ScannerModel.class,
+ StorageClusterStatusModel.class,
+ StorageClusterVersionModel.class,
+ TableInfoModel.class,
+ TableListModel.class,
+ TableModel.class,
+ TableRegionModel.class,
+ TableSchemaModel.class,
+ VersionModel.class
+ };
+
+ @SuppressWarnings("unchecked")
+ public JAXBContextResolver() throws Exception {
+ this.types = new HashSet<>(Arrays.asList(cTypes));
+ context = JAXBContext.newInstance(cTypes);
+ }
+
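+ // Returning null for unregistered types lets Jersey look elsewhere for a suitable context.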
+ @Override
+ public JAXBContext getContext(Class<?> objectType) {
+ return (types.contains(objectType)) ? context : null;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
new file mode 100755
index 00000000..9990f3fe
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
@@ -0,0 +1,88 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.provider.consumer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.rest.Constants;
+import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+
+/**
+ * Adapter for hooking up Jersey content processing dispatch to
+ * ProtobufMessageHandler interface capable handlers for decoding protobuf input.
+ */
+@Provider
+@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
+@InterfaceAudience.Private
+public class ProtobufMessageBodyConsumer
+ implements MessageBodyReader<ProtobufMessageHandler> {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class);
+
+ @Override
+ public boolean isReadable(Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ return ProtobufMessageHandler.class.isAssignableFrom(type);
+ }
+
+ @Override
+ public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
+ throws IOException, WebApplicationException {
+ ProtobufMessageHandler obj = null;
+ try {
+ obj = type.getDeclaredConstructor().newInstance();
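+ // Buffer the whole request body, then let the model instance decode itself from the protobuf bytes.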
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ byte[] buffer = new byte[4096];
+ int read;
+ do {
+ read = inputStream.read(buffer, 0, buffer.length);
+ if (read > 0) {
+ baos.write(buffer, 0, read);
+ }
+ } while (read > 0);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(getClass() + ": read " + baos.size() + " bytes from " +
+ inputStream);
+ }
+ obj = obj.getObjectFromMessage(baos.toByteArray());
+ } catch (InstantiationException | NoSuchMethodException | InvocationTargetException
+ | IllegalAccessException e) {
+ throw new WebApplicationException(e);
+ }
+ return obj;
+ }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
new file mode 100755
index 00000000..0a606f60
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java
@@ -0,0 +1,70 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rest.provider.producer;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.rest.Constants;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * An adapter between Jersey and Object.toString(). Hooks up plain text output
+ * to the Jersey content handling framework.
+ * Jersey will first call getSize() to learn the number of bytes that will be
+ * sent, then writeTo to perform the actual I/O.
+ */
+@Provider
+@Produces(Constants.MIMETYPE_TEXT)
+@InterfaceAudience.Private
+public class PlainTextMessageBodyProducer
+ implements MessageBodyWriter<Object> {