diff --git a/pom.xml b/pom.xml index d6061ead..b4564958 100644 --- a/pom.xml +++ b/pom.xml @@ -61,6 +61,7 @@ kafka spark + rest hbase-connectors-assembly @@ -154,6 +155,16 @@ 3.0.1-b08 hbase-hadoop2-compat false + 2.10.1 + 1.3.8 + 2.3.2 + 1.0.1 + 2.28.2 + 1.3.9-1 + 1.60 + 4.2.0-incubating + 1.1.0 + com.google.protobuf @@ -290,6 +301,36 @@ hbase-mapreduce ${hbase.version} + + org.apache.kerby + kerb-core + ${kerby.version} + + + org.apache.kerby + kerb-simplekdc + ${kerby.version} + + + org.apache.hbase + hbase-http + ${hbase.version} + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + ${hbase-thirdparty.version} + + + com.github.stephenc.findbugs + findbugs-annotations + ${findbugs-annotations.version} + + + org.apache.htrace + htrace-core4 + ${htrace.version} + hbase-it org.apache.hbase @@ -303,6 +344,18 @@ ${hbase.version} test + + org.apache.hadoop + hadoop-minikdc + ${hadoop-three.version} + test + + + org.mockito + mockito-core + ${mockito-core.version} + test + @@ -343,7 +396,7 @@ org.apache.maven.plugins maven-shade-plugin - 3.2.1 + 3.1.1 org.apache.maven.plugins @@ -431,6 +484,45 @@ true + + net.revelc.code + warbucks-maven-plugin + ${maven.warbucks.version} + + false + + + + (?!.*(.generated.|.tmpl.|\$)).* + false + true + false + false + false + org[.]apache[.]yetus[.]audience[.]InterfaceAudience.* + + + + + + run-warbucks + + check + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf.plugin.version} + + ${external.protobuf.groupid}:protoc:${external.protobuf.version}:exe:${os.detected.classifier} + ${basedir}/src/main/protobuf/ + false + true + + diff --git a/rest/hbase-rest-protocol/pom.xml b/rest/hbase-rest-protocol/pom.xml new file mode 100755 index 00000000..84001189 --- /dev/null +++ b/rest/hbase-rest-protocol/pom.xml @@ -0,0 +1,282 @@ + + + 4.0.0 + + + + + rest + org.apache.hbase.connectors + ${revision} + + + org.apache.hbase.connectors.rest + hbase-rest-protocol + ${revision} + Apache HBase - Rest Protocol + protobuf protocol classes used by HBase REST internally. 
+ + true + + 3.5.1-1 + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + generate-sources + + compile + + + com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} + true + + + + + + + com.google.code.maven-replacer-plugin + replacer + 1.5.3 + + + process-sources + + replace + + + + + ${basedir}/target/generated-sources/ + + **/*.java + + + true + + + ([^\.])com.google.protobuf + $1org.apache.hbase.thirdparty.com.google.protobuf + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + + + + org.apache.maven.plugins + maven-shade-plugin + + + package + + shade + + + true + true + false + + + + com.google.protobuf + org.apache.hadoop.hbase.shaded.com.google.protobuf + + + + + + javax.annotation:javax.annotation-api + + org.apache.hbase.thirdparty:* + com.google.protobuf:protobuf-java + com.google.code.findbugs:* + com.google.j2objc:j2objc-annotations + org.codehaus.mojo:animal-sniffer-annotations + junit:junit + log4j:log4j + commons-logging:commons-logging + org.slf4j:slf4j-api + org.apache.yetus:audience-annotations + com.github.stephenc.fingbugs:* + com.github.spotbugs:* + + + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + + + junit + junit + test + + + org.apache.htrace + htrace-core4 + + + + + + skip-protocol-shaded-tests + + + skip-protocol-shaded-tests + + + + true + true + + + + build-with-jdk11 + + [1.11,) + + + + javax.annotation + javax.annotation-api + + + + + eclipse-specific + + + m2e.version + + + + + + + + org.eclipse.m2e + lifecycle-mapping + + + + + + org.apache.hadoop + hadoop-maven-plugins + [2.0.5-alpha,) + + protoc + + + + + + + + + + com.google.code.maven-replacer-plugin + + replacer + [1.5.3,) + + replace + + + + + false + + + + + + + + + + + + + diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/CellMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellMessage.proto new file mode 100755 index 00000000..75b6f01e --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellMessage.proto @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message Cell { + optional bytes row = 1; // unused if Cell is in a CellSet + optional bytes column = 2; + optional int64 timestamp = 3; + optional bytes data = 4; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/CellSetMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellSetMessage.proto new file mode 100755 index 00000000..68a6b05b --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/CellSetMessage.proto @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +import "rest/CellMessage.proto"; + +message CellSet { + message Row { + required bytes key = 1; + repeated Cell values = 2; + } + repeated Row rows = 1; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/ColumnSchemaMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/ColumnSchemaMessage.proto new file mode 100755 index 00000000..8b5e4795 --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/ColumnSchemaMessage.proto @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message ColumnSchema { + optional string name = 1; + message Attribute { + required string name = 1; + required string value = 2; + } + repeated Attribute attrs = 2; + // optional helpful encodings of commonly used attributes + optional int32 ttl = 3; + optional int32 maxVersions = 4; + optional string compression = 5; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacePropertiesMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacePropertiesMessage.proto new file mode 100755 index 00000000..d0a1a4eb --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacePropertiesMessage.proto @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message NamespaceProperties { + message Property { + required string key = 1; + required string value = 2; + } + repeated Property props = 1; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacesMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacesMessage.proto new file mode 100755 index 00000000..229a1f66 --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/NamespacesMessage.proto @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message Namespaces { + repeated string namespace = 1; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/ScannerMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/ScannerMessage.proto new file mode 100755 index 00000000..78aa85b4 --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/ScannerMessage.proto @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message Scanner { + optional bytes startRow = 1; + optional bytes endRow = 2; + repeated bytes columns = 3; + optional int32 batch = 4; + optional int64 startTime = 5; + optional int64 endTime = 6; + optional int32 maxVersions = 7; + optional string filter = 8; + optional int32 caching = 9; // specifies REST scanner caching + repeated string labels = 10; + optional bool cacheBlocks = 11; // server side block caching hint + optional int32 limit = 12; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/StorageClusterStatusMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/StorageClusterStatusMessage.proto new file mode 100755 index 00000000..c39e2395 --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/StorageClusterStatusMessage.proto @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message StorageClusterStatus { + message Region { + required bytes name = 1; + optional int32 stores = 2; + optional int32 storefiles = 3; + optional int32 storefileSizeMB = 4; + optional int32 memStoreSizeMB = 5; + optional int64 storefileIndexSizeKB = 6; + optional int64 readRequestsCount = 7; + optional int64 writeRequestsCount = 8; + optional int32 rootIndexSizeKB = 9; + optional int32 totalStaticIndexSizeKB = 10; + optional int32 totalStaticBloomSizeKB = 11; + optional int64 totalCompactingKVs = 12; + optional int64 currentCompactedKVs = 13; + optional int64 cpRequestsCount = 14; + } + message Node { + required string name = 1; // name:port + optional int64 startCode = 2; + optional int64 requests = 3; + optional int32 heapSizeMB = 4; + optional int32 maxHeapSizeMB = 5; + repeated Region regions = 6; + } + // node status + repeated Node liveNodes = 1; + repeated string deadNodes = 2; + // summary statistics + optional int32 regions = 3; + optional int64 requests = 4; + optional double averageLoad = 5; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/TableInfoMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableInfoMessage.proto new file mode 100755 index 00000000..344ee1d2 --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableInfoMessage.proto @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message TableInfo { + required string name = 1; + message Region { + required string name = 1; + optional bytes startKey = 2; + optional bytes endKey = 3; + optional int64 id = 4; + optional string location = 5; + } + repeated Region regions = 2; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/TableListMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableListMessage.proto new file mode 100755 index 00000000..bf3857e5 --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableListMessage.proto @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message TableList { + repeated string name = 1; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/TableSchemaMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableSchemaMessage.proto new file mode 100755 index 00000000..6135716c --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/TableSchemaMessage.proto @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +import "rest/ColumnSchemaMessage.proto"; + +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message TableSchema { + optional string name = 1; + message Attribute { + required string name = 1; + required string value = 2; + } + repeated Attribute attrs = 2; + repeated ColumnSchema columns = 3; + // optional helpful encodings of commonly used attributes + optional bool inMemory = 4; + optional bool readOnly = 5; +} diff --git a/rest/hbase-rest-protocol/src/main/protobuf/rest/VersionMessage.proto b/rest/hbase-rest-protocol/src/main/protobuf/rest/VersionMessage.proto new file mode 100755 index 00000000..742d8828 --- /dev/null +++ b/rest/hbase-rest-protocol/src/main/protobuf/rest/VersionMessage.proto @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +syntax = "proto2"; +package org.apache.hadoop.hbase.shaded.rest.protobuf.generated; + +message Version { + optional string restVersion = 1; + optional string jvmVersion = 2; + optional string osVersion = 3; + optional string serverVersion = 4; + optional string jerseyVersion = 5; +} diff --git a/rest/hbase-rest/pom.xml b/rest/hbase-rest/pom.xml new file mode 100755 index 00000000..77cd7eff --- /dev/null +++ b/rest/hbase-rest/pom.xml @@ -0,0 +1,493 @@ + + + 4.0.0 + jar + + + + + org.apache.hbase.connectors + rest + ${revision} + ../ + + + org.apache.hbase.connectors.rest + hbase-rest + Apache HBase - Rest Server + HBase Rest Server + + + surefire-junit47 + + + + + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + + + ../hbase-rest/src/test/resources + + **/** + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + %regex[.*(Cat|Dog).*Test.*] + + + + + + maven-antrun-plugin + + + + generate + generate-sources + + + + + + + + + + + + + + + + + + + + run + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + generate-sources + + add-source + + + + ${project.build.directory}/generated-sources/java + + + + + + + + maven-surefire-plugin + + + ../target/test-classes/webapps + + + + + + org.apache.maven.surefire + ${surefire.provider} + ${surefire.version} + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-compiler-plugin + + 8 + 8 + + + + + + + org.apache.hbase.connectors.rest + hbase-rest-protocol + + + + javax.ws.rs + javax.ws.rs-api + + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-mapreduce + + + org.apache.hbase + hbase-client + + + org.apache.hbase + hbase-hadoop-compat + + + org.apache.hbase + hbase-testing-util + test + + + + com.sun.jersey + jersey-core + + + + + + org.apache.hbase.thirdparty + hbase-shaded-miscellaneous + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + + + org.apache.httpcomponents + httpclient + + + org.apache.httpcomponents + httpcore + + + org.apache.commons + commons-lang3 + + + org.slf4j + slf4j-api + + + javax.xml.bind + jaxb-api + + + javax.servlet + javax.servlet-api + + + com.sun.activation + javax.activation + + + org.eclipse.jetty + jetty-server + + + org.eclipse.jetty + jetty-servlet + + + org.eclipse.jetty + jetty-util + + + org.eclipse.jetty + jetty-http + + + org.eclipse.jetty + jetty-jmx + + + org.glassfish.jersey.containers + jersey-container-servlet-core + + + com.fasterxml.jackson.jaxrs + jackson-jaxrs-json-provider + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-annotations + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-core + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-databind + ${jackson.version} + + + + org.codehaus.jettison + jettison + + + stax + stax-api + + + + + + org.glassfish.web + javax.servlet.jsp + + + + org.glassfish + javax.el + + + org.apache.kerby + kerb-simplekdc + test + + + org.apache.hadoop + hadoop-minikdc + test + + + org.apache.kerby + kerb-core + test + + + commons-io + commons-io + test + + + junit + junit + test + + + org.mockito + mockito-core + test + + + com.github.stephenc.findbugs + findbugs-annotations + compile + true + + + + org.bouncycastle + bcprov-jdk15on + test + + + + + + skipRestTests + + + skipRestTests + + + + true + true + + + + + + hadoop-3.0 + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-yarn-server-nodemanager + + + com.sun.jersey + jersey-core + + + + + 
org.apache.hadoop + hadoop-yarn-server-resourcemanager + + + com.sun.jersey + jersey-core + + + + + org.apache.hadoop + hadoop-yarn-server-timelineservice + + + javax.ws.rs + jsr311-api + + + + + org.apache.hadoop + hadoop-yarn-common + + + com.sun.jersey + jersey-core + + + + + + + + org.apache.hadoop + hadoop-annotations + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-auth + + + + org.glassfish.jaxb + jaxb-runtime + 2.3.2 + + + + + eclipse-specific + + + m2e.version + + + + + + + + + org.eclipse.m2e + lifecycle-mapping + + + + + + org.apache.maven.plugins + maven-antrun-plugin + [1.6,) + + run + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java new file mode 100755 index 00000000..4cf8a93e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Common constants for org.apache.hadoop.hbase.rest + */ +@InterfaceAudience.Public +public interface Constants { + // All constants in a public interface are 'public static final' + + String VERSION_STRING = "0.0.3"; + + int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours + + int DEFAULT_LISTEN_PORT = 8080; + + String MIMETYPE_TEXT = "text/plain"; + String MIMETYPE_HTML = "text/html"; + String MIMETYPE_XML = "text/xml"; + String MIMETYPE_BINARY = "application/octet-stream"; + String MIMETYPE_PROTOBUF = "application/x-protobuf"; + String MIMETYPE_PROTOBUF_IETF = "application/protobuf"; + String MIMETYPE_JSON = "application/json"; + + String CRLF = "\r\n"; + + String REST_KEYTAB_FILE = "hbase.rest.keytab.file"; + String REST_KERBEROS_PRINCIPAL = "hbase.rest.kerberos.principal"; + String REST_AUTHENTICATION_TYPE = "hbase.rest.authentication.type"; + String REST_AUTHENTICATION_PRINCIPAL = "hbase.rest.authentication.kerberos.principal"; + + String REST_SSL_ENABLED = "hbase.rest.ssl.enabled"; + String REST_SSL_KEYSTORE_STORE = "hbase.rest.ssl.keystore.store"; + String REST_SSL_KEYSTORE_PASSWORD = "hbase.rest.ssl.keystore.password"; + String REST_SSL_KEYSTORE_KEYPASSWORD = "hbase.rest.ssl.keystore.keypassword"; + String REST_SSL_EXCLUDE_CIPHER_SUITES = "hbase.rest.ssl.exclude.cipher.suites"; + String REST_SSL_INCLUDE_CIPHER_SUITES = "hbase.rest.ssl.include.cipher.suites"; + String REST_SSL_EXCLUDE_PROTOCOLS = "hbase.rest.ssl.exclude.protocols"; + String REST_SSL_INCLUDE_PROTOCOLS = "hbase.rest.ssl.include.protocols"; + + String REST_THREAD_POOL_THREADS_MAX = "hbase.rest.threads.max"; + String REST_THREAD_POOL_THREADS_MIN = "hbase.rest.threads.min"; + String REST_THREAD_POOL_TASK_QUEUE_SIZE = "hbase.rest.task.queue.size"; + String REST_THREAD_POOL_THREAD_IDLE_TIMEOUT = "hbase.rest.thread.idle.timeout"; + String REST_CONNECTOR_ACCEPT_QUEUE_SIZE = "hbase.rest.connector.accept.queue.size"; + + String REST_DNS_NAMESERVER = "hbase.rest.dns.nameserver"; + String REST_DNS_INTERFACE = "hbase.rest.dns.interface"; + + String FILTER_CLASSES = "hbase.rest.filter.classes"; + String SCAN_START_ROW = "startrow"; + String SCAN_END_ROW = "endrow"; + String SCAN_COLUMN = "column"; + String SCAN_START_TIME = "starttime"; + String SCAN_END_TIME = "endtime"; + String SCAN_MAX_VERSIONS = "maxversions"; + String SCAN_BATCH_SIZE = "batchsize"; + String SCAN_LIMIT = "limit"; + String SCAN_FETCH_SIZE = "hbase.rest.scan.fetchsize"; + String SCAN_FILTER = "filter"; + String SCAN_REVERSED = "reversed"; + String SCAN_CACHE_BLOCKS = "cacheblocks"; + String CUSTOM_FILTERS = "hbase.rest.custom.filters"; + + String ROW_KEYS_PARAM_NAME = "row"; + /** If this query parameter is present when processing row or scanner resources, + it disables server side block caching */ + String NOCACHE_PARAM_NAME = "nocache"; +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java new file mode 100755 index 00000000..aefd8475 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java @@ -0,0 +1,74 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import org.apache.yetus.audience.InterfaceAudience; + +import javax.ws.rs.GET; +import javax.ws.rs.Produces; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.UriInfo; +import java.io.IOException; + +@InterfaceAudience.Private +public class ExistsResource extends ResourceBase { + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + TableResource tableResource; + + /** + * Constructor + * @param tableResource + * @throws IOException + */ + public ExistsResource(TableResource tableResource) throws IOException { + super(); + this.tableResource = tableResource; + } + + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY}) + public Response get(final @Context UriInfo uriInfo) { + try { + if (!tableResource.exists()) { + return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF) + .build(); + } + } catch (IOException e) { + return Response.status(Response.Status.SERVICE_UNAVAILABLE) + .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) + .build(); + } + ResponseBuilder response = Response.ok(); + response.cacheControl(cacheControl); + return response.build(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java new file mode 100755 index 00000000..57325b76 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java @@ -0,0 +1,129 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class MetricsREST { + + public MetricsRESTSource getSource() { + return source; + } + + private MetricsRESTSource source; + + public MetricsREST() { + source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); + } + + /** + * @param inc How much to add to requests. + */ + public void incrementRequests(final int inc) { + source.incrementRequests(inc); + } + + /** + * @param inc How much to add to sucessfulGetCount. + */ + public void incrementSucessfulGetRequests(final int inc) { + source.incrementSucessfulGetRequests(inc); + } + + /** + * @param inc How much to add to sucessfulPutCount. + */ + public void incrementSucessfulPutRequests(final int inc) { + source.incrementSucessfulPutRequests(inc); + } + + /** + * @param inc How much to add to failedPutCount. + */ + public void incrementFailedPutRequests(final int inc) { + source.incrementFailedPutRequests(inc); + } + + /** + * @param inc How much to add to failedGetCount. + */ + public void incrementFailedGetRequests(final int inc) { + source.incrementFailedGetRequests(inc); + } + + /** + * @param inc How much to add to sucessfulDeleteCount. + */ + public void incrementSucessfulDeleteRequests(final int inc) { + source.incrementSucessfulDeleteRequests(inc); + } + + /** + * @param inc How much to add to failedDeleteCount. + */ + public void incrementFailedDeleteRequests(final int inc) { + source.incrementFailedDeleteRequests(inc); + } + + /** + * @param inc How much to add to sucessfulScanCount. + */ + public synchronized void incrementSucessfulScanRequests(final int inc) { + source.incrementSucessfulScanRequests(inc); + } + + /** + * @param inc How much to add to failedScanCount. + */ + public void incrementFailedScanRequests(final int inc) { + source.incrementFailedScanRequests(inc); + } + + /** + * @param inc How much to add to sucessfulAppendCount. + */ + public synchronized void incrementSucessfulAppendRequests(final int inc) { + source.incrementSucessfulAppendRequests(inc); + } + + /** + * @param inc How much to add to failedAppendCount. + */ + public void incrementFailedAppendRequests(final int inc) { + source.incrementFailedAppendRequests(inc); + } + + /** + * @param inc How much to add to sucessfulIncrementCount. + */ + public synchronized void incrementSucessfulIncrementRequests(final int inc) { + source.incrementSucessfulIncrementRequests(inc); + } + + /** + * @param inc How much to add to failedIncrementCount. + */ + public void incrementFailedIncrementRequests(final int inc) { + source.incrementFailedIncrementRequests(inc); + } + +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java new file mode 100755 index 00000000..5bd7e2b2 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -0,0 +1,123 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.GET; +import javax.ws.rs.Produces; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.IOException; + +@InterfaceAudience.Private +public class MultiRowResource extends ResourceBase implements Constants { + private static final Logger LOG = LoggerFactory.getLogger(MultiRowResource.class); + + TableResource tableResource; + Integer versions = null; + String[] columns = null; + + /** + * Constructor + * + * @param tableResource + * @param versions + * @throws IOException + */ + public MultiRowResource(TableResource tableResource, String versions, String columnsStr) + throws IOException { + super(); + this.tableResource = tableResource; + + if (columnsStr != null && !columnsStr.equals("")) { + this.columns = columnsStr.split(","); + } + + if (versions != null) { + this.versions = Integer.valueOf(versions); + + } + } + + @GET + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context UriInfo uriInfo) { + MultivaluedMap params = uriInfo.getQueryParameters(); + + servlet.getMetrics().incrementRequests(1); + try { + CellSetModel model = new CellSetModel(); + for (String rk : params.get(ROW_KEYS_PARAM_NAME)) { + RowSpec rowSpec = new RowSpec(rk); + + if (this.versions != null) { + rowSpec.setMaxVersions(this.versions); + } + + if (this.columns != null) { + for (int i = 0; i < this.columns.length; i++) { + rowSpec.addColumn(Bytes.toBytes(this.columns[i])); + } + } + + ResultGenerator generator = + ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null, + !params.containsKey(NOCACHE_PARAM_NAME)); + Cell value = null; + RowModel rowModel = new RowModel(rowSpec.getRow()); + if (generator.hasNext()) { + while ((value = generator.next()) != null) { + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil + .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); + } + model.addRow(rowModel); + } else { + if (LOG.isTraceEnabled()) { + LOG.trace("The row : " + rk + " not found in the table."); + } + } + } + + if (model.getRows().isEmpty()) { + //If no rows found. + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("No rows found." 
+ CRLF) + .build(); + } else { + servlet.getMetrics().incrementSucessfulGetRequests(1); + return Response.ok(model).build(); + } + } catch (IOException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return processException(e); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java new file mode 100755 index 00000000..649395f2 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java @@ -0,0 +1,290 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel; +import org.apache.hadoop.hbase.rest.model.TableListModel; +import org.apache.hadoop.hbase.rest.model.TableModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.ServletContext; +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.IOException; +import java.util.List; + +/** + * Implements the following REST end points: + *
+ * /namespaces/{namespace} GET: get namespace properties. + * /namespaces/{namespace} POST: create namespace. + * /namespaces/{namespace} PUT: alter namespace. + * /namespaces/{namespace} DELETE: drop namespace. + * /namespaces/{namespace}/tables GET: list namespace's tables. + *
+ */ +@InterfaceAudience.Private +public class NamespacesInstanceResource extends ResourceBase { + + private static final Logger LOG = LoggerFactory.getLogger(NamespacesInstanceResource.class); + String namespace; + boolean queryTables = false; + + /** + * Constructor for standard NamespaceInstanceResource. + * @throws IOException + */ + public NamespacesInstanceResource(String namespace) throws IOException { + this(namespace, false); + } + + /** + * Constructor for querying namespace table list via NamespaceInstanceResource. + * @throws IOException + */ + public NamespacesInstanceResource(String namespace, boolean queryTables) throws IOException { + super(); + this.namespace = namespace; + this.queryTables = queryTables; + } + + /** + * Build a response for GET namespace description or GET list of namespace tables. + * @param context servlet context + * @param uriInfo (JAX-RS context variable) request URL + * @return A response containing NamespacesInstanceModel for a namespace descriptions and + * TableListModel for a list of namespace tables. + */ + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context ServletContext context, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + + // Respond to list of namespace tables requests. + if(queryTables){ + TableListModel tableModel = new TableListModel(); + try{ + List tables = + servlet.getAdmin().listTableDescriptorsByNamespace(Bytes.toBytes(namespace)); + for (TableDescriptor table : tables) { + tableModel.add(new TableModel(table.getTableName().getQualifierAsString())); + } + + servlet.getMetrics().incrementSucessfulGetRequests(1); + return Response.ok(tableModel).build(); + }catch(IOException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + throw new RuntimeException("Cannot retrieve table list for '" + namespace + "'."); + } + } + + // Respond to namespace description requests. + try { + NamespacesInstanceModel rowModel = + new NamespacesInstanceModel(servlet.getAdmin(), namespace); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return Response.ok(rowModel).build(); + } catch (IOException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); + } + } + + /** + * Build a response for PUT alter namespace with properties specified. + * @param model properties used for alter. + * @param uriInfo (JAX-RS context variable) request URL + * @return response code. + */ + @PUT + @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { + return processUpdate(model, true, uriInfo); + } + + /** + * Build a response for POST create namespace with properties specified. + * @param model properties used for create. + * @param uriInfo (JAX-RS context variable) request URL + * @return response code. + */ + @POST + @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response post(final NamespacesInstanceModel model, + final @Context UriInfo uriInfo) { + return processUpdate(model, false, uriInfo); + } + + + // Check that POST or PUT is valid and then update namespace. 
+ private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting, + final UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace((updateExisting ? "PUT " : "POST ") + uriInfo.getAbsolutePath()); + } + if (model == null) { + try { + model = new NamespacesInstanceModel(namespace); + } catch(IOException ioe) { + servlet.getMetrics().incrementFailedPutRequests(1); + throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); + } + } + servlet.getMetrics().incrementRequests(1); + + if (servlet.isReadOnly()) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); + } + + Admin admin = null; + boolean namespaceExists = false; + try { + admin = servlet.getAdmin(); + namespaceExists = doesNamespaceExist(admin, namespace); + }catch (IOException e) { + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } + + // Do not allow creation if namespace already exists. + if(!updateExisting && namespaceExists){ + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). + entity("Namespace '" + namespace + "' already exists. Use REST PUT " + + "to alter the existing namespace.").build(); + } + + // Do not allow altering if namespace does not exist. + if (updateExisting && !namespaceExists){ + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). + entity("Namespace '" + namespace + "' does not exist. Use " + + "REST POST to create the namespace.").build(); + } + + return createOrUpdate(model, uriInfo, admin, updateExisting); + } + + // Do the actual namespace create or alter. + private Response createOrUpdate(final NamespacesInstanceModel model, final UriInfo uriInfo, + final Admin admin, final boolean updateExisting) { + NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace); + builder.addConfiguration(model.getProperties()); + if(model.getProperties().size() > 0){ + builder.addConfiguration(model.getProperties()); + } + NamespaceDescriptor nsd = builder.build(); + + try{ + if(updateExisting){ + admin.modifyNamespace(nsd); + }else{ + admin.createNamespace(nsd); + } + }catch (IOException e) { + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } + + servlet.getMetrics().incrementSucessfulPutRequests(1); + + return updateExisting ? Response.ok(uriInfo.getAbsolutePath()).build() : + Response.created(uriInfo.getAbsolutePath()).build(); + } + + private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException{ + NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); + for(int i = 0; i < nd.length; i++){ + if(nd[i].getName().equals(namespaceName)){ + return true; + } + } + return false; + } + + /** + * Build a response for DELETE delete namespace. + * @param message value not used. + * @param headers value not used. + * @return response code. 
+ */ + @DELETE + public Response deleteNoBody(final byte[] message, + final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); + } + if (servlet.isReadOnly()) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); + } + + try{ + Admin admin = servlet.getAdmin(); + if (!doesNamespaceExist(admin, namespace)){ + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT). + entity("Namespace '" + namespace + "' does not exists. Cannot " + + "drop namespace.").build(); + } + + admin.deleteNamespace(namespace); + servlet.getMetrics().incrementSucessfulDeleteRequests(1); + return Response.ok().build(); + + } catch (IOException e) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return processException(e); + } + } + + /** + * Dispatch to NamespaceInstanceResource for getting list of tables. + */ + @Path("tables") + public NamespacesInstanceResource getNamespaceInstanceResource( + final @PathParam("tables") String namespace) throws IOException { + return new NamespacesInstanceResource(this.namespace, true); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java new file mode 100755 index 00000000..47b98f6a --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java @@ -0,0 +1,89 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.rest.model.NamespacesModel; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.ServletContext; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import java.io.IOException; + +/** + * Implements REST GET list of all namespaces. + *
+ * /namespaces + *
+ */ +@InterfaceAudience.Private +public class NamespacesResource extends ResourceBase { + + private static final Logger LOG = LoggerFactory.getLogger(NamespacesResource.class); + + /** + * Constructor + * @throws IOException + */ + public NamespacesResource() throws IOException { + super(); + } + + /** + * Build a response for a list of all namespaces request. + * @param context servlet context + * @param uriInfo (JAX-RS context variable) request URL + * @return a response for a version request + */ + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + try { + NamespacesModel rowModel = null; + rowModel = new NamespacesModel(servlet.getAdmin()); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return Response.ok(rowModel).build(); + } catch (IOException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + throw new RuntimeException("Cannot retrieve list of namespaces."); + } + } + + /** + * Dispatch to NamespaceInstanceResource + */ + @Path("{namespace}") + public NamespacesInstanceResource getNamespaceInstanceResource( + final @PathParam("namespace") String namespace) throws IOException { + return new NamespacesInstanceResource(namespace); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java new file mode 100755 index 00000000..d5e4354e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -0,0 +1,46 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Common interface for models capable of supporting protobuf marshalling + * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and + * ProtobufMessageBodyProducer adapters. + */ +@InterfaceAudience.Private +public interface ProtobufMessageHandler { + /** + * @return the protobuf represention of the model + */ + byte[] createProtobufOutput(); + + /** + * Initialize the model from a protobuf representation. 
+ * @param message the raw bytes of the protobuf message + * @return reference to self for convenience + * @throws IOException + */ + ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException; +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java new file mode 100755 index 00000000..1c137e4b --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.StreamingOutput; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class ProtobufStreamingOutput implements StreamingOutput { + private static final Logger LOG = LoggerFactory.getLogger(ProtobufStreamingOutput.class); + + private String contentType; + private ResultScanner resultScanner; + private int limit; + private int fetchSize; + + protected ProtobufStreamingOutput(ResultScanner scanner, String type, int limit, int fetchSize) { + this.resultScanner = scanner; + this.contentType = type; + this.limit = limit; + this.fetchSize = fetchSize; + if (LOG.isTraceEnabled()) { + LOG.trace("Created StreamingOutput with content type = " + this.contentType + + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize); + } + } + + @Override + public void write(OutputStream outStream) throws IOException, WebApplicationException { + Result[] rowsToSend; + if(limit < fetchSize){ + rowsToSend = this.resultScanner.next(limit); + writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream); + } else { + int count = limit; + while (count > 0) { + if (count < fetchSize) { + rowsToSend = this.resultScanner.next(count); + } else { + rowsToSend = this.resultScanner.next(this.fetchSize); + } + if(rowsToSend.length == 0){ + break; + } + count = count - rowsToSend.length; + writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream); + } + } + } + + private void writeToStream(CellSetModel model, String contentType, 
OutputStream outStream) + throws IOException { + byte[] objectBytes = model.createProtobufOutput(); + outStream.write(Bytes.toBytes((short)objectBytes.length)); + outStream.write(objectBytes); + outStream.flush(); + if (LOG.isTraceEnabled()) { + LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully."); + } + } + + private CellSetModel createModelFromResults(Result[] results) { + CellSetModel cellSetModel = new CellSetModel(); + for (Result rs : results) { + byte[] rowKey = rs.getRow(); + RowModel rModel = new RowModel(rowKey); + List kvs = rs.listCells(); + for (Cell kv : kvs) { + rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv + .getTimestamp(), CellUtil.cloneValue(kv))); + } + cellSetModel.addRow(rModel); + } + return cellSetModel; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java new file mode 100755 index 00000000..ae91d11e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -0,0 +1,449 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest; + +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import javax.servlet.DispatcherType; +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.log.HBaseMarkers; +import org.apache.hadoop.hbase.rest.filter.AuthFilter; +import org.apache.hadoop.hbase.rest.filter.GzipFilter; +import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter; +import org.apache.hadoop.hbase.rest.http.ClickjackingPreventionFilter; +import org.apache.hadoop.hbase.rest.http.HttpServerUtil; +import org.apache.hadoop.hbase.rest.http.InfoServer; +import org.apache.hadoop.hbase.rest.http.SecurityHeadersFilter; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.util.DNS; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.hadoop.hbase.util.Strings; +import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.yetus.audience.InterfaceAudience; +import org.eclipse.jetty.http.HttpVersion; +import org.eclipse.jetty.jmx.MBeanContainer; +import org.eclipse.jetty.server.HttpConfiguration; +import org.eclipse.jetty.server.HttpConnectionFactory; +import org.eclipse.jetty.server.SecureRequestCustomizer; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.SslConnectionFactory; +import org.eclipse.jetty.servlet.FilterHolder; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHolder; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.eclipse.jetty.util.thread.QueuedThreadPool; +import org.glassfish.jersey.server.ResourceConfig; +import org.glassfish.jersey.servlet.ServletContainer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; +import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException; +import org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser; + +/** + * Main class for launching REST gateway as a servlet hosted by Jetty. + *

+ * The following options are supported:
+ * <ul>
+ * <li>-p --port : service port</li>
+ * <li>-ro --readonly : server mode</li>
+ * </ul>
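+ * <p>
+ * The gateway can also be run as a daemon via <code>hbase-daemon.sh start|stop rest</code>.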
+ */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) +public class RESTServer implements Constants { + static Logger LOG = LoggerFactory.getLogger("RESTServer"); + + static final String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled"; + static final boolean REST_CSRF_ENABLED_DEFAULT = false; + boolean restCSRFEnabled = false; + static final String REST_CSRF_CUSTOM_HEADER_KEY ="hbase.rest.csrf.custom.header"; + static final String REST_CSRF_CUSTOM_HEADER_DEFAULT = "X-XSRF-HEADER"; + static final String REST_CSRF_METHODS_TO_IGNORE_KEY = "hbase.rest.csrf.methods.to.ignore"; + static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; + public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login"; + static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k + + private static final String PATH_SPEC_ANY = "/*"; + + static final String REST_HTTP_ALLOW_OPTIONS_METHOD = "hbase.rest.http.allow.options.method"; + // HTTP OPTIONS method is commonly used in REST APIs for negotiation. So it is enabled by default. + private static boolean REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT = true; + static final String REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY = + "hbase.rest-csrf.browser-useragents-regex"; + + // HACK, making this static for AuthFilter to get at our configuration. Necessary for unit tests. + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value={"ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL"}, + justification="For testing") + public static Configuration conf = null; + private final UserProvider userProvider; + private Server server; + private InfoServer infoServer; + + public RESTServer(Configuration conf) { + RESTServer.conf = conf; + this.userProvider = UserProvider.instantiate(conf); + } + + private static void printUsageAndExit(Options options, int exitCode) { + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("hbase rest start", "", options, + "\nTo run the REST server as a daemon, execute " + + "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", true); + System.exit(exitCode); + } + + void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) { + restCSRFEnabled = conf.getBoolean(REST_CSRF_ENABLED_KEY, REST_CSRF_ENABLED_DEFAULT); + if (restCSRFEnabled) { + Map restCsrfParams = RestCsrfPreventionFilter + .getFilterParams(conf, "hbase.rest-csrf."); + FilterHolder holder = new FilterHolder(); + holder.setName("csrf"); + holder.setClassName(RestCsrfPreventionFilter.class.getName()); + holder.setInitParameters(restCsrfParams); + ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); + } + } + + private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, + Configuration conf) { + FilterHolder holder = new FilterHolder(); + holder.setName("clickjackingprevention"); + holder.setClassName(ClickjackingPreventionFilter.class.getName()); + holder.setInitParameters(ClickjackingPreventionFilter.getDefaultParameters(conf)); + ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); + } + + private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf) { + FilterHolder holder = new FilterHolder(); + holder.setName("securityheaders"); + holder.setClassName(SecurityHeadersFilter.class.getName()); + holder.setInitParameters(SecurityHeadersFilter.getDefaultParameters(conf)); + ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); + } + + // login the server principal (if 
using secure Hadoop) + private static Pair> loginServerPrincipal( + UserProvider userProvider, Configuration conf) throws Exception { + Class containerClass = ServletContainer.class; + if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) { + String machineName = Strings.domainNamePointerToHostName( + DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), + conf.get(REST_DNS_NAMESERVER, "default"))); + String keytabFilename = conf.get(REST_KEYTAB_FILE); + Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(), + REST_KEYTAB_FILE + " should be set if security is enabled"); + String principalConfig = conf.get(REST_KERBEROS_PRINCIPAL); + Preconditions.checkArgument(principalConfig != null && !principalConfig.isEmpty(), + REST_KERBEROS_PRINCIPAL + " should be set if security is enabled"); + // Hook for unit tests, this will log out any other user and mess up tests. + if (!conf.getBoolean(SKIP_LOGIN_KEY, false)) { + userProvider.login(REST_KEYTAB_FILE, REST_KERBEROS_PRINCIPAL, machineName); + } + if (conf.get(REST_AUTHENTICATION_TYPE) != null) { + containerClass = RESTServletContainer.class; + FilterHolder authFilter = new FilterHolder(); + authFilter.setClassName(AuthFilter.class.getName()); + authFilter.setName("AuthenticationFilter"); + return new Pair<>(authFilter,containerClass); + } + } + return new Pair<>(null, containerClass); + } + + private static void parseCommandLine(String[] args, Configuration conf) { + Options options = new Options(); + options.addOption("p", "port", true, "Port to bind to [default: " + DEFAULT_LISTEN_PORT + "]"); + options.addOption("ro", "readonly", false, "Respond only to GET HTTP " + + "method requests [default: false]"); + options.addOption("i", "infoport", true, "Port for WEB UI"); + + CommandLine commandLine = null; + try { + commandLine = new PosixParser().parse(options, args); + } catch (ParseException e) { + LOG.error("Could not parse: ", e); + printUsageAndExit(options, -1); + } + + // check for user-defined port setting, if so override the conf + if (commandLine != null && commandLine.hasOption("port")) { + String val = commandLine.getOptionValue("port"); + conf.setInt("hbase.rest.port", Integer.parseInt(val)); + if (LOG.isDebugEnabled()) { + LOG.debug("port set to " + val); + } + } + + // check if server should only process GET requests, if so override the conf + if (commandLine != null && commandLine.hasOption("readonly")) { + conf.setBoolean("hbase.rest.readonly", true); + if (LOG.isDebugEnabled()) { + LOG.debug("readonly set to true"); + } + } + + // check for user-defined info server port setting, if so override the conf + if (commandLine != null && commandLine.hasOption("infoport")) { + String val = commandLine.getOptionValue("infoport"); + conf.setInt("hbase.rest.info.port", Integer.parseInt(val)); + if (LOG.isDebugEnabled()) { + LOG.debug("WEB UI port set to " + val); + } + } + + if (commandLine != null && commandLine.hasOption("skipLogin")) { + conf.setBoolean(SKIP_LOGIN_KEY, true); + if (LOG.isDebugEnabled()) { + LOG.debug("Skipping Kerberos login for REST server"); + } + } + + List remainingArgs = commandLine != null ? 
commandLine.getArgList() : new ArrayList<>(); + if (remainingArgs.size() != 1) { + printUsageAndExit(options, 1); + } + + String command = remainingArgs.get(0); + if ("start".equals(command)) { + // continue and start container + } else if ("stop".equals(command)) { + System.exit(1); + } else { + printUsageAndExit(options, 1); + } + } + + + /** + * Runs the REST server. + */ + public synchronized void run() throws Exception { + Pair> pair = loginServerPrincipal( + userProvider, conf); + FilterHolder authFilter = pair.getFirst(); + Class containerClass = pair.getSecond(); + RESTServlet servlet = RESTServlet.getInstance(conf, userProvider); + + // set up the Jersey servlet container for Jetty + ResourceConfig application = new ResourceConfig(). + packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class); + // Using our custom ServletContainer is tremendously important. This is what makes sure the + // UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself. + ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application); + ServletHolder sh = new ServletHolder(servletContainer); + + // Set the default max thread number to 100 to limit + // the number of concurrent requests so that REST server doesn't OOM easily. + // Jetty set the default max thread number to 250, if we don't set it. + // + // Our default min thread number 2 is the same as that used by Jetty. + int maxThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MAX, 100); + int minThreads = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREADS_MIN, 2); + // Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use + // bounded {@link ArrayBlockingQueue} with the given size + int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1); + int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); + QueuedThreadPool threadPool = queueSize > 0 ? 
+ new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : + new QueuedThreadPool(maxThreads, minThreads, idleTimeout); + + this.server = new Server(threadPool); + + // Setup JMX + MBeanContainer mbContainer=new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); + server.addEventListener(mbContainer); + server.addBean(mbContainer); + + + String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"); + int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080); + HttpConfiguration httpConfig = new HttpConfiguration(); + httpConfig.setSecureScheme("https"); + httpConfig.setSecurePort(servicePort); + httpConfig.setHeaderCacheSize(DEFAULT_HTTP_MAX_HEADER_SIZE); + httpConfig.setRequestHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE); + httpConfig.setResponseHeaderSize(DEFAULT_HTTP_MAX_HEADER_SIZE); + httpConfig.setSendServerVersion(false); + httpConfig.setSendDateHeader(false); + + ServerConnector serverConnector; + if (conf.getBoolean(REST_SSL_ENABLED, false)) { + HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig); + httpsConfig.addCustomizer(new SecureRequestCustomizer()); + + SslContextFactory sslCtxFactory = new SslContextFactory(); + String keystore = conf.get(REST_SSL_KEYSTORE_STORE); + String password = HBaseConfiguration.getPassword(conf, + REST_SSL_KEYSTORE_PASSWORD, null); + String keyPassword = HBaseConfiguration.getPassword(conf, + REST_SSL_KEYSTORE_KEYPASSWORD, password); + sslCtxFactory.setKeyStorePath(keystore); + sslCtxFactory.setKeyStorePassword(password); + sslCtxFactory.setKeyManagerPassword(keyPassword); + + String[] excludeCiphers = servlet.getConfiguration().getStrings( + REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + if (excludeCiphers.length != 0) { + sslCtxFactory.setExcludeCipherSuites(excludeCiphers); + } + String[] includeCiphers = servlet.getConfiguration().getStrings( + REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + if (includeCiphers.length != 0) { + sslCtxFactory.setIncludeCipherSuites(includeCiphers); + } + + String[] excludeProtocols = servlet.getConfiguration().getStrings( + REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + if (excludeProtocols.length != 0) { + sslCtxFactory.setExcludeProtocols(excludeProtocols); + } + String[] includeProtocols = servlet.getConfiguration().getStrings( + REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + if (includeProtocols.length != 0) { + sslCtxFactory.setIncludeProtocols(includeProtocols); + } + + serverConnector = new ServerConnector(server, + new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), + new HttpConnectionFactory(httpsConfig)); + } else { + serverConnector = new ServerConnector(server, new HttpConnectionFactory(httpConfig)); + } + + int acceptQueueSize = servlet.getConfiguration().getInt(REST_CONNECTOR_ACCEPT_QUEUE_SIZE, -1); + if (acceptQueueSize >= 0) { + serverConnector.setAcceptQueueSize(acceptQueueSize); + } + + serverConnector.setPort(servicePort); + serverConnector.setHost(host); + + server.addConnector(serverConnector); + server.setStopAtShutdown(true); + + // set up context + ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); + ctxHandler.addServlet(sh, PATH_SPEC_ANY); + if (authFilter != null) { + ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); + } + + // Load filters from configuration. 
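+ // GzipFilter is installed by default when no custom filter classes are configured.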
+ String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, + GzipFilter.class.getName()); + for (String filter : filterClasses) { + filter = filter.trim(); + ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); + } + addCSRFFilter(ctxHandler, conf); + addClickjackingPreventionFilter(ctxHandler, conf); + addSecurityHeadersFilter(ctxHandler, conf); + HttpServerUtil.constrainHttpMethods(ctxHandler, servlet.getConfiguration() + .getBoolean(REST_HTTP_ALLOW_OPTIONS_METHOD, REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT)); + + // Put up info server. + int port = conf.getInt("hbase.rest.info.port", 8085); + if (port >= 0) { + conf.setLong("startcode", System.currentTimeMillis()); + String a = conf.get("hbase.rest.info.bindAddress", "0.0.0.0"); + this.infoServer = new InfoServer("rest", a, port, false, conf); + this.infoServer.setAttribute("hbase.conf", conf); + this.infoServer.start(); + } + try { + // start server + server.start(); + } catch (Exception e) { + LOG.error(HBaseMarkers.FATAL, "Failed to start server", e); + throw e; + } + } + + public synchronized void join() throws Exception { + if (server == null) { + throw new IllegalStateException("Server is not running"); + } + server.join(); + } + + public synchronized void stop() throws Exception { + if (server == null) { + throw new IllegalStateException("Server is not running"); + } + server.stop(); + server = null; + RESTServlet.stop(); + } + + public synchronized int getPort() { + if (server == null) { + throw new IllegalStateException("Server is not running"); + } + return ((ServerConnector) server.getConnectors()[0]).getLocalPort(); + } + + @SuppressWarnings("deprecation") + public synchronized int getInfoPort() { + if (infoServer == null) { + throw new IllegalStateException("InfoServer is not running"); + } + return infoServer.getPort(); + } + + public Configuration getConf() { + return conf; + } + + /** + * The main method for the HBase rest server. + * @param args command-line arguments + * @throws Exception exception + */ + public static void main(String[] args) throws Exception { + LOG.info("***** STARTING service '" + RESTServer.class.getSimpleName() + "' *****"); + VersionInfo.logVersion(); + final Configuration conf = HBaseConfiguration.create(); + parseCommandLine(args, conf); + RESTServer server = new RESTServer(conf); + + try { + server.run(); + server.join(); + } catch (Exception e) { + System.exit(1); + } + + LOG.info("***** STOPPING service '" + RESTServer.class.getSimpleName() + "' *****"); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java new file mode 100755 index 00000000..4213fa9c --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -0,0 +1,177 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.rest.util.ConnectionCache; +import org.apache.hadoop.hbase.rest.util.JvmPauseMonitor; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.ParseFilter; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.ProxyUsers; + +/** + * Singleton class encapsulating global REST servlet state and functions. + */ +@InterfaceAudience.Private +public class RESTServlet implements Constants { + private static final Logger LOG = LoggerFactory.getLogger(RESTServlet.class); + private static RESTServlet INSTANCE; + private final Configuration conf; + private final MetricsREST metrics; + private final ConnectionCache connectionCache; + private final UserGroupInformation realUser; + private final JvmPauseMonitor pauseMonitor; + + public static final String CLEANUP_INTERVAL = "hbase.rest.connection.cleanup-interval"; + public static final String MAX_IDLETIME = "hbase.rest.connection.max-idletime"; + static final String HBASE_REST_SUPPORT_PROXYUSER = "hbase.rest.support.proxyuser"; + + UserGroupInformation getRealUser() { + return realUser; + } + + /** + * @return the RESTServlet singleton instance + */ + public synchronized static RESTServlet getInstance() { + assert(INSTANCE != null); + return INSTANCE; + } + + /** + * @return the ConnectionCache instance + */ + public ConnectionCache getConnectionCache() { + return connectionCache; + } + + /** + * @param conf Existing configuration to use in rest servlet + * @param userProvider the login user provider + * @return the RESTServlet singleton instance + * @throws IOException + */ + public synchronized static RESTServlet getInstance(Configuration conf, + UserProvider userProvider) throws IOException { + if (INSTANCE == null) { + INSTANCE = new RESTServlet(conf, userProvider); + } + return INSTANCE; + } + + public synchronized static void stop() { + if (INSTANCE != null) { + INSTANCE.shutdown(); + INSTANCE = null; + } + } + + /** + * Constructor with existing configuration + * @param conf existing configuration + * @param userProvider the login user provider + * @throws IOException + */ + RESTServlet(final Configuration conf, + final UserProvider userProvider) throws IOException { + this.realUser = userProvider.getCurrent().getUGI(); + this.conf = conf; + registerCustomFilter(conf); + + int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000); + int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000); + connectionCache = new ConnectionCache( + conf, userProvider, cleanInterval, maxIdleTime); + if (supportsProxyuser()) { + ProxyUsers.refreshSuperUserGroupsConfiguration(conf); + } + + metrics = new MetricsREST(); + + pauseMonitor = new JvmPauseMonitor(conf, metrics.getSource()); + pauseMonitor.start(); + 
} + + Admin getAdmin() throws IOException { + return connectionCache.getAdmin(); + } + + /** + * Caller closes the table afterwards. + */ + Table getTable(String tableName) throws IOException { + return connectionCache.getTable(tableName); + } + + Configuration getConfiguration() { + return conf; + } + + MetricsREST getMetrics() { + return metrics; + } + + /** + * Helper method to determine if server should + * only respond to GET HTTP method requests. + * @return boolean for server read-only state + */ + boolean isReadOnly() { + return getConfiguration().getBoolean("hbase.rest.readonly", false); + } + + void setEffectiveUser(String effectiveUser) { + connectionCache.setEffectiveUser(effectiveUser); + } + + /** + * Shutdown any services that need to stop + */ + void shutdown() { + if (pauseMonitor != null) pauseMonitor.stop(); + if (connectionCache != null) connectionCache.shutdown(); + } + + boolean supportsProxyuser() { + return conf.getBoolean(HBASE_REST_SUPPORT_PROXYUSER, false); + } + + private void registerCustomFilter(Configuration conf) { + String[] filterList = conf.getStrings(Constants.CUSTOM_FILTERS); + if (filterList != null) { + for (String filterClass : filterList) { + String[] filterPart = filterClass.split(":"); + if (filterPart.length != 2) { + LOG.warn( + "Invalid filter specification " + filterClass + " - skipping"); + } else { + ParseFilter.registerFilter(filterPart[0], filterPart[1]); + } + } + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java new file mode 100755 index 00000000..2b6a6c7f --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.yetus.audience.InterfaceAudience; +import org.glassfish.jersey.server.ResourceConfig; +import org.glassfish.jersey.servlet.ServletContainer; + +/** + * REST servlet container. It is used to get the remote request user + * without going through @HttpContext, so that we can minimize code changes. 
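+ * The proxy user to impersonate is taken from the <code>doAs</code> request parameter.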
+ */ +@InterfaceAudience.Private +public class RESTServletContainer extends ServletContainer { + private static final long serialVersionUID = -2474255003443394314L; + + public RESTServletContainer(ResourceConfig config) { + super(config); + } + + /** + * This container is used only if authentication and + * impersonation is enabled. The remote request user is used + * as a proxy user for impersonation in invoking any REST service. + */ + @Override + public void service(final HttpServletRequest request, + final HttpServletResponse response) throws ServletException, IOException { + final String doAsUserFromQuery = request.getParameter("doAs"); + RESTServlet servlet = RESTServlet.getInstance(); + if (doAsUserFromQuery != null) { + Configuration conf = servlet.getConfiguration(); + if (!servlet.supportsProxyuser()) { + throw new ServletException("Support for proxyuser is not configured"); + } + // Authenticated remote user is attempting to do 'doAs' proxy user. + UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser()); + // create and attempt to authorize a proxy user (the client is attempting + // to do proxy user) + ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi); + // validate the proxy user authorization + try { + ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf); + } catch(AuthorizationException e) { + throw new ServletException(e.getMessage()); + } + servlet.setEffectiveUser(doAsUserFromQuery); + } else { + String effectiveUser = request.getRemoteUser(); + servlet.setEffectiveUser(effectiveUser); + } + super.service(request, response); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java new file mode 100755 index 00000000..b70c13d2 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -0,0 +1,108 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest; + +import javax.ws.rs.GET; +import javax.ws.rs.Produces; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.UriInfo; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.rest.model.TableInfoModel; +import org.apache.hadoop.hbase.rest.model.TableRegionModel; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class RegionsResource extends ResourceBase { + private static final Logger LOG = LoggerFactory.getLogger(RegionsResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + TableResource tableResource; + + /** + * Constructor + * @param tableResource + * @throws IOException + */ + public RegionsResource(TableResource tableResource) throws IOException { + super(); + this.tableResource = tableResource; + } + + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + try { + TableName tableName = TableName.valueOf(tableResource.getName()); + TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); + + Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration()); + List> regions = MetaTableAccessor + .getTableRegionsAndLocations(connection, tableName); + connection.close(); + for (Pair e: regions) { + RegionInfo hri = e.getFirst(); + ServerName addr = e.getSecond(); + model.add( + new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(), + hri.getStartKey(), hri.getEndKey(), addr.getAddress().toString())); + } + ResponseBuilder response = Response.ok(model); + response.cacheControl(cacheControl); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } catch (TableNotFoundException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF) + .build(); + } catch (IOException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.SERVICE_UNAVAILABLE) + .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) + .build(); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java new file mode 100755 index 00000000..a0deb7e7 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; +import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class ResourceBase implements Constants { + + RESTServlet servlet; + Class accessDeniedClazz; + + public ResourceBase() throws IOException { + servlet = RESTServlet.getInstance(); + try { + accessDeniedClazz = Class.forName("org.apache.hadoop.hbase.security.AccessDeniedException"); + } catch (ClassNotFoundException e) { + } + } + + protected Response processException(Throwable exp) { + Throwable curr = exp; + if(accessDeniedClazz != null) { + //some access denied exceptions are buried + while (curr != null) { + if(accessDeniedClazz.isAssignableFrom(curr.getClass())) { + throw new WebApplicationException( + Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF + + StringUtils.stringifyException(exp) + CRLF) + .build()); + } + curr = curr.getCause(); + } + } + //TableNotFound may also be buried one level deep + if (exp instanceof TableNotFoundException || + exp.getCause() instanceof TableNotFoundException) { + throw new WebApplicationException( + Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF + + StringUtils.stringifyException(exp) + CRLF) + .build()); + } + if (exp instanceof NoSuchColumnFamilyException){ + throw new WebApplicationException( + Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF + + StringUtils.stringifyException(exp) + CRLF) + .build()); + } + if (exp instanceof RuntimeException) { + throw new WebApplicationException( + Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request" + CRLF + + StringUtils.stringifyException(exp) + CRLF) + .build()); + } + if (exp instanceof RetriesExhaustedException) { + RetriesExhaustedException retryException = (RetriesExhaustedException) exp; + processException(retryException.getCause()); + } + throw new WebApplicationException( + Response.status(Response.Status.SERVICE_UNAVAILABLE) + .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF + + StringUtils.stringifyException(exp) + CRLF) + .build()); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java new file mode 100755 index 00000000..d48bcb45 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java @@ -0,0 +1,49 
@@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.yetus.audience.InterfaceAudience; + +import java.io.IOException; +import java.util.Iterator; + +@InterfaceAudience.Private +public abstract class ResultGenerator implements Iterator { + + public static ResultGenerator fromRowSpec(final String table, + final RowSpec rowspec, final Filter filter, final boolean cacheBlocks) + throws IOException { + if (rowspec.isSingleRow()) { + return new RowResultGenerator(table, rowspec, filter, cacheBlocks); + } else { + return new ScannerResultGenerator(table, rowspec, filter, cacheBlocks); + } + } + + public static Filter buildFilter(final String filter) throws Exception { + return ScannerModel.buildFilter(filter); + } + + public abstract void putBack(Cell kv); + + public abstract void close(); +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java new file mode 100755 index 00000000..98217451 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -0,0 +1,110 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import javax.ws.rs.core.Response.ResponseBuilder; + +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.rest.model.TableListModel; +import org.apache.hadoop.hbase.rest.model.TableModel; + +@Path("/") +@InterfaceAudience.Private +public class RootResource extends ResourceBase { + private static final Logger LOG = LoggerFactory.getLogger(RootResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + /** + * Constructor + * @throws IOException + */ + public RootResource() throws IOException { + super(); + } + + private final TableListModel getTableList() throws IOException { + TableListModel tableList = new TableListModel(); + TableName[] tableNames = servlet.getAdmin().listTableNames(); + for (TableName name: tableNames) { + tableList.add(new TableModel(name.getNameAsString())); + } + return tableList; + } + + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + try { + ResponseBuilder response = Response.ok(getTableList()); + response.cacheControl(cacheControl); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return processException(e); + } + } + + @Path("status/cluster") + public StorageClusterStatusResource getClusterStatusResource() + throws IOException { + return new StorageClusterStatusResource(); + } + + @Path("version") + public VersionResource getVersionResource() throws IOException { + return new VersionResource(); + } + + @Path("{table}") + public TableResource getTableResource( + final @PathParam("table") String table) throws IOException { + return new TableResource(table); + } + + @Path("namespaces") + public NamespacesResource getNamespaceResource() throws IOException { + return new NamespacesResource(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java new file mode 100755 index 00000000..3ac74723 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -0,0 +1,889 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Produces; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.UriInfo; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.Cell.Type; +import org.apache.hadoop.hbase.CellBuilderFactory; +import org.apache.hadoop.hbase.CellBuilderType; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class RowResource extends ResourceBase { + private static final Logger LOG = LoggerFactory.getLogger(RowResource.class); + + private static final String CHECK_PUT = "put"; + private static final String CHECK_DELETE = "delete"; + private static final String CHECK_APPEND = "append"; + private static final String CHECK_INCREMENT = "increment"; + + private TableResource tableResource; + private RowSpec rowspec; + private String check = null; + private boolean returnResult = false; + + /** + * Constructor + * @param tableResource + * @param rowspec + * @param versions + * @param check + * @param returnResult + * @throws IOException + */ + public RowResource(TableResource tableResource, String rowspec, + String versions, String check, String returnResult) throws IOException { + super(); + this.tableResource = tableResource; + this.rowspec = new RowSpec(rowspec); + if (versions != null) { + this.rowspec.setMaxVersions(Integer.parseInt(versions)); + } + this.check = check; + if (returnResult != null) { + this.returnResult = Boolean.valueOf(returnResult); + } + } + + @GET + @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + MultivaluedMap params = uriInfo.getQueryParameters(); + try { + ResultGenerator generator = + ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, + !params.containsKey(NOCACHE_PARAM_NAME)); + if (!generator.hasNext()) { + servlet.getMetrics().incrementFailedGetRequests(1); + 
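+ // No cells matched the row specification; report 404 Not Found.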
return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF) + .build(); + } + int count = 0; + CellSetModel model = new CellSetModel(); + Cell value = generator.next(); + byte[] rowKey = CellUtil.cloneRow(value); + RowModel rowModel = new RowModel(rowKey); + do { + if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) { + model.addRow(rowModel); + rowKey = CellUtil.cloneRow(value); + rowModel = new RowModel(rowKey); + } + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), + value.getTimestamp(), CellUtil.cloneValue(value))); + if (++count > rowspec.getMaxValues()) { + break; + } + value = generator.next(); + } while (value != null); + model.addRow(rowModel); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return Response.ok(model).build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } + } + + @GET + @Produces(MIMETYPE_BINARY) + public Response getBinary(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + } + servlet.getMetrics().incrementRequests(1); + // doesn't make sense to use a non specific coordinate as this can only + // return a single cell + if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + + "in the row. Using the 'Accept' header with one of these formats lets you " + + "retrieve the entire row if it has multiple columns: " + + // Same as the @Produces list for the get method. 
+ MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + + MIMETYPE_PROTOBUF + ", " + MIMETYPE_PROTOBUF_IETF + + CRLF).build(); + } + MultivaluedMap params = uriInfo.getQueryParameters(); + try { + ResultGenerator generator = + ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, + !params.containsKey(NOCACHE_PARAM_NAME)); + if (!generator.hasNext()) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF) + .build(); + } + Cell value = generator.next(); + ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); + response.header("X-Timestamp", value.getTimestamp()); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return processException(e); + } + } + + Response update(final CellSetModel model, final boolean replace) { + servlet.getMetrics().incrementRequests(1); + if (servlet.isReadOnly()) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) + .build(); + } + + if (CHECK_PUT.equalsIgnoreCase(check)) { + return checkAndPut(model); + } else if (CHECK_DELETE.equalsIgnoreCase(check)) { + return checkAndDelete(model); + } else if (CHECK_APPEND.equalsIgnoreCase(check)) { + return append(model); + } else if (CHECK_INCREMENT.equalsIgnoreCase(check)) { + return increment(model); + } else if (check != null && check.length() > 0) { + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF) + .build(); + } + + Table table = null; + try { + List rows = model.getRows(); + List puts = new ArrayList<>(); + for (RowModel row: rows) { + byte[] key = row.getKey(); + if (key == null) { + key = rowspec.getRow(); + } + if (key == null) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF) + .build(); + } + Put put = new Put(key); + int i = 0; + for (CellModel cell: row.getCells()) { + byte[] col = cell.getColumn(); + if (col == null) try { + col = rowspec.getColumns()[i++]; + } catch (ArrayIndexOutOfBoundsException e) { + col = null; + } + if (col == null) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." 
+ CRLF) + .build(); + } + byte [][] parts = CellUtil.parseColumn(col); + if (parts.length != 2) { + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) + .build(); + } + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(put.getRow()) + .setFamily(parts[0]) + .setQualifier(parts[1]) + .setTimestamp(cell.getTimestamp()) + .setType(Type.Put) + .setValue(cell.getValue()) + .build()); + } + puts.add(put); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + put.toString()); + } + } + table = servlet.getTable(tableResource.getName()); + table.put(puts); + ResponseBuilder response = Response.ok(); + servlet.getMetrics().incrementSucessfulPutRequests(1); + return response.build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } finally { + if (table != null) try { + table.close(); + } catch (IOException ioe) { + LOG.debug("Exception received while closing the table", ioe); + } + } + } + + // This currently supports only update of one row at a time. + Response updateBinary(final byte[] message, final HttpHeaders headers, + final boolean replace) { + servlet.getMetrics().incrementRequests(1); + if (servlet.isReadOnly()) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) + .build(); + } + Table table = null; + try { + byte[] row = rowspec.getRow(); + byte[][] columns = rowspec.getColumns(); + byte[] column = null; + if (columns != null) { + column = columns[0]; + } + long timestamp = HConstants.LATEST_TIMESTAMP; + List vals = headers.getRequestHeader("X-Row"); + if (vals != null && !vals.isEmpty()) { + row = Bytes.toBytes(vals.get(0)); + } + vals = headers.getRequestHeader("X-Column"); + if (vals != null && !vals.isEmpty()) { + column = Bytes.toBytes(vals.get(0)); + } + vals = headers.getRequestHeader("X-Timestamp"); + if (vals != null && !vals.isEmpty()) { + timestamp = Long.parseLong(vals.get(0)); + } + if (column == null) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." 
+ CRLF) + .build(); + } + Put put = new Put(row); + byte parts[][] = CellUtil.parseColumn(column); + if (parts.length != 2) { + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) + .build(); + } + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(put.getRow()) + .setFamily(parts[0]) + .setQualifier(parts[1]) + .setTimestamp(timestamp) + .setType(Type.Put) + .setValue(message) + .build()); + table = servlet.getTable(tableResource.getName()); + table.put(put); + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + put.toString()); + } + servlet.getMetrics().incrementSucessfulPutRequests(1); + return Response.ok().build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } finally { + if (table != null) try { + table.close(); + } catch (IOException ioe) { + LOG.debug("Exception received while closing the table", ioe); + } + } + } + + @PUT + @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response put(final CellSetModel model, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath() + + " " + uriInfo.getQueryParameters()); + } + return update(model, true); + } + + @PUT + @Consumes(MIMETYPE_BINARY) + public Response putBinary(final byte[] message, + final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + } + return updateBinary(message, headers, true); + } + + @POST + @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response post(final CellSetModel model, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath() + + " " + uriInfo.getQueryParameters()); + } + return update(model, false); + } + + @POST + @Consumes(MIMETYPE_BINARY) + public Response postBinary(final byte[] message, + final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY); + } + return updateBinary(message, headers, false); + } + + @DELETE + public Response delete(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + if (servlet.isReadOnly()) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) + .build(); + } + Delete delete = null; + if (rowspec.hasTimestamp()) + delete = new Delete(rowspec.getRow(), rowspec.getTimestamp()); + else + delete = new Delete(rowspec.getRow()); + + for (byte[] column: rowspec.getColumns()) { + byte[][] split = CellUtil.parseColumn(column); + if (rowspec.hasTimestamp()) { + if (split.length == 1) { + delete.addFamily(split[0], rowspec.getTimestamp()); + } else if (split.length == 2) { + delete.addColumns(split[0], split[1], rowspec.getTimestamp()); + } else { + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) + .build(); + } + } else { + if (split.length == 1) { + delete.addFamily(split[0]); + } else if (split.length == 2) { + delete.addColumns(split[0], split[1]); + } else { + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad 
request" + CRLF) + .build(); + } + } + } + Table table = null; + try { + table = servlet.getTable(tableResource.getName()); + table.delete(delete); + servlet.getMetrics().incrementSucessfulDeleteRequests(1); + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + delete.toString()); + } + } catch (Exception e) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return processException(e); + } finally { + if (table != null) try { + table.close(); + } catch (IOException ioe) { + LOG.debug("Exception received while closing the table", ioe); + } + } + return Response.ok().build(); + } + + /** + * Validates the input request parameters, parses columns from CellSetModel, + * and invokes checkAndPut on HTable. + * + * @param model instance of CellSetModel + * @return Response 200 OK, 304 Not modified, 400 Bad request + */ + Response checkAndPut(final CellSetModel model) { + Table table = null; + try { + table = servlet.getTable(tableResource.getName()); + if (model.getRows().size() != 1) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); + } + + RowModel rowModel = model.getRows().get(0); + byte[] key = rowModel.getKey(); + if (key == null) { + key = rowspec.getRow(); + } + + List cellModels = rowModel.getCells(); + int cellModelCount = cellModels.size(); + if (key == null || cellModelCount <= 1) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response + .status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT) + .entity( + "Bad request: Either row key is null or no data found for columns specified." + CRLF) + .build(); + } + + Put put = new Put(key); + boolean retValue; + CellModel valueToCheckCell = cellModels.get(cellModelCount - 1); + byte[] valueToCheckColumn = valueToCheckCell.getColumn(); + byte[][] valueToPutParts = CellUtil.parseColumn(valueToCheckColumn); + if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) { + CellModel valueToPutCell = null; + + // Copy all the cells to the Put request + // and track if the check cell's latest value is also sent + for (int i = 0, n = cellModelCount - 1; i < n ; i++) { + CellModel cell = cellModels.get(i); + byte[] col = cell.getColumn(); + + if (col == null) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) + .build(); + } + + byte [][] parts = CellUtil.parseColumn(col); + + if (parts.length != 2) { + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) + .build(); + } + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(put.getRow()) + .setFamily(parts[0]) + .setQualifier(parts[1]) + .setTimestamp(cell.getTimestamp()) + .setType(Type.Put) + .setValue(cell.getValue()) + .build()); + if(Bytes.equals(col, + valueToCheckCell.getColumn())) { + valueToPutCell = cell; + } + } + + if (valueToPutCell == null) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: The column to put and check do not match." 
+ CRLF).build(); + } else { + retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1]) + .ifEquals(valueToCheckCell.getValue()).thenPut(put); + } + } else { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) + .build(); + } + + if (LOG.isTraceEnabled()) { + LOG.trace("CHECK-AND-PUT " + put.toString() + ", returns " + retValue); + } + if (!retValue) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.NOT_MODIFIED) + .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF) + .build(); + } + ResponseBuilder response = Response.ok(); + servlet.getMetrics().incrementSucessfulPutRequests(1); + return response.build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } finally { + if (table != null) try { + table.close(); + } catch (IOException ioe) { + LOG.debug("Exception received while closing the table", ioe); + } + } + } + + /** + * Validates the input request parameters, parses columns from CellSetModel, + * and invokes checkAndDelete on HTable. + * + * @param model instance of CellSetModel + * @return Response 200 OK, 304 Not modified, 400 Bad request + */ + Response checkAndDelete(final CellSetModel model) { + Table table = null; + Delete delete = null; + try { + table = servlet.getTable(tableResource.getName()); + if (model.getRows().size() != 1) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) + .build(); + } + RowModel rowModel = model.getRows().get(0); + byte[] key = rowModel.getKey(); + if (key == null) { + key = rowspec.getRow(); + } + if (key == null) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) + .build(); + } + + List cellModels = rowModel.getCells(); + int cellModelCount = cellModels.size(); + + delete = new Delete(key); + boolean retValue; + CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1); + byte[] valueToDeleteColumn = valueToDeleteCell.getColumn(); + if (valueToDeleteColumn == null) { + try { + valueToDeleteColumn = rowspec.getColumns()[0]; + } catch (final ArrayIndexOutOfBoundsException e) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF) + .build(); + } + } + + byte[][] parts ; + // Copy all the cells to the Delete request if extra cells are sent + if(cellModelCount > 1) { + for (int i = 0, n = cellModelCount - 1; i < n; i++) { + CellModel cell = cellModels.get(i); + byte[] col = cell.getColumn(); + + if (col == null) { + servlet.getMetrics().incrementFailedPutRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." 
+ CRLF) + .build(); + } + + parts = CellUtil.parseColumn(col); + + if (parts.length == 1) { + // Only Column Family is specified + delete.addFamily(parts[0], cell.getTimestamp()); + } else if (parts.length == 2) { + delete.addColumn(parts[0], parts[1], cell.getTimestamp()); + } else { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT) + .entity("Bad request: Column to delete incorrectly specified." + CRLF) + .build(); + } + } + } + + parts = CellUtil.parseColumn(valueToDeleteColumn); + if (parts.length == 2) { + if (parts[1].length != 0) { + // To support backcompat of deleting a cell + // if that is the only cell passed to the rest api + if(cellModelCount == 1) { + delete.addColumns(parts[0], parts[1]); + } + retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1]) + .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + } else { + // The case of empty qualifier. + if(cellModelCount == 1) { + delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY)); + } + retValue = table.checkAndMutate(key, parts[0]) + .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + } + } else { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column to check incorrectly specified." + CRLF) + .build(); + } + + if (LOG.isTraceEnabled()) { + LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " + + retValue); + } + + if (!retValue) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return Response.status(Response.Status.NOT_MODIFIED) + .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF) + .build(); + } + ResponseBuilder response = Response.ok(); + servlet.getMetrics().incrementSucessfulDeleteRequests(1); + return response.build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return processException(e); + } finally { + if (table != null) try { + table.close(); + } catch (IOException ioe) { + LOG.debug("Exception received while closing the table", ioe); + } + } + } + + /** + * Validates the input request parameters, parses columns from CellSetModel, + * and invokes Append on HTable. + * + * @param model instance of CellSetModel + * @return Response 200 OK, 304 Not modified, 400 Bad request + */ + Response append(final CellSetModel model) { + Table table = null; + Append append = null; + try { + table = servlet.getTable(tableResource.getName()); + if (model.getRows().size() != 1) { + servlet.getMetrics().incrementFailedAppendRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) + .build(); + } + RowModel rowModel = model.getRows().get(0); + byte[] key = rowModel.getKey(); + if (key == null) { + key = rowspec.getRow(); + } + if (key == null) { + servlet.getMetrics().incrementFailedAppendRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." 
+ CRLF) + .build(); + } + + append = new Append(key); + append.setReturnResults(returnResult); + int i = 0; + for (CellModel cell: rowModel.getCells()) { + byte[] col = cell.getColumn(); + if (col == null) { + try { + col = rowspec.getColumns()[i++]; + } catch (ArrayIndexOutOfBoundsException e) { + col = null; + } + } + if (col == null) { + servlet.getMetrics().incrementFailedAppendRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) + .build(); + } + byte [][] parts = CellUtil.parseColumn(col); + if (parts.length != 2) { + servlet.getMetrics().incrementFailedAppendRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) + .build(); + } + append.addColumn(parts[0], parts[1], cell.getValue()); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("APPEND " + append.toString()); + } + Result result = table.append(append); + if (returnResult) { + if (result.isEmpty()) { + servlet.getMetrics().incrementFailedAppendRequests(1); + return Response.status(Response.Status.NOT_MODIFIED) + .type(MIMETYPE_TEXT).entity("Append return empty." + CRLF) + .build(); + } + + CellSetModel rModel = new CellSetModel(); + RowModel rRowModel = new RowModel(result.getRow()); + for (Cell cell : result.listCells()) { + rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), + cell.getTimestamp(), CellUtil.cloneValue(cell))); + } + rModel.addRow(rRowModel); + servlet.getMetrics().incrementSucessfulAppendRequests(1); + return Response.ok(rModel).build(); + } + servlet.getMetrics().incrementSucessfulAppendRequests(1); + return Response.ok().build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedAppendRequests(1); + return processException(e); + } finally { + if (table != null) try { + table.close(); + } catch (IOException ioe) { + LOG.debug("Exception received while closing the table" + table.getName(), ioe); + } + } + } + + /** + * Validates the input request parameters, parses columns from CellSetModel, + * and invokes Increment on HTable. + * + * @param model instance of CellSetModel + * @return Response 200 OK, 304 Not modified, 400 Bad request + */ + Response increment(final CellSetModel model) { + Table table = null; + Increment increment = null; + try { + table = servlet.getTable(tableResource.getName()); + if (model.getRows().size() != 1) { + servlet.getMetrics().incrementFailedIncrementRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) + .build(); + } + RowModel rowModel = model.getRows().get(0); + byte[] key = rowModel.getKey(); + if (key == null) { + key = rowspec.getRow(); + } + if (key == null) { + servlet.getMetrics().incrementFailedIncrementRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." 
+ CRLF) + .build(); + } + + increment = new Increment(key); + increment.setReturnResults(returnResult); + int i = 0; + for (CellModel cell: rowModel.getCells()) { + byte[] col = cell.getColumn(); + if (col == null) { + try { + col = rowspec.getColumns()[i++]; + } catch (ArrayIndexOutOfBoundsException e) { + col = null; + } + } + if (col == null) { + servlet.getMetrics().incrementFailedIncrementRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) + .build(); + } + byte [][] parts = CellUtil.parseColumn(col); + if (parts.length != 2) { + servlet.getMetrics().incrementFailedIncrementRequests(1); + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) + .build(); + } + increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue()))); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("INCREMENT " + increment.toString()); + } + Result result = table.increment(increment); + + if (returnResult) { + if (result.isEmpty()) { + servlet.getMetrics().incrementFailedIncrementRequests(1); + return Response.status(Response.Status.NOT_MODIFIED) + .type(MIMETYPE_TEXT).entity("Increment return empty." + CRLF) + .build(); + } + + CellSetModel rModel = new CellSetModel(); + RowModel rRowModel = new RowModel(result.getRow()); + for (Cell cell : result.listCells()) { + rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), + cell.getTimestamp(), CellUtil.cloneValue(cell))); + } + rModel.addRow(rRowModel); + servlet.getMetrics().incrementSucessfulIncrementRequests(1); + return Response.ok(rModel).build(); + } + + ResponseBuilder response = Response.ok(); + servlet.getMetrics().incrementSucessfulIncrementRequests(1); + return response.build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedIncrementRequests(1); + return processException(e); + } finally { + if (table != null) try { + table.close(); + } catch (IOException ioe) { + LOG.debug("Exception received while closing the table " + table.getName(), ioe); + } + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java new file mode 100755 index 00000000..3d81c414 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java @@ -0,0 +1,131 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.Iterator; +import java.util.NoSuchElementException; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.security.AccessDeniedException; + +import org.apache.hadoop.util.StringUtils; + +import org.apache.yetus.audience.InterfaceAudience; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class RowResultGenerator extends ResultGenerator { + private static final Logger LOG = LoggerFactory.getLogger(RowResultGenerator.class); + + private Iterator valuesI; + private Cell cache; + + public RowResultGenerator(final String tableName, final RowSpec rowspec, + final Filter filter, final boolean cacheBlocks) + throws IllegalArgumentException, IOException { + try (Table table = RESTServlet.getInstance().getTable(tableName)) { + Get get = new Get(rowspec.getRow()); + if (rowspec.hasColumns()) { + for (byte[] col : rowspec.getColumns()) { + byte[][] split = CellUtil.parseColumn(col); + if (split.length == 1) { + get.addFamily(split[0]); + } else if (split.length == 2) { + get.addColumn(split[0], split[1]); + } else { + throw new IllegalArgumentException("Invalid column specifier."); + } + } + } + get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); + get.readVersions(rowspec.getMaxVersions()); + if (filter != null) { + get.setFilter(filter); + } + get.setCacheBlocks(cacheBlocks); + Result result = table.get(get); + if (result != null && !result.isEmpty()) { + valuesI = result.listCells().iterator(); + } + } catch (DoNotRetryIOException e) { + // Warn here because Stargate will return 404 in the case if multiple + // column families were specified but one did not exist -- currently + // HBase will fail the whole Get. + // Specifying multiple columns in a URI should be uncommon usage but + // help to avoid confusion by leaving a record of what happened here in + // the log. + LOG.warn(StringUtils.stringifyException(e)); + // Lets get the exception rethrown to get a more meaningful error message than 404 + if (e instanceof AccessDeniedException) { + throw e; + } + } + } + + @Override + public void close() { + } + + @Override + public boolean hasNext() { + if (cache != null) { + return true; + } + if (valuesI == null) { + return false; + } + return valuesI.hasNext(); + } + + @Override + public Cell next() { + if (cache != null) { + Cell kv = cache; + cache = null; + return kv; + } + if (valuesI == null) { + return null; + } + try { + return valuesI.next(); + } catch (NoSuchElementException e) { + return null; + } + } + + @Override + public void putBack(Cell kv) { + this.cache = kv; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("remove not supported"); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java new file mode 100755 index 00000000..c510c9ed --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -0,0 +1,407 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.TreeSet; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Parses a path based row/column/timestamp specification into its component + * elements. + *
+ * + */ +@InterfaceAudience.Private +public class RowSpec { + public static final long DEFAULT_START_TIMESTAMP = 0; + public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE; + + private byte[] row = HConstants.EMPTY_START_ROW; + private byte[] endRow = null; + private TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); + private List labels = new ArrayList<>(); + private long startTime = DEFAULT_START_TIMESTAMP; + private long endTime = DEFAULT_END_TIMESTAMP; + private int maxVersions = 1; + private int maxValues = Integer.MAX_VALUE; + + public RowSpec(String path) throws IllegalArgumentException { + int i = 0; + while (path.charAt(i) == '/') { + i++; + } + i = parseRowKeys(path, i); + i = parseColumns(path, i); + i = parseTimestamp(path, i); + i = parseQueryParams(path, i); + } + + private int parseRowKeys(final String path, int i) + throws IllegalArgumentException { + String startRow = null, endRow = null; + try { + StringBuilder sb = new StringBuilder(); + char c; + while (i < path.length() && (c = path.charAt(i)) != '/') { + sb.append(c); + i++; + } + i++; + String row = startRow = sb.toString(); + int idx = startRow.indexOf(','); + if (idx != -1) { + startRow = URLDecoder.decode(row.substring(0, idx), + HConstants.UTF8_ENCODING); + endRow = URLDecoder.decode(row.substring(idx + 1), + HConstants.UTF8_ENCODING); + } else { + startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING); + } + } catch (IndexOutOfBoundsException e) { + throw new IllegalArgumentException(e); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + // HBase does not support wildcards on row keys so we will emulate a + // suffix glob by synthesizing appropriate start and end row keys for + // table scanning + if (startRow.charAt(startRow.length() - 1) == '*') { + if (endRow != null) + throw new IllegalArgumentException("invalid path: start row "+ + "specified with wildcard"); + this.row = Bytes.toBytes(startRow.substring(0, + startRow.lastIndexOf("*"))); + this.endRow = new byte[this.row.length + 1]; + System.arraycopy(this.row, 0, this.endRow, 0, this.row.length); + this.endRow[this.row.length] = (byte)255; + } else { + this.row = Bytes.toBytes(startRow.toString()); + if (endRow != null) { + this.endRow = Bytes.toBytes(endRow.toString()); + } + } + return i; + } + + private int parseColumns(final String path, int i) throws IllegalArgumentException { + if (i >= path.length()) { + return i; + } + try { + char c; + StringBuilder column = new StringBuilder(); + while (i < path.length() && (c = path.charAt(i)) != '/') { + if (c == ',') { + if (column.length() < 1) { + throw new IllegalArgumentException("invalid path"); + } + String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING); + this.columns.add(Bytes.toBytes(s)); + column.setLength(0); + i++; + continue; + } + column.append(c); + i++; + } + i++; + // trailing list entry + if (column.length() > 0) { + String s = URLDecoder.decode(column.toString(), HConstants.UTF8_ENCODING); + this.columns.add(Bytes.toBytes(s)); + } + } catch (IndexOutOfBoundsException e) { + throw new IllegalArgumentException(e); + } catch (UnsupportedEncodingException e) { + // shouldn't happen + throw new RuntimeException(e); + } + return i; + } + + private int parseTimestamp(final String path, int i) + throws IllegalArgumentException { + if (i >= path.length()) { + return i; + } + long time0 = 0, time1 = 0; + try { + char c = 0; + StringBuilder stamp = new StringBuilder(); + while (i < path.length()) { + c = path.charAt(i); + if 
(c == '/' || c == ',') { + break; + } + stamp.append(c); + i++; + } + try { + time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), + HConstants.UTF8_ENCODING)); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(e); + } + if (c == ',') { + stamp = new StringBuilder(); + i++; + while (i < path.length() && ((c = path.charAt(i)) != '/')) { + stamp.append(c); + i++; + } + try { + time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), + HConstants.UTF8_ENCODING)); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(e); + } + } + if (c == '/') { + i++; + } + } catch (IndexOutOfBoundsException e) { + throw new IllegalArgumentException(e); + } catch (UnsupportedEncodingException e) { + // shouldn't happen + throw new RuntimeException(e); + } + if (time1 != 0) { + startTime = time0; + endTime = time1; + } else { + endTime = time0; + } + return i; + } + + private int parseQueryParams(final String path, int i) { + if (i >= path.length()) { + return i; + } + StringBuilder query = new StringBuilder(); + try { + query.append(URLDecoder.decode(path.substring(i), + HConstants.UTF8_ENCODING)); + } catch (UnsupportedEncodingException e) { + // should not happen + throw new RuntimeException(e); + } + i += query.length(); + int j = 0; + while (j < query.length()) { + char c = query.charAt(j); + if (c != '?' && c != '&') { + break; + } + if (++j > query.length()) { + throw new IllegalArgumentException("malformed query parameter"); + } + char what = query.charAt(j); + if (++j > query.length()) { + break; + } + c = query.charAt(j); + if (c != '=') { + throw new IllegalArgumentException("malformed query parameter"); + } + if (++j > query.length()) { + break; + } + switch (what) { + case 'm': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); + } + maxVersions = Integer.parseInt(sb.toString()); + } break; + case 'n': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); + } + maxValues = Integer.parseInt(sb.toString()); + } break; + default: + throw new IllegalArgumentException("unknown parameter '" + c + "'"); + } + } + return i; + } + + public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, + long startTime, long endTime, int maxVersions) { + this.row = startRow; + this.endRow = endRow; + if (columns != null) { + Collections.addAll(this.columns, columns); + } + this.startTime = startTime; + this.endTime = endTime; + this.maxVersions = maxVersions; + } + + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, + long startTime, long endTime, int maxVersions, Collection labels) { + this(startRow, endRow, columns, startTime, endTime, maxVersions); + if(labels != null) { + this.labels.addAll(labels); + } + } + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, + long startTime, long endTime, int maxVersions) { + this.row = startRow; + this.endRow = endRow; + if (columns != null) { + this.columns.addAll(columns); + } + this.startTime = startTime; + this.endTime = endTime; + this.maxVersions = maxVersions; + } + + public boolean isSingleRow() { + return endRow == null; + } + + public int getMaxVersions() { + return maxVersions; + } + + public void setMaxVersions(final int maxVersions) { + this.maxVersions = maxVersions; + } + + public int getMaxValues() { + return maxValues; + } + + public void 
setMaxValues(final int maxValues) { + this.maxValues = maxValues; + } + + public boolean hasColumns() { + return !columns.isEmpty(); + } + + public boolean hasLabels() { + return !labels.isEmpty(); + } + + public byte[] getRow() { + return row; + } + + public byte[] getStartRow() { + return row; + } + + public boolean hasEndRow() { + return endRow != null; + } + + public byte[] getEndRow() { + return endRow; + } + + public void addColumn(final byte[] column) { + columns.add(column); + } + + public byte[][] getColumns() { + return columns.toArray(new byte[columns.size()][]); + } + + public List getLabels() { + return labels; + } + + public boolean hasTimestamp() { + return (startTime == 0) && (endTime != Long.MAX_VALUE); + } + + public long getTimestamp() { + return endTime; + } + + public long getStartTime() { + return startTime; + } + + public void setStartTime(final long startTime) { + this.startTime = startTime; + } + + public long getEndTime() { + return endTime; + } + + public void setEndTime(long endTime) { + this.endTime = endTime; + } + + @Override + public String toString() { + StringBuilder result = new StringBuilder(); + result.append("{startRow => '"); + if (row != null) { + result.append(Bytes.toString(row)); + } + result.append("', endRow => '"); + if (endRow != null) { + result.append(Bytes.toString(endRow)); + } + result.append("', columns => ["); + for (byte[] col: columns) { + result.append(" '"); + result.append(Bytes.toString(col)); + result.append("'"); + } + result.append(" ], startTime => "); + result.append(Long.toString(startTime)); + result.append(", endTime => "); + result.append(Long.toString(endTime)); + result.append(", maxVersions => "); + result.append(Integer.toString(maxVersions)); + result.append(", maxValues => "); + result.append(Integer.toString(maxValues)); + result.append("}"); + return result.toString(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java new file mode 100755 index 00000000..4a8f0bea --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -0,0 +1,215 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.Base64; + +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.UriInfo; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.util.Bytes; + +@InterfaceAudience.Private +public class ScannerInstanceResource extends ResourceBase { + private static final Logger LOG = + LoggerFactory.getLogger(ScannerInstanceResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + ResultGenerator generator = null; + String id = null; + int batch = 1; + + public ScannerInstanceResource() throws IOException { } + + public ScannerInstanceResource(String table, String id, + ResultGenerator generator, int batch) throws IOException { + this.id = id; + this.generator = generator; + this.batch = batch; + } + + @GET + @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context UriInfo uriInfo, + @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + if (generator == null) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF) + .build(); + } else { + // Updated the connection access time for each client next() call + RESTServlet.getInstance().getConnectionCache().updateConnectionAccessTime(); + } + CellSetModel model = new CellSetModel(); + RowModel rowModel = null; + byte[] rowKey = null; + int limit = batch; + if (maxValues > 0) { + limit = maxValues; + } + int count = limit; + do { + Cell value = null; + try { + value = generator.next(); + } catch (IllegalStateException e) { + if (ScannerResource.delete(id)) { + servlet.getMetrics().incrementSucessfulDeleteRequests(1); + } else { + servlet.getMetrics().incrementFailedDeleteRequests(1); + } + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.GONE) + .type(MIMETYPE_TEXT).entity("Gone" + CRLF) + .build(); + } catch (IllegalArgumentException e) { + Throwable t = e.getCause(); + if (t instanceof TableNotFoundException) { + return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF) + .build(); + } + throw e; + } + if (value == null) { + if (LOG.isTraceEnabled()) { + LOG.trace("generator exhausted"); + } + // respond with 204 (No Content) if an empty cell set would be + // returned + if (count == limit) { + return Response.noContent().build(); + } + break; + } + if (rowKey == null) { + rowKey = CellUtil.cloneRow(value); + rowModel = new RowModel(rowKey); + } + if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) { + // if maxRows was given as a 
query param, stop if we would exceed the + // specified number of rows + if (maxRows > 0) { + if (--maxRows == 0) { + generator.putBack(value); + break; + } + } + model.addRow(rowModel); + rowKey = CellUtil.cloneRow(value); + rowModel = new RowModel(rowKey); + } + rowModel.addCell( + new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), + value.getTimestamp(), CellUtil.cloneValue(value))); + } while (--count > 0); + model.addRow(rowModel); + ResponseBuilder response = Response.ok(model); + response.cacheControl(cacheControl); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } + + @GET + @Produces(MIMETYPE_BINARY) + public Response getBinary(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + + MIMETYPE_BINARY); + } + servlet.getMetrics().incrementRequests(1); + try { + Cell value = generator.next(); + if (value == null) { + if (LOG.isTraceEnabled()) { + LOG.trace("generator exhausted"); + } + return Response.noContent().build(); + } + ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); + response.cacheControl(cacheControl); + response.header("X-Row", Bytes.toString(Base64.getEncoder().encode( + CellUtil.cloneRow(value)))); + response.header("X-Column", Bytes.toString(Base64.getEncoder().encode( + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); + response.header("X-Timestamp", value.getTimestamp()); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } catch (IllegalStateException e) { + if (ScannerResource.delete(id)) { + servlet.getMetrics().incrementSucessfulDeleteRequests(1); + } else { + servlet.getMetrics().incrementFailedDeleteRequests(1); + } + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.GONE) + .type(MIMETYPE_TEXT).entity("Gone" + CRLF) + .build(); + } + } + + @DELETE + public Response delete(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) + .build(); + } + if (ScannerResource.delete(id)) { + servlet.getMetrics().incrementSucessfulDeleteRequests(1); + } else { + servlet.getMetrics().incrementFailedDeleteRequests(1); + } + return Response.ok().build(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java new file mode 100755 index 00000000..f9b2d13b --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java @@ -0,0 +1,167 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.net.URI; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import javax.ws.rs.Consumes; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriBuilder; +import javax.ws.rs.core.UriInfo; + +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonMappingException; + +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.rest.model.ScannerModel; + +@InterfaceAudience.Private +public class ScannerResource extends ResourceBase { + + private static final Logger LOG = LoggerFactory.getLogger(ScannerResource.class); + + static final Map scanners = + Collections.synchronizedMap(new HashMap()); + + TableResource tableResource; + + /** + * Constructor + * @param tableResource + * @throws IOException + */ + public ScannerResource(TableResource tableResource)throws IOException { + super(); + this.tableResource = tableResource; + } + + static boolean delete(final String id) { + ScannerInstanceResource instance = scanners.remove(id); + if (instance != null) { + instance.generator.close(); + return true; + } else { + return false; + } + } + + Response update(final ScannerModel model, final boolean replace, + final UriInfo uriInfo) { + servlet.getMetrics().incrementRequests(1); + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) + .build(); + } + byte[] endRow = model.hasEndRow() ? 
model.getEndRow() : null; + RowSpec spec = null; + if (model.getLabels() != null) { + spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(), + model.getEndTime(), model.getMaxVersions(), model.getLabels()); + } else { + spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(), + model.getEndTime(), model.getMaxVersions()); + } + + try { + Filter filter = ScannerResultGenerator.buildFilterFromModel(model); + String tableName = tableResource.getName(); + ScannerResultGenerator gen = + new ScannerResultGenerator(tableName, spec, filter, model.getCaching(), + model.getCacheBlocks(), model.getLimit()); + String id = gen.getID(); + ScannerInstanceResource instance = + new ScannerInstanceResource(tableName, id, gen, model.getBatch()); + scanners.put(id, instance); + if (LOG.isTraceEnabled()) { + LOG.trace("new scanner: " + id); + } + UriBuilder builder = uriInfo.getAbsolutePathBuilder(); + URI uri = builder.path(id).build(); + servlet.getMetrics().incrementSucessfulPutRequests(1); + return Response.created(uri).build(); + } catch (Exception e) { + LOG.error("Exception occurred while processing " + uriInfo.getAbsolutePath() + " : ", e); + servlet.getMetrics().incrementFailedPutRequests(1); + if (e instanceof TableNotFoundException) { + return Response.status(Response.Status.NOT_FOUND) + .type(MIMETYPE_TEXT).entity("Not found" + CRLF) + .build(); + } else if (e instanceof RuntimeException + || e instanceof JsonMappingException | e instanceof JsonParseException) { + return Response.status(Response.Status.BAD_REQUEST) + .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) + .build(); + } + return Response.status(Response.Status.SERVICE_UNAVAILABLE) + .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) + .build(); + } + } + + @PUT + @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response put(final ScannerModel model, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); + } + return update(model, true, uriInfo); + } + + @POST + @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response post(final ScannerModel model, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("POST " + uriInfo.getAbsolutePath()); + } + return update(model, false, uriInfo); + } + + @Path("{scanner: .+}") + public ScannerInstanceResource getScannerInstanceResource( + final @PathParam("scanner") String id) throws IOException { + ScannerInstanceResource instance = scanners.get(id); + if (instance == null) { + servlet.getMetrics().incrementFailedGetRequests(1); + return new ScannerInstanceResource(); + } else { + servlet.getMetrics().incrementSucessfulGetRequests(1); + } + return instance; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java new file mode 100755 index 00000000..304930c4 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -0,0 +1,210 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.hadoop.hbase.security.visibility.Authorizations; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class ScannerResultGenerator extends ResultGenerator { + + private static final Logger LOG = + LoggerFactory.getLogger(ScannerResultGenerator.class); + + public static Filter buildFilterFromModel(final ScannerModel model) + throws Exception { + String filter = model.getFilter(); + if (filter == null || filter.length() == 0) { + return null; + } + return buildFilter(filter); + } + + private String id; + private Iterator rowI; + private Cell cache; + private ResultScanner scanner; + private Result cached; + + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, + final Filter filter, final boolean cacheBlocks) + throws IllegalArgumentException, IOException { + this(tableName, rowspec, filter, -1, cacheBlocks); + } + + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, + final Filter filter, final int caching, final boolean cacheBlocks) + throws IllegalArgumentException, IOException { + this(tableName, rowspec, filter, caching, cacheBlocks, -1); + } + + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, + final Filter filter, final int caching ,final boolean cacheBlocks, int limit) throws IOException { + Table table = RESTServlet.getInstance().getTable(tableName); + try { + Scan scan; + if (rowspec.hasEndRow()) { + scan = new Scan().withStartRow(rowspec.getStartRow()).withStopRow(rowspec.getEndRow()); + } else { + scan = new Scan().withStartRow(rowspec.getStartRow()); + } + if (rowspec.hasColumns()) { + byte[][] columns = rowspec.getColumns(); + for (byte[] column: columns) { + byte[][] split = CellUtil.parseColumn(column); + if (split.length == 1) { + scan.addFamily(split[0]); + } else if (split.length == 2) { + scan.addColumn(split[0], split[1]); + } else { + throw new IllegalArgumentException("Invalid familyAndQualifier provided."); + } + } + } + scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); + scan.readVersions(rowspec.getMaxVersions()); + if (filter != null) { + scan.setFilter(filter); + } + if (caching > 0 ) { + 
scan.setCaching(caching); + } + if (limit > 0) { + scan.setLimit(limit); + } + scan.setCacheBlocks(cacheBlocks); + if (rowspec.hasLabels()) { + scan.setAuthorizations(new Authorizations(rowspec.getLabels())); + } + scanner = table.getScanner(scan); + cached = null; + id = Long.toString(System.currentTimeMillis()) + + Integer.toHexString(scanner.hashCode()); + } finally { + table.close(); + } + } + + public String getID() { + return id; + } + + @Override + public void close() { + if (scanner != null) { + scanner.close(); + scanner = null; + } + } + + @Override + public boolean hasNext() { + if (cache != null) { + return true; + } + if (rowI != null && rowI.hasNext()) { + return true; + } + if (cached != null) { + return true; + } + try { + Result result = scanner.next(); + if (result != null && !result.isEmpty()) { + cached = result; + } + } catch (UnknownScannerException e) { + throw new IllegalArgumentException(e); + } catch (IOException e) { + LOG.error(StringUtils.stringifyException(e)); + } + return cached != null; + } + + @Override + public Cell next() { + if (cache != null) { + Cell kv = cache; + cache = null; + return kv; + } + boolean loop; + do { + loop = false; + if (rowI != null) { + if (rowI.hasNext()) { + return rowI.next(); + } else { + rowI = null; + } + } + if (cached != null) { + rowI = cached.listCells().iterator(); + loop = true; + cached = null; + } else { + Result result = null; + try { + result = scanner.next(); + } catch (UnknownScannerException e) { + throw new IllegalArgumentException(e); + } catch (TableNotEnabledException tnee) { + throw new IllegalStateException(tnee); + } catch (TableNotFoundException tnfe) { + throw new IllegalArgumentException(tnfe); + } catch (IOException e) { + LOG.error(StringUtils.stringifyException(e)); + } + if (result != null && !result.isEmpty()) { + rowI = result.listCells().iterator(); + loop = true; + } + } + } while (loop); + return null; + } + + @Override + public void putBack(Cell kv) { + this.cache = kv; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("remove not supported"); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java new file mode 100755 index 00000000..65f5bba8 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -0,0 +1,259 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel; +import org.apache.hadoop.hbase.rest.model.TableSchemaModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.Consumes; +import javax.ws.rs.DELETE; +import javax.ws.rs.GET; +import javax.ws.rs.POST; +import javax.ws.rs.PUT; +import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.UriInfo; +import javax.xml.namespace.QName; +import java.io.IOException; +import java.util.Map; + +@InterfaceAudience.Private +public class SchemaResource extends ResourceBase { + private static final Logger LOG = LoggerFactory.getLogger(SchemaResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + TableResource tableResource; + + /** + * Constructor + */ + public SchemaResource(TableResource tableResource) throws IOException { + super(); + this.tableResource = tableResource; + } + + private HTableDescriptor getTableSchema() throws IOException, TableNotFoundException { + try (Table table = servlet.getTable(tableResource.getName())) { + return new HTableDescriptor(table.getDescriptor()); + } + } + + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + try { + ResponseBuilder response = + Response.ok(new TableSchemaModel(getTableSchema())); + response.cacheControl(cacheControl); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return processException(e); + } + } + + private Response replace(final TableName name, final TableSchemaModel model, + final UriInfo uriInfo, final Admin admin) { + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) + .build(); + } + try { + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(name); + for (Map.Entry e : model.getAny().entrySet()) { + tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); + } + for (ColumnSchemaModel family : model.getColumns()) { + ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName())); + for (Map.Entry e : family.getAny().entrySet()) { + 
columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), + e.getValue().toString()); + } + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptorBuilder.build()); + } + TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); + if (admin.tableExists(name)) { + admin.disableTable(name); + admin.modifyTable(tableDescriptor); + admin.enableTable(name); + servlet.getMetrics().incrementSucessfulPutRequests(1); + } else { + try { + admin.createTable(tableDescriptor); + servlet.getMetrics().incrementSucessfulPutRequests(1); + } catch (TableExistsException e) { + // race, someone else created a table with the same name + return Response.status(Response.Status.NOT_MODIFIED) + .type(MIMETYPE_TEXT).entity("Not modified" + CRLF) + .build(); + } + } + return Response.created(uriInfo.getAbsolutePath()).build(); + } catch (Exception e) { + LOG.info("Caught exception", e); + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } + } + + private Response update(final TableName name, final TableSchemaModel model, + final UriInfo uriInfo, final Admin admin) { + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN) + .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) + .build(); + } + try { + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(admin.getDescriptor(name)); + admin.disableTable(name); + try { + for (ColumnSchemaModel family : model.getColumns()) { + ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family.getName())); + for (Map.Entry e : family.getAny().entrySet()) { + columnFamilyDescriptorBuilder.setValue(e.getKey().getLocalPart(), + e.getValue().toString()); + } + TableDescriptor tableDescriptor = tableDescriptorBuilder.build(); + ColumnFamilyDescriptor columnFamilyDescriptor = columnFamilyDescriptorBuilder.build(); + if (tableDescriptor.hasColumnFamily(columnFamilyDescriptor.getName())) { + admin.modifyColumnFamily(name, columnFamilyDescriptor); + } else { + admin.addColumnFamily(name, columnFamilyDescriptor); + } + } + } catch (IOException e) { + return Response.status(Response.Status.SERVICE_UNAVAILABLE) + .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) + .build(); + } finally { + admin.enableTable(TableName.valueOf(tableResource.getName())); + } + servlet.getMetrics().incrementSucessfulPutRequests(1); + return Response.ok().build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedPutRequests(1); + return processException(e); + } + } + + private Response update(final TableSchemaModel model, final boolean replace, + final UriInfo uriInfo) { + try { + TableName name = TableName.valueOf(tableResource.getName()); + Admin admin = servlet.getAdmin(); + if (replace || !admin.tableExists(name)) { + return replace(name, model, uriInfo, admin); + } else { + return update(name, model, uriInfo, admin); + } + } catch (Exception e) { + servlet.getMetrics().incrementFailedPutRequests(1); + // Avoid re-unwrapping the exception + if (e instanceof WebApplicationException) { + throw (WebApplicationException) e; + } + return processException(e); + } + } + + @PUT + @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response put(final TableSchemaModel model, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + return update(model, true, uriInfo); + } + + @POST + 
@Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response post(final TableSchemaModel model, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("PUT " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + return update(model, false, uriInfo); + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", + justification="Expected") + @DELETE + public Response delete(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("DELETE " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + if (servlet.isReadOnly()) { + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); + } + try { + Admin admin = servlet.getAdmin(); + try { + admin.disableTable(TableName.valueOf(tableResource.getName())); + } catch (TableNotEnabledException e) { /* this is what we want anyway */ } + admin.deleteTable(TableName.valueOf(tableResource.getName())); + servlet.getMetrics().incrementSucessfulDeleteRequests(1); + return Response.ok().build(); + } catch (Exception e) { + servlet.getMetrics().incrementFailedDeleteRequests(1); + return processException(e); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java new file mode 100755 index 00000000..abcd87cf --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -0,0 +1,117 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
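A minimal usage sketch (not part of the patch) of driving the schema endpoint above from the Java REST client added later in this change. The gateway address, table name, and column family are assumptions; the client and model classes are the ones shipped by this module.

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
import org.apache.hadoop.hbase.rest.model.TableSchemaModel;

public class SchemaPutSketch {
  public static void main(String[] args) throws Exception {
    // Assumed: a REST gateway listening on localhost:8080.
    Client client = new Client(new Cluster().add("localhost", 8080));

    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("cf");

    TableSchemaModel schema = new TableSchemaModel();
    schema.setName("example_table");
    schema.addColumnFamily(family);

    // PUT /<table>/schema replaces the schema (update(model, true, ...) above);
    // POST merges column families into an existing table (update(model, false, ...)).
    Response resp = client.put("/example_table/schema",
        Constants.MIMETYPE_PROTOBUF, schema.createProtobufOutput());
    System.out.println("PUT schema -> HTTP " + resp.getCode()); // expect 201 Created

    client.shutdown();
  }
}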
+ */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Map; +import javax.ws.rs.GET; +import javax.ws.rs.Produces; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.UriInfo; +import org.apache.hadoop.hbase.ClusterMetrics; +import org.apache.hadoop.hbase.ClusterMetrics.Option; +import org.apache.hadoop.hbase.RegionMetrics; +import org.apache.hadoop.hbase.ServerMetrics; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Size; +import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class StorageClusterStatusResource extends ResourceBase { + private static final Logger LOG = + LoggerFactory.getLogger(StorageClusterStatusResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + /** + * Constructor + * @throws IOException + */ + public StorageClusterStatusResource() throws IOException { + super(); + } + + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + try { + ClusterMetrics status = servlet.getAdmin().getClusterMetrics( + EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); + StorageClusterStatusModel model = new StorageClusterStatusModel(); + model.setRegions(status.getRegionCount()); + model.setRequests(status.getRequestCount()); + model.setAverageLoad(status.getAverageLoad()); + for (Map.Entry entry: status.getLiveServerMetrics().entrySet()) { + ServerName sn = entry.getKey(); + ServerMetrics load = entry.getValue(); + StorageClusterStatusModel.Node node = + model.addLiveNode( + sn.getHostname() + ":" + + Integer.toString(sn.getPort()), + sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE), + (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE)); + node.setRequests(load.getRequestCount()); + for (RegionMetrics region: load.getRegionMetrics().values()) { + node.addRegion(region.getRegionName(), region.getStoreCount(), + region.getStoreFileCount(), + (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE), + (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE), + (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE), + region.getReadRequestCount(), + region.getWriteRequestCount(), + (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE), + (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE), + (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE), + region.getCompactingCellCount(), + region.getCompactedCellCount()); + } + } + for (ServerName name: status.getDeadServerNames()) { + model.addDeadNode(name.toString()); + } + ResponseBuilder response = Response.ok(model); + response.cacheControl(cacheControl); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } catch (IOException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.SERVICE_UNAVAILABLE) + .type(MIMETYPE_TEXT).entity("Unavailable" + 
CRLF) + .build(); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java new file mode 100755 index 00000000..67cc8c46 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java @@ -0,0 +1,81 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.ClusterMetrics.Option; +import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.GET; +import javax.ws.rs.Produces; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.UriInfo; +import java.io.IOException; +import java.util.EnumSet; + +@InterfaceAudience.Private +public class StorageClusterVersionResource extends ResourceBase { + private static final Logger LOG = + LoggerFactory.getLogger(StorageClusterVersionResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + /** + * Constructor + * @throws IOException + */ + public StorageClusterVersionResource() throws IOException { + super(); + } + + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) + public Response get(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + try { + StorageClusterVersionModel model = new StorageClusterVersionModel(); + model.setVersion( + servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)) + .getHBaseVersion()); + ResponseBuilder response = Response.ok(model); + response.cacheControl(cacheControl); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } catch (IOException e) { + servlet.getMetrics().incrementFailedGetRequests(1); + return Response.status(Response.Status.SERVICE_UNAVAILABLE) + .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) + .build(); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java new file mode 100755 index 00000000..0fad4427 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -0,0 +1,205 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.ParseFilter; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.DefaultValue; +import javax.ws.rs.Encoded; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.QueryParam; +import java.io.IOException; +import java.util.List; + +@InterfaceAudience.Private +public class TableResource extends ResourceBase { + + String table; + private static final Logger LOG = LoggerFactory.getLogger(TableResource.class); + + /** + * Constructor + * @param table + * @throws IOException + */ + public TableResource(String table) throws IOException { + super(); + this.table = table; + } + + /** @return the table name */ + String getName() { + return table; + } + + /** + * @return true if the table exists + * @throws IOException + */ + boolean exists() throws IOException { + return servlet.getAdmin().tableExists(TableName.valueOf(table)); + } + + @Path("exists") + public ExistsResource getExistsResource() throws IOException { + return new ExistsResource(this); + } + + @Path("regions") + public RegionsResource getRegionsResource() throws IOException { + return new RegionsResource(this); + } + + @Path("scanner") + public ScannerResource getScannerResource() throws IOException { + return new ScannerResource(this); + } + + @Path("schema") + public SchemaResource getSchemaResource() throws IOException { + return new SchemaResource(this); + } + + @Path("{multiget: multiget.*}") + public MultiRowResource getMultipleRowResource(final @QueryParam("v") String versions, + @PathParam("multiget") String path) throws IOException { + return new MultiRowResource(this, versions, path.replace("multiget", "").replace("/", "")); + } + + @Path("{rowspec: [^*]+}") + public RowResource getRowResource( + // We need the @Encoded decorator so Jersey won't urldecode before + // the RowSpec constructor has a chance to parse + final @PathParam("rowspec") @Encoded String rowspec, + final @QueryParam("v") String versions, + final @QueryParam("check") String check, + final @QueryParam("rr") String returnResult) throws IOException { + return new RowResource(this, rowspec, versions, check, returnResult); + } + + @Path("{suffixglobbingspec: .*\\*/.+}") + public RowResource getRowResourceWithSuffixGlobbing( + // We need the 
@Encoded decorator so Jersey won't urldecode before + // the RowSpec constructor has a chance to parse + final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, + final @QueryParam("v") String versions, + final @QueryParam("check") String check, + final @QueryParam("rr") String returnResult) throws IOException { + return new RowResource(this, suffixglobbingspec, versions, check, returnResult); + } + + @Path("{scanspec: .*[*]$}") + public TableScanResource getScanResource( + final @PathParam("scanspec") String scanSpec, + @DefaultValue(Integer.MAX_VALUE + "") + @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, + @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow, + @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow, + @QueryParam(Constants.SCAN_COLUMN) List column, + @DefaultValue("1") @QueryParam(Constants.SCAN_MAX_VERSIONS) int maxVersions, + @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize, + @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime, + @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime, + @DefaultValue("true") @QueryParam(Constants.SCAN_CACHE_BLOCKS) boolean cacheBlocks, + @DefaultValue("false") @QueryParam(Constants.SCAN_REVERSED) boolean reversed, + @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String paramFilter) { + try { + Filter prefixFilter = null; + Scan tableScan = new Scan(); + if (scanSpec.indexOf('*') > 0) { + String prefix = scanSpec.substring(0, scanSpec.indexOf('*')); + byte[] prefixBytes = Bytes.toBytes(prefix); + prefixFilter = new PrefixFilter(Bytes.toBytes(prefix)); + if (startRow.isEmpty()) { + tableScan.withStartRow(prefixBytes); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace("Query parameters : Table Name = > " + this.table + " Start Row => " + startRow + + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime + + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => " + + maxVersions + " Batch Size => " + batchSize); + } + Table hTable = RESTServlet.getInstance().getTable(this.table); + tableScan.setBatch(batchSize); + tableScan.readVersions(maxVersions); + tableScan.setTimeRange(startTime, endTime); + if (!startRow.isEmpty()) { + tableScan.withStartRow(Bytes.toBytes(startRow)); + } + tableScan.withStopRow(Bytes.toBytes(endRow)); + for (String col : column) { + byte [][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); + if (parts.length == 1) { + if (LOG.isTraceEnabled()) { + LOG.trace("Scan family : " + Bytes.toStringBinary(parts[0])); + } + tableScan.addFamily(parts[0]); + } else if (parts.length == 2) { + if (LOG.isTraceEnabled()) { + LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) + + " " + Bytes.toStringBinary(parts[1])); + } + tableScan.addColumn(parts[0], parts[1]); + } else { + throw new IllegalArgumentException("Invalid column specifier."); + } + } + FilterList filterList = new FilterList(); + if (StringUtils.isNotEmpty(paramFilter)) { + ParseFilter pf = new ParseFilter(); + Filter parsedParamFilter = pf.parseFilterString(paramFilter); + if (parsedParamFilter != null) { + filterList.addFilter(parsedParamFilter); + } + if (prefixFilter != null) { + filterList.addFilter(prefixFilter); + } + } + if (filterList.size() > 0) { + tableScan.setFilter(filterList); + } + + int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); + tableScan.setCaching(fetchSize); + 
tableScan.setReversed(reversed); + tableScan.setCacheBlocks(cacheBlocks); + return new TableScanResource(hTable.getScanner(tableScan), userRequestedLimit); + } catch (IOException exp) { + servlet.getMetrics().incrementFailedScanRequests(1); + processException(exp); + LOG.warn(exp.toString(), exp); + return null; + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java new file mode 100755 index 00000000..8f5535e8 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java @@ -0,0 +1,158 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import javax.ws.rs.GET; +import javax.ws.rs.HeaderParam; +import javax.ws.rs.Produces; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.ResponseBuilder; +import javax.ws.rs.core.StreamingOutput; +import javax.ws.rs.core.UriInfo; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; + +@InterfaceAudience.Private +public class TableScanResource extends ResourceBase { + private static final Logger LOG = LoggerFactory.getLogger(TableScanResource.class); + + TableResource tableResource; + ResultScanner results; + int userRequestedLimit; + + public TableScanResource(ResultScanner scanner, int userRequestedLimit) throws IOException { + super(); + this.results = scanner; + this.userRequestedLimit = userRequestedLimit; + } + + @GET + @Produces({ Constants.MIMETYPE_XML, Constants.MIMETYPE_JSON }) + public CellSetModelStream get(final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + final int rowsToSend = userRequestedLimit; + servlet.getMetrics().incrementSucessfulScanRequests(1); + final Iterator itr = results.iterator(); + return new CellSetModelStream(new ArrayList() { + @Override + 
public Iterator iterator() { + return new Iterator() { + int count = rowsToSend; + + @Override + public boolean hasNext() { + return count > 0 && itr.hasNext(); + } + + @Override + public RowModel next() { + Result rs = itr.next(); + if ((rs == null) || (count <= 0)) { + return null; + } + byte[] rowKey = rs.getRow(); + RowModel rModel = new RowModel(rowKey); + List kvs = rs.listCells(); + for (Cell kv : kvs) { + rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), + kv.getTimestamp(), CellUtil.cloneValue(kv))); + } + count--; + if (count == 0) { + results.close(); + } + return rModel; + } + }; + } + }); + } + + @GET + @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) + public Response getProtobuf( + final @Context UriInfo uriInfo, + final @HeaderParam("Accept") String contentType) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + + MIMETYPE_BINARY); + } + servlet.getMetrics().incrementRequests(1); + try { + int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); + StreamingOutput stream = new ProtobufStreamingOutput(this.results, contentType, + userRequestedLimit, fetchSize); + servlet.getMetrics().incrementSucessfulScanRequests(1); + ResponseBuilder response = Response.ok(stream); + response.header("content-type", contentType); + return response.build(); + } catch (Exception exp) { + servlet.getMetrics().incrementFailedScanRequests(1); + processException(exp); + LOG.warn(exp.toString(), exp); + return null; + } + } + + @XmlRootElement(name = "CellSet") + @XmlAccessorType(XmlAccessType.FIELD) + public static class CellSetModelStream { + // JAXB needs an arraylist for streaming + @XmlElement(name = "Row") + @JsonIgnore + private ArrayList Row; + + public CellSetModelStream() { + } + + public CellSetModelStream(final ArrayList rowList) { + this.Row = rowList; + } + + // jackson needs an iterator for streaming + @JsonProperty("Row") + public Iterator getIterator() { + return Row.iterator(); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java new file mode 100755 index 00000000..c2123341 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java @@ -0,0 +1,103 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
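A hedged sketch of calling the stateless scan endpoint exposed by getScanResource above (the {scanspec: .*[*]$} path template). The query-parameter names correspond to this module's Constants (SCAN_LIMIT, SCAN_START_ROW, SCAN_END_ROW, SCAN_COLUMN); the literal parameter values, gateway address, and table are assumptions.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class RestScanSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // assumed gateway

    // GET /<table>/<prefix>* selects the scan path handled by TableScanResource.
    String path = "/example_table/row-*"
        + "?limit=25"              // Constants.SCAN_LIMIT
        + "&startrow=row-0"        // Constants.SCAN_START_ROW
        + "&endrow=row-9"          // Constants.SCAN_END_ROW
        + "&column=cf:qualifier";  // Constants.SCAN_COLUMN

    Response resp = client.get(path, Constants.MIMETYPE_JSON);
    System.out.println("scan -> HTTP " + resp.getCode());
    if (resp.getCode() == 200 && resp.getBody() != null) {
      System.out.println(new String(resp.getBody(), StandardCharsets.UTF_8));
    }
    client.shutdown();
  }
}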
+ */ + +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import javax.servlet.ServletContext; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.CacheControl; +import javax.ws.rs.core.Context; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriInfo; +import javax.ws.rs.core.Response.ResponseBuilder; + +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.rest.model.VersionModel; + +/** + * Implements REST software version reporting + *
+ * /version/rest
+ *
+ * /version (alias for /version/rest) + */ +@InterfaceAudience.Private +public class VersionResource extends ResourceBase { + + private static final Logger LOG = LoggerFactory.getLogger(VersionResource.class); + + static CacheControl cacheControl; + static { + cacheControl = new CacheControl(); + cacheControl.setNoCache(true); + cacheControl.setNoTransform(false); + } + + /** + * Constructor + * @throws IOException + */ + public VersionResource() throws IOException { + super(); + } + + /** + * Build a response for a version request. + * @param context servlet context + * @param uriInfo (JAX-RS context variable) request URL + * @return a response for a version request + */ + @GET + @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF}) + public Response get(final @Context ServletContext context, + final @Context UriInfo uriInfo) { + if (LOG.isTraceEnabled()) { + LOG.trace("GET " + uriInfo.getAbsolutePath()); + } + servlet.getMetrics().incrementRequests(1); + ResponseBuilder response = Response.ok(new VersionModel(context)); + response.cacheControl(cacheControl); + servlet.getMetrics().incrementSucessfulGetRequests(1); + return response.build(); + } + + /** + * Dispatch to StorageClusterVersionResource + */ + @Path("cluster") + public StorageClusterVersionResource getClusterVersionResource() + throws IOException { + return new StorageClusterVersionResource(); + } + + /** + * Dispatch /version/rest to self. + */ + @Path("rest") + public VersionResource getVersionResource() { + return this; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java new file mode 100755 index 00000000..c2bc7c02 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -0,0 +1,727 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
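The version and storage-cluster resources above are plain GET endpoints, so the shipped client can exercise them directly. A short sketch, assuming a local gateway and the usual root-resource mappings (/version/rest, /version/cluster, /status/cluster):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class VersionAndStatusSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // assumed gateway

    // /version/rest -> VersionResource, /version/cluster -> StorageClusterVersionResource,
    // /status/cluster -> StorageClusterStatusResource.
    for (String path : new String[] {"/version/rest", "/version/cluster", "/status/cluster"}) {
      Response resp = client.get(path, Constants.MIMETYPE_TEXT);
      System.out.println(path + " -> HTTP " + resp.getCode());
      if (resp.getCode() == 200 && resp.getBody() != null) {
        System.out.println(new String(resp.getBody(), StandardCharsets.UTF_8));
      }
    }
    client.shutdown();
  }
}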
+ */ + +package org.apache.hadoop.hbase.rest.client; + +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; +import org.apache.http.Header; +import org.apache.http.HttpResponse; +import org.apache.http.HttpStatus; +import org.apache.http.client.HttpClient; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.entity.InputStreamEntity; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.http.message.BasicHeader; +import org.apache.http.params.CoreConnectionPNames; +import org.apache.http.util.EntityUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * A wrapper around HttpClient which provides some useful function and + * semantics for interacting with the REST gateway. + */ +@InterfaceAudience.Public +public class Client { + public static final Header[] EMPTY_HEADER_ARRAY = new Header[0]; + + private static final Logger LOG = LoggerFactory.getLogger(Client.class); + + private HttpClient httpClient; + private Cluster cluster; + private boolean sslEnabled; + private HttpResponse resp; + private HttpGet httpGet = null; + + private Map extraHeaders; + + private static final String AUTH_COOKIE = "hadoop.auth"; + private static final String AUTH_COOKIE_EQ = AUTH_COOKIE + "="; + private static final String COOKIE = "Cookie"; + + /** + * Default Constructor + */ + public Client() { + this(null); + } + + private void initialize(Cluster cluster, boolean sslEnabled) { + this.cluster = cluster; + this.sslEnabled = sslEnabled; + extraHeaders = new ConcurrentHashMap<>(); + String clspath = System.getProperty("java.class.path"); + LOG.debug("classpath " + clspath); + this.httpClient = new DefaultHttpClient(); + this.httpClient.getParams().setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 2000); + } + + /** + * Constructor + * @param cluster the cluster definition + */ + public Client(Cluster cluster) { + initialize(cluster, false); + } + + /** + * Constructor + * @param cluster the cluster definition + * @param sslEnabled enable SSL or not + */ + public Client(Cluster cluster, boolean sslEnabled) { + initialize(cluster, sslEnabled); + } + + /** + * Shut down the client. Close any open persistent connections. + */ + public void shutdown() { + } + + /** + * @return the wrapped HttpClient + */ + public HttpClient getHttpClient() { + return httpClient; + } + + /** + * Add extra headers. These extra headers will be applied to all http + * methods before they are removed. If any header is not used any more, + * client needs to remove it explicitly. + */ + public void addExtraHeader(final String name, final String value) { + extraHeaders.put(name, value); + } + + /** + * Get an extra header value. 
+ */ + public String getExtraHeader(final String name) { + return extraHeaders.get(name); + } + + /** + * Get all extra headers (read-only). + */ + public Map getExtraHeaders() { + return Collections.unmodifiableMap(extraHeaders); + } + + /** + * Remove an extra header. + */ + public void removeExtraHeader(final String name) { + extraHeaders.remove(name); + } + + /** + * Execute a transaction method given only the path. Will select at random + * one of the members of the supplied cluster definition and iterate through + * the list until a transaction can be successfully completed. The + * definition of success here is a complete HTTP transaction, irrespective + * of result code. + * @param cluster the cluster definition + * @param method the transaction method + * @param headers HTTP header values to send + * @param path the properly urlencoded path + * @return the HTTP response code + * @throws IOException + */ + public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, + Header[] headers, String path) throws IOException { + IOException lastException; + if (cluster.nodes.size() < 1) { + throw new IOException("Cluster is empty"); + } + int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random()); + int i = start; + do { + cluster.lastHost = cluster.nodes.get(i); + try { + StringBuilder sb = new StringBuilder(); + if (sslEnabled) { + sb.append("https://"); + } else { + sb.append("http://"); + } + sb.append(cluster.lastHost); + sb.append(path); + URI uri = new URI(sb.toString()); + if (method instanceof HttpPut) { + HttpPut put = new HttpPut(uri); + put.setEntity(((HttpPut) method).getEntity()); + put.setHeaders(method.getAllHeaders()); + method = put; + } else if (method instanceof HttpGet) { + method = new HttpGet(uri); + } else if (method instanceof HttpHead) { + method = new HttpHead(uri); + } else if (method instanceof HttpDelete) { + method = new HttpDelete(uri); + } else if (method instanceof HttpPost) { + HttpPost post = new HttpPost(uri); + post.setEntity(((HttpPost) method).getEntity()); + post.setHeaders(method.getAllHeaders()); + method = post; + } + return executeURI(method, headers, uri.toString()); + } catch (IOException e) { + lastException = e; + } catch (URISyntaxException use) { + lastException = new IOException(use); + } + } while (++i != start && i < cluster.nodes.size()); + throw lastException; + } + + /** + * Execute a transaction method given a complete URI. 
+ * @param method the transaction method + * @param headers HTTP header values to send + * @param uri a properly urlencoded URI + * @return the HTTP response code + * @throws IOException + */ + public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri) + throws IOException { + // method.setURI(new URI(uri, true)); + for (Map.Entry e: extraHeaders.entrySet()) { + method.addHeader(e.getKey(), e.getValue()); + } + if (headers != null) { + for (Header header: headers) { + method.addHeader(header); + } + } + long startTime = System.currentTimeMillis(); + if (resp != null) EntityUtils.consumeQuietly(resp.getEntity()); + resp = httpClient.execute(method); + if (resp.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) { + // Authentication error + LOG.debug("Performing negotiation with the server."); + negotiate(method, uri); + resp = httpClient.execute(method); + } + + long endTime = System.currentTimeMillis(); + if (LOG.isTraceEnabled()) { + LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + + resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); + } + return resp; + } + + /** + * Execute a transaction method. Will call either executePathOnly + * or executeURI depending on whether a path only is supplied in + * 'path', or if a complete URI is passed instead, respectively. + * @param cluster the cluster definition + * @param method the HTTP method + * @param headers HTTP header values to send + * @param path the properly urlencoded path or URI + * @return the HTTP response code + * @throws IOException + */ + public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, + String path) throws IOException { + if (path.startsWith("/")) { + return executePathOnly(cluster, method, headers, path); + } + return executeURI(method, headers, path); + } + + /** + * Initiate client side Kerberos negotiation with the server. + * @param method method to inject the authentication token into. + * @param uri the String to parse as a URL. + * @throws IOException if unknown protocol is found. + */ + private void negotiate(HttpUriRequest method, String uri) throws IOException { + try { + AuthenticatedURL.Token token = new AuthenticatedURL.Token(); + KerberosAuthenticator authenticator = new KerberosAuthenticator(); + authenticator.authenticate(new URL(uri), token); + // Inject the obtained negotiated token in the method cookie + injectToken(method, token); + } catch (AuthenticationException e) { + LOG.error("Failed to negotiate with the server.", e); + throw new IOException(e); + } + } + + /** + * Helper method that injects an authentication token to send with the method. + * @param method method to inject the authentication token into. + * @param token authentication token to inject. 
+ */ + private void injectToken(HttpUriRequest method, AuthenticatedURL.Token token) { + String t = token.toString(); + if (t != null) { + if (!t.startsWith("\"")) { + t = "\"" + t + "\""; + } + method.addHeader(COOKIE, AUTH_COOKIE_EQ + t); + } + } + + /** + * @return the cluster definition + */ + public Cluster getCluster() { + return cluster; + } + + /** + * @param cluster the cluster definition + */ + public void setCluster(Cluster cluster) { + this.cluster = cluster; + } + + /** + * Send a HEAD request + * @param path the path or URI + * @return a Response object with response detail + * @throws IOException + */ + public Response head(String path) throws IOException { + return head(cluster, path, null); + } + + /** + * Send a HEAD request + * @param cluster the cluster definition + * @param path the path or URI + * @param headers the HTTP headers to include in the request + * @return a Response object with response detail + * @throws IOException + */ + public Response head(Cluster cluster, String path, Header[] headers) + throws IOException { + HttpHead method = new HttpHead(path); + try { + HttpResponse resp = execute(cluster, method, null, path); + return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), null); + } finally { + method.releaseConnection(); + } + } + + /** + * Send a GET request + * @param path the path or URI + * @return a Response object with response detail + * @throws IOException + */ + public Response get(String path) throws IOException { + return get(cluster, path); + } + + /** + * Send a GET request + * @param cluster the cluster definition + * @param path the path or URI + * @return a Response object with response detail + * @throws IOException + */ + public Response get(Cluster cluster, String path) throws IOException { + return get(cluster, path, EMPTY_HEADER_ARRAY); + } + + /** + * Send a GET request + * @param path the path or URI + * @param accept Accept header value + * @return a Response object with response detail + * @throws IOException + */ + public Response get(String path, String accept) throws IOException { + return get(cluster, path, accept); + } + + /** + * Send a GET request + * @param cluster the cluster definition + * @param path the path or URI + * @param accept Accept header value + * @return a Response object with response detail + * @throws IOException + */ + public Response get(Cluster cluster, String path, String accept) + throws IOException { + Header[] headers = new Header[1]; + headers[0] = new BasicHeader("Accept", accept); + return get(cluster, path, headers); + } + + /** + * Send a GET request + * @param path the path or URI + * @param headers the HTTP headers to include in the request, + * Accept must be supplied + * @return a Response object with response detail + * @throws IOException + */ + public Response get(String path, Header[] headers) throws IOException { + return get(cluster, path, headers); + } + + /** + * Returns the response body of the HTTPResponse, if any, as an array of bytes. + * If response body is not available or cannot be read, returns null + * + * Note: This will cause the entire response body to be buffered in memory. A + * malicious server may easily exhaust all the VM memory. It is strongly + * recommended, to use getResponseAsStream if the content length of the response + * is unknown or reasonably large. 
+ * + * @param resp HttpResponse + * @return The response body, null if body is empty + * @throws IOException If an I/O (transport) problem occurs while obtaining the + * response body. + */ + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = + "NP_LOAD_OF_KNOWN_NULL_VALUE", justification = "null is possible return value") + public static byte[] getResponseBody(HttpResponse resp) throws IOException { + if (resp.getEntity() == null) return null; + try (InputStream instream = resp.getEntity().getContent()) { + if (instream != null) { + long contentLength = resp.getEntity().getContentLength(); + if (contentLength > Integer.MAX_VALUE) { + //guard integer cast from overflow + throw new IOException("Content too large to be buffered: " + contentLength +" bytes"); + } + ByteArrayOutputStream outstream = new ByteArrayOutputStream( + contentLength > 0 ? (int) contentLength : 4*1024); + byte[] buffer = new byte[4096]; + int len; + while ((len = instream.read(buffer)) > 0) { + outstream.write(buffer, 0, len); + } + outstream.close(); + return outstream.toByteArray(); + } + return null; + } + } + + /** + * Send a GET request + * @param c the cluster definition + * @param path the path or URI + * @param headers the HTTP headers to include in the request + * @return a Response object with response detail + * @throws IOException + */ + public Response get(Cluster c, String path, Header[] headers) + throws IOException { + if (httpGet != null) { + httpGet.releaseConnection(); + } + httpGet = new HttpGet(path); + HttpResponse resp = execute(c, httpGet, headers, path); + return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), + resp, resp.getEntity() == null ? null : resp.getEntity().getContent()); + } + + /** + * Send a PUT request + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException + */ + public Response put(String path, String contentType, byte[] content) + throws IOException { + return put(cluster, path, contentType, content); + } + + /** + * Send a PUT request + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @param extraHdr extra Header to send + * @return a Response object with response detail + * @throws IOException + */ + public Response put(String path, String contentType, byte[] content, Header extraHdr) + throws IOException { + return put(cluster, path, contentType, content, extraHdr); + } + + /** + * Send a PUT request + * @param cluster the cluster definition + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException for error + */ + public Response put(Cluster cluster, String path, String contentType, + byte[] content) throws IOException { + Header[] headers = new Header[1]; + headers[0] = new BasicHeader("Content-Type", contentType); + return put(cluster, path, headers, content); + } + + /** + * Send a PUT request + * @param cluster the cluster definition + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @param extraHdr additional Header to send + * @return a Response object with response detail + * @throws IOException for error + */ + public Response put(Cluster cluster, String path, String contentType, + byte[] content, Header extraHdr) throws IOException { + int 
cnt = extraHdr == null ? 1 : 2; + Header[] headers = new Header[cnt]; + headers[0] = new BasicHeader("Content-Type", contentType); + if (extraHdr != null) { + headers[1] = extraHdr; + } + return put(cluster, path, headers, content); + } + + /** + * Send a PUT request + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be + * supplied + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException + */ + public Response put(String path, Header[] headers, byte[] content) + throws IOException { + return put(cluster, path, headers, content); + } + + /** + * Send a PUT request + * @param cluster the cluster definition + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be + * supplied + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException + */ + public Response put(Cluster cluster, String path, Header[] headers, + byte[] content) throws IOException { + HttpPut method = new HttpPut(path); + try { + method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); + HttpResponse resp = execute(cluster, method, headers, path); + headers = resp.getAllHeaders(); + content = getResponseBody(resp); + return new Response(resp.getStatusLine().getStatusCode(), headers, content); + } finally { + method.releaseConnection(); + } + } + + /** + * Send a POST request + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException + */ + public Response post(String path, String contentType, byte[] content) + throws IOException { + return post(cluster, path, contentType, content); + } + + /** + * Send a POST request + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @param extraHdr additional Header to send + * @return a Response object with response detail + * @throws IOException + */ + public Response post(String path, String contentType, byte[] content, Header extraHdr) + throws IOException { + return post(cluster, path, contentType, content, extraHdr); + } + + /** + * Send a POST request + * @param cluster the cluster definition + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException for error + */ + public Response post(Cluster cluster, String path, String contentType, + byte[] content) throws IOException { + Header[] headers = new Header[1]; + headers[0] = new BasicHeader("Content-Type", contentType); + return post(cluster, path, headers, content); + } + + /** + * Send a POST request + * @param cluster the cluster definition + * @param path the path or URI + * @param contentType the content MIME type + * @param content the content bytes + * @param extraHdr additional Header to send + * @return a Response object with response detail + * @throws IOException for error + */ + public Response post(Cluster cluster, String path, String contentType, + byte[] content, Header extraHdr) throws IOException { + int cnt = extraHdr == null ? 
1 : 2; + Header[] headers = new Header[cnt]; + headers[0] = new BasicHeader("Content-Type", contentType); + if (extraHdr != null) { + headers[1] = extraHdr; + } + return post(cluster, path, headers, content); + } + + /** + * Send a POST request + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be + * supplied + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException + */ + public Response post(String path, Header[] headers, byte[] content) + throws IOException { + return post(cluster, path, headers, content); + } + + /** + * Send a POST request + * @param cluster the cluster definition + * @param path the path or URI + * @param headers the HTTP headers to include, Content-Type must be + * supplied + * @param content the content bytes + * @return a Response object with response detail + * @throws IOException + */ + public Response post(Cluster cluster, String path, Header[] headers, + byte[] content) throws IOException { + HttpPost method = new HttpPost(path); + try { + method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); + HttpResponse resp = execute(cluster, method, headers, path); + headers = resp.getAllHeaders(); + content = getResponseBody(resp); + return new Response(resp.getStatusLine().getStatusCode(), headers, content); + } finally { + method.releaseConnection(); + } + } + + /** + * Send a DELETE request + * @param path the path or URI + * @return a Response object with response detail + * @throws IOException + */ + public Response delete(String path) throws IOException { + return delete(cluster, path); + } + + /** + * Send a DELETE request + * @param path the path or URI + * @param extraHdr additional Header to send + * @return a Response object with response detail + * @throws IOException + */ + public Response delete(String path, Header extraHdr) throws IOException { + return delete(cluster, path, extraHdr); + } + + /** + * Send a DELETE request + * @param cluster the cluster definition + * @param path the path or URI + * @return a Response object with response detail + * @throws IOException for error + */ + public Response delete(Cluster cluster, String path) throws IOException { + HttpDelete method = new HttpDelete(path); + try { + HttpResponse resp = execute(cluster, method, null, path); + Header[] headers = resp.getAllHeaders(); + byte[] content = getResponseBody(resp); + return new Response(resp.getStatusLine().getStatusCode(), headers, content); + } finally { + method.releaseConnection(); + } + } + + /** + * Send a DELETE request + * @param cluster the cluster definition + * @param path the path or URI + * @return a Response object with response detail + * @throws IOException for error + */ + public Response delete(Cluster cluster, String path, Header extraHdr) throws IOException { + HttpDelete method = new HttpDelete(path); + try { + Header[] headers = { extraHdr }; + HttpResponse resp = execute(cluster, method, headers, path); + headers = resp.getAllHeaders(); + byte[] content = getResponseBody(resp); + return new Response(resp.getStatusLine().getStatusCode(), headers, content); + } finally { + method.releaseConnection(); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java new file mode 100755 index 00000000..00847082 --- /dev/null +++ 
b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java @@ -0,0 +1,108 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.client; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A list of 'host:port' addresses of HTTP servers operating as a single + * entity, for example multiple redundant web service gateways. + */ +@InterfaceAudience.Public +public class Cluster { + protected List nodes = + Collections.synchronizedList(new ArrayList()); + protected String lastHost; + + /** + * Constructor + */ + public Cluster() {} + + /** + * Constructor + * @param nodes a list of service locations, in 'host:port' format + */ + public Cluster(List nodes) { + this.nodes.addAll(nodes); + } + + /** + * @return true if no locations have been added, false otherwise + */ + public boolean isEmpty() { + return nodes.isEmpty(); + } + + /** + * Add a node to the cluster + * @param node the service location in 'host:port' format + */ + public Cluster add(String node) { + nodes.add(node); + return this; + } + + /** + * Add a node to the cluster + * @param name host name + * @param port service port + */ + public Cluster add(String name, int port) { + StringBuilder sb = new StringBuilder(); + sb.append(name); + sb.append(':'); + sb.append(port); + return add(sb.toString()); + } + + /** + * Remove a node from the cluster + * @param node the service location in 'host:port' format + */ + public Cluster remove(String node) { + nodes.remove(node); + return this; + } + + /** + * Remove a node from the cluster + * @param name host name + * @param port service port + */ + public Cluster remove(String name, int port) { + StringBuilder sb = new StringBuilder(); + sb.append(name); + sb.append(':'); + sb.append(port); + return remove(sb.toString()); + } + + @Override public String toString() { + return "Cluster{" + + "nodes=" + nodes + + ", lastHost='" + lastHost + '\'' + + '}'; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java new file mode 100755 index 00000000..0e91005a --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java @@ -0,0 +1,170 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
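Because Cluster above is just an ordered 'host:port' list, giving it more than one node buys simple failover: executePathOnly picks a random starting node and walks the list until one gateway completes an HTTP exchange. A sketch with invented host names:

import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class MultiGatewaySketch {
  public static void main(String[] args) throws Exception {
    // Two redundant REST gateways fronting the same HBase cluster (hypothetical hosts).
    Cluster cluster = new Cluster()
        .add("gateway-1.example.com", 8080)
        .add("gateway-2.example.com", 8080);
    Client client = new Client(cluster);

    // A leading '/' routes through executePathOnly, which retries the remaining
    // nodes if the first one it tries cannot complete the request.
    Response resp = client.get("/version/rest", Constants.MIMETYPE_TEXT);
    System.out.println(cluster + " -> HTTP " + resp.getCode());
    client.shutdown();
  }
}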
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.client; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.http.Header; +import org.apache.http.HttpResponse; + +import org.apache.yetus.audience.InterfaceAudience; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * The HTTP result code, response headers, and body of an HTTP response. + */ +@InterfaceAudience.Public +public class Response { + private static final Logger LOG = LoggerFactory.getLogger(Response.class); + + private int code; + private Header[] headers; + private byte[] body; + private HttpResponse resp; + private InputStream stream; + + /** + * Constructor + * @param code the HTTP response code + */ + public Response(int code) { + this(code, null, null); + } + + /** + * Constructor + * @param code the HTTP response code + * @param headers the HTTP response headers + */ + public Response(int code, Header[] headers) { + this(code, headers, null); + } + + /** + * Constructor + * @param code the HTTP response code + * @param headers the HTTP response headers + * @param body the response body, can be null + */ + public Response(int code, Header[] headers, byte[] body) { + this.code = code; + this.headers = headers; + this.body = body; + } + + /** + * Constructor. Note: this is not thread-safe + * + * @param code the HTTP response code + * @param headers headers the HTTP response headers + * @param resp the response + * @param in Inputstream if the response had one. + */ + public Response(int code, Header[] headers, HttpResponse resp, InputStream in) { + this.code = code; + this.headers = headers; + this.body = null; + this.resp = resp; + this.stream = in; + } + + /** + * @return the HTTP response code + */ + public int getCode() { + return code; + } + + /** + * Gets the input stream instance. + * + * @return an instance of InputStream class. 
+ */ + public InputStream getStream(){ + return this.stream; + } + + /** + * @return the HTTP response headers + */ + public Header[] getHeaders() { + return headers; + } + + public String getHeader(String key) { + for (Header header : headers) { + if (header.getName().equalsIgnoreCase(key)) { + return header.getValue(); + } + } + return null; + } + + /** + * @return the value of the Location header + */ + public String getLocation() { + return getHeader("Location"); + } + + /** + * @return true if a response body was sent + */ + public boolean hasBody() { + return body != null; + } + + /** + * @return the HTTP response body + */ + public byte[] getBody() { + if (body == null) { + try { + body = Client.getResponseBody(resp); + } catch (IOException ioe) { + LOG.debug("encountered ioe when obtaining body", ioe); + } + } + return body; + } + + /** + * @param code the HTTP response code + */ + public void setCode(int code) { + this.code = code; + } + + /** + * @param headers the HTTP response headers + */ + public void setHeaders(Header[] headers) { + this.headers = headers; + } + + /** + * @param body the response body + */ + public void setBody(byte[] body) { + this.body = body; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java new file mode 100755 index 00000000..b9b8a006 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
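Since Response can carry the live entity stream (the four-argument constructor used by Client.get(Cluster, String, Header[])), large results such as streamed scans do not have to be buffered through getBody(). A sketch, with an assumed gateway and table:

import java.io.InputStream;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

public class StreamingGetSketch {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // assumed gateway
    Header[] headers = { new BasicHeader("Accept", Constants.MIMETYPE_XML) };

    // This overload returns a Response wrapping the open entity stream rather than a byte[].
    Response resp = client.get("/example_table/row-*?limit=1000", headers);
    System.out.println("scan -> HTTP " + resp.getCode());
    if (resp.getStream() != null) {
      long total = 0;
      try (InputStream in = resp.getStream()) {
        byte[] buf = new byte[8192];
        for (int n; (n = in.read(buf)) > 0; ) {
          total += n; // consume (or incrementally parse) the streamed CellSet here
        }
      }
      System.out.println("streamed " + total + " bytes");
    }
    client.shutdown();
  }
}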
+ */ +package org.apache.hadoop.hbase.rest.filter; + +import static org.apache.hadoop.hbase.rest.Constants.REST_AUTHENTICATION_PRINCIPAL; +import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_INTERFACE; +import static org.apache.hadoop.hbase.rest.Constants.REST_DNS_NAMESERVER; + +import java.io.IOException; +import java.util.Map; +import java.util.Properties; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.rest.RESTServer; +import org.apache.hadoop.hbase.util.DNS; +import org.apache.hadoop.hbase.util.Strings; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class AuthFilter extends AuthenticationFilter { + private static final Logger LOG = LoggerFactory.getLogger(AuthFilter.class); + private static final String REST_PREFIX = "hbase.rest.authentication."; + private static final int REST_PREFIX_LEN = REST_PREFIX.length(); + + /** + * Returns the configuration to be used by the authentication filter + * to initialize the authentication handler. + * + * This filter retrieves all HBase configurations and passes those started + * with REST_PREFIX to the authentication handler. It is useful to support + * plugging different authentication handlers. + */ + @Override + protected Properties getConfiguration( + String configPrefix, FilterConfig filterConfig) throws ServletException { + Properties props = super.getConfiguration(configPrefix, filterConfig); + //setting the cookie path to root '/' so it is used for all resources. + props.setProperty(AuthenticationFilter.COOKIE_PATH, "/"); + + Configuration conf = null; + // Dirty hack to get at the RESTServer's configuration. These should be pulled out + // of the FilterConfig. + if (RESTServer.conf != null) { + conf = RESTServer.conf; + } else { + conf = HBaseConfiguration.create(); + } + for (Map.Entry entry : conf) { + String name = entry.getKey(); + if (name.startsWith(REST_PREFIX)) { + String value = entry.getValue(); + if(name.equals(REST_AUTHENTICATION_PRINCIPAL)) { + try { + String machineName = Strings.domainNamePointerToHostName( + DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), + conf.get(REST_DNS_NAMESERVER, "default"))); + value = SecurityUtil.getServerPrincipal(value, machineName); + } catch (IOException ie) { + throw new ServletException("Failed to retrieve server principal", ie); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace("Setting property " + name + "=" + value); + } + name = name.substring(REST_PREFIX_LEN); + props.setProperty(name, value); + } + } + return props; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java new file mode 100755 index 00000000..f74e10ca --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java @@ -0,0 +1,72 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
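As a rough illustration of the prefix handling in AuthFilter.getConfiguration() above: only keys that start with hbase.rest.authentication. are forwarded to the wrapped Hadoop AuthenticationFilter, with that prefix stripped. The concrete key names and values below are assumptions for the sketch rather than values taken from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AuthFilterConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed example keys: after AuthFilter strips the
    // "hbase.rest.authentication." prefix, the authentication handler would
    // see them as "type" and "kerberos.principal".
    conf.set("hbase.rest.authentication.type", "kerberos");
    conf.set("hbase.rest.authentication.kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
  }
}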
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import java.io.IOException; +import java.util.zip.GZIPInputStream; + +import javax.servlet.ReadListener; +import javax.servlet.ServletInputStream; +import javax.servlet.http.HttpServletRequest; + +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class GZIPRequestStream extends ServletInputStream { + private GZIPInputStream in; + + public GZIPRequestStream(HttpServletRequest request) throws IOException { + this.in = new GZIPInputStream(request.getInputStream()); + } + + @Override + public int read() throws IOException { + return in.read(); + } + + @Override + public int read(byte[] b) throws IOException { + return in.read(b); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return in.read(b, off, len); + } + + @Override + public void close() throws IOException { + in.close(); + } + + @Override + public boolean isFinished() { + throw new UnsupportedOperationException("Asynchonous operation is not supported."); + } + + @Override + public boolean isReady() { + throw new UnsupportedOperationException("Asynchonous operation is not supported."); + } + + @Override + public void setReadListener(ReadListener listener) { + throw new UnsupportedOperationException("Asynchonous operation is not supported."); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java new file mode 100755 index 00000000..2290ecc0 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java @@ -0,0 +1,51 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.filter; + +import org.apache.yetus.audience.InterfaceAudience; + +import javax.servlet.ServletInputStream; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequestWrapper; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; + +@InterfaceAudience.Private +public class GZIPRequestWrapper extends HttpServletRequestWrapper { + private ServletInputStream is; + private BufferedReader reader; + + public GZIPRequestWrapper(HttpServletRequest request) throws IOException { + super(request); + this.is = new GZIPRequestStream(request); + this.reader = new BufferedReader(new InputStreamReader(this.is)); + } + + @Override + public ServletInputStream getInputStream() throws IOException { + return is; + } + + @Override + public BufferedReader getReader() throws IOException { + return reader; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java new file mode 100755 index 00000000..3fa1ad6f --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java @@ -0,0 +1,87 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.filter; + +import java.io.IOException; +import java.util.zip.GZIPOutputStream; + +import javax.servlet.ServletOutputStream; +import javax.servlet.WriteListener; +import javax.servlet.http.HttpServletResponse; + +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class GZIPResponseStream extends ServletOutputStream { + private HttpServletResponse response; + private GZIPOutputStream out; + + public GZIPResponseStream(HttpServletResponse response) throws IOException { + this.response = response; + this.out = new GZIPOutputStream(response.getOutputStream()); + response.addHeader("Content-Encoding", "gzip"); + } + + public void resetBuffer() { + if (out != null && !response.isCommitted()) { + response.setHeader("Content-Encoding", null); + } + out = null; + } + + @Override + public void write(int b) throws IOException { + out.write(b); + } + + @Override + public void write(byte[] b) throws IOException { + out.write(b); + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + out.write(b, off, len); + } + + @Override + public void close() throws IOException { + finish(); + out.close(); + } + + @Override + public void flush() throws IOException { + out.flush(); + } + + public void finish() throws IOException { + out.finish(); + } + + @Override + public boolean isReady() { + throw new UnsupportedOperationException("Asynchonous operation is not supported."); + } + + @Override + public void setWriteListener(WriteListener writeListener) { + throw new UnsupportedOperationException("Asynchonous operation is not supported."); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java new file mode 100755 index 00000000..53a26ea1 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java @@ -0,0 +1,147 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.filter; + +import java.io.IOException; +import java.io.PrintWriter; + +import javax.servlet.ServletOutputStream; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpServletResponseWrapper; + +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class GZIPResponseWrapper extends HttpServletResponseWrapper { + private HttpServletResponse response; + private ServletOutputStream os; + private PrintWriter writer; + private boolean compress = true; + + public GZIPResponseWrapper(HttpServletResponse response) { + super(response); + this.response = response; + } + + @Override + public void setStatus(int status) { + super.setStatus(status); + if (status < 200 || status >= 300) { + compress = false; + } + } + + @Override + public void addHeader(String name, String value) { + if (!"content-length".equalsIgnoreCase(name)) { + super.addHeader(name, value); + } + } + + @Override + public void setContentLength(int length) { + // do nothing + } + + @Override + public void setIntHeader(String name, int value) { + if (!"content-length".equalsIgnoreCase(name)) { + super.setIntHeader(name, value); + } + } + + @Override + public void setHeader(String name, String value) { + if (!"content-length".equalsIgnoreCase(name)) { + super.setHeader(name, value); + } + } + + @Override + public void flushBuffer() throws IOException { + if (writer != null) { + writer.flush(); + } + if (os != null && (os instanceof GZIPResponseStream)) { + ((GZIPResponseStream)os).finish(); + } else { + getResponse().flushBuffer(); + } + } + + @Override + public void reset() { + super.reset(); + if (os != null && (os instanceof GZIPResponseStream)) { + ((GZIPResponseStream)os).resetBuffer(); + } + writer = null; + os = null; + compress = true; + } + + @Override + public void resetBuffer() { + super.resetBuffer(); + if (os != null && (os instanceof GZIPResponseStream)) { + ((GZIPResponseStream)os).resetBuffer(); + } + writer = null; + os = null; + } + + @Override + public void sendError(int status, String msg) throws IOException { + resetBuffer(); + super.sendError(status, msg); + } + + @Override + public void sendError(int status) throws IOException { + resetBuffer(); + super.sendError(status); + } + + @Override + public void sendRedirect(String location) throws IOException { + resetBuffer(); + super.sendRedirect(location); + } + + @Override + public ServletOutputStream getOutputStream() throws IOException { + if (os == null) { + if (!response.isCommitted() && compress) { + os = (ServletOutputStream)new GZIPResponseStream(response); + } else { + os = response.getOutputStream(); + } + } + return os; + } + + @Override + public PrintWriter getWriter() throws IOException { + if (writer == null) { + writer = new PrintWriter(getOutputStream()); + } + return writer; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java new file mode 100755 index 00000000..4ba9eca3 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java @@ -0,0 +1,85 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.filter; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.HashSet; +import java.util.Locale; +import java.util.Set; +import java.util.StringTokenizer; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.hadoop.hbase.HBaseInterfaceAudience; + +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) +public class GzipFilter implements Filter { + private Set mimeTypes = new HashSet<>(); + + @Override + public void init(FilterConfig filterConfig) { + String s = filterConfig.getInitParameter("mimeTypes"); + if (s != null) { + StringTokenizer tok = new StringTokenizer(s, ",", false); + while (tok.hasMoreTokens()) { + mimeTypes.add(tok.nextToken()); + } + } + } + + @Override + public void destroy() { + } + + @Override + public void doFilter(ServletRequest req, ServletResponse rsp, + FilterChain chain) throws IOException, ServletException { + HttpServletRequest request = (HttpServletRequest)req; + HttpServletResponse response = (HttpServletResponse)rsp; + String contentEncoding = request.getHeader("content-encoding"); + String acceptEncoding = request.getHeader("accept-encoding"); + String contentType = request.getHeader("content-type"); + if ((contentEncoding != null) && + (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { + request = new GZIPRequestWrapper(request); + } + if (((acceptEncoding != null) && + (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) || + ((contentType != null) && mimeTypes.contains(contentType))) { + response = new GZIPResponseWrapper(response); + } + chain.doFilter(request, response); + if (response instanceof GZIPResponseWrapper) { + OutputStream os = response.getOutputStream(); + if (os instanceof GZIPResponseStream) { + ((GZIPResponseStream)os).finish(); + } + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java new file mode 100755 index 00000000..94eb314e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
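The GzipFilter above decides per request whether to wrap the request and/or the response; how it is registered depends on the servlet container. A minimal sketch, assuming an embedded Jetty ServletContextHandler such as the ones used elsewhere in this patch; the path spec and mime type list are illustrative only.

import java.util.EnumSet;
import javax.servlet.DispatcherType;
import org.apache.hadoop.hbase.rest.filter.GzipFilter;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;

public class GzipFilterWiringSketch {
  public static void wire(ServletContextHandler context) {
    FilterHolder gzip = new FilterHolder(GzipFilter.class);
    // Requests whose Content-Type header matches one of these types also get a
    // compressed response, even without an Accept-Encoding: gzip header
    // (see GzipFilter.doFilter above).
    gzip.setInitParameter("mimeTypes", "text/xml,application/json");
    context.addFilter(gzip, "/*", EnumSet.of(DispatcherType.REQUEST));
  }
}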
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.filter;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This filter provides protection against cross site request forgery (CSRF)
+ * attacks for REST APIs. Enabling this filter on an endpoint requires all
+ * clients to send a particular (configurable) HTTP header with every request.
+ * In the absence of this header the filter will reject the attempt as a bad
+ * request.
+ */
+@InterfaceAudience.Public
+public class RestCsrfPreventionFilter implements Filter {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RestCsrfPreventionFilter.class);
+
+  public static final String HEADER_USER_AGENT = "User-Agent";
+  public static final String BROWSER_USER_AGENT_PARAM =
+      "browser-useragents-regex";
+  public static final String CUSTOM_HEADER_PARAM = "custom-header";
+  public static final String CUSTOM_METHODS_TO_IGNORE_PARAM =
+      "methods-to-ignore";
+  static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*";
+  public static final String HEADER_DEFAULT = "X-XSRF-HEADER";
+  static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE";
+  private String headerName = HEADER_DEFAULT;
+  private Set<String> methodsToIgnore = null;
+  private Set<Pattern> browserUserAgents;
+
+  @Override
+  public void init(FilterConfig filterConfig) {
+    String customHeader = filterConfig.getInitParameter(CUSTOM_HEADER_PARAM);
+    if (customHeader != null) {
+      headerName = customHeader;
+    }
+    String customMethodsToIgnore =
+        filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM);
+    if (customMethodsToIgnore != null) {
+      parseMethodsToIgnore(customMethodsToIgnore);
+    } else {
+      parseMethodsToIgnore(METHODS_TO_IGNORE_DEFAULT);
+    }
+
+    String agents = filterConfig.getInitParameter(BROWSER_USER_AGENT_PARAM);
+    if (agents == null) {
+      agents = BROWSER_USER_AGENTS_DEFAULT;
+    }
+    parseBrowserUserAgents(agents);
+    LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, " +
+        "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s",
+        headerName, methodsToIgnore, browserUserAgents));
+  }
+
+  void parseBrowserUserAgents(String userAgents) {
+    String[] agentsArray = userAgents.split(",");
+    browserUserAgents = new HashSet<>();
+    for (String patternString : agentsArray) {
+      browserUserAgents.add(Pattern.compile(patternString));
+    }
+  }
+
+  void parseMethodsToIgnore(String mti) {
+    String[] methods = mti.split(",");
+    methodsToIgnore = new HashSet<>();
+    Collections.addAll(methodsToIgnore, methods);
+  }
+
+  /**
+   * This method interrogates the User-Agent String and returns whether it
+   * refers to a browser. If it's not a browser, then the requirement for the
+   * CSRF header will not be enforced; if it is a browser, the requirement will
+   * be enforced.
+   *

+ * A User-Agent String is considered to be a browser if it matches + * any of the regex patterns from browser-useragent-regex; the default + * behavior is to consider everything a browser that matches the following: + * "^Mozilla.*,^Opera.*". Subclasses can optionally override + * this method to use different behavior. + * + * @param userAgent The User-Agent String, or null if there isn't one + * @return true if the User-Agent String refers to a browser, false if not + */ + protected boolean isBrowser(String userAgent) { + if (userAgent == null) { + return false; + } + for (Pattern pattern : browserUserAgents) { + Matcher matcher = pattern.matcher(userAgent); + if (matcher.matches()) { + return true; + } + } + return false; + } + + /** + * Defines the minimal API requirements for the filter to execute its + * filtering logic. This interface exists to facilitate integration in + * components that do not run within a servlet container and therefore cannot + * rely on a servlet container to dispatch to the {@link #doFilter} method. + * Applications that do run inside a servlet container will not need to write + * code that uses this interface. Instead, they can use typical servlet + * container configuration mechanisms to insert the filter. + */ + public interface HttpInteraction { + /** + * Returns the value of a header. + * + * @param header name of header + * @return value of header + */ + String getHeader(String header); + + /** + * Returns the method. + * + * @return method + */ + String getMethod(); + + /** + * Called by the filter after it decides that the request may proceed. + * + * @throws IOException if there is an I/O error + * @throws ServletException if the implementation relies on the servlet API + * and a servlet API call has failed + */ + void proceed() throws IOException, ServletException; + + /** + * Called by the filter after it decides that the request is a potential + * CSRF attack and therefore must be rejected. + * + * @param code status code to send + * @param message response message + * @throws IOException if there is an I/O error + */ + void sendError(int code, String message) throws IOException; + } + + /** + * Handles an {@link HttpInteraction} by applying the filtering logic. + * + * @param httpInteraction caller's HTTP interaction + * @throws IOException if there is an I/O error + * @throws ServletException if the implementation relies on the servlet API + * and a servlet API call has failed + */ + public void handleHttpInteraction(HttpInteraction httpInteraction) + throws IOException, ServletException { + if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) || + methodsToIgnore.contains(httpInteraction.getMethod()) || + httpInteraction.getHeader(headerName) != null) { + httpInteraction.proceed(); + } else { + httpInteraction.sendError(HttpServletResponse.SC_BAD_REQUEST, + "Missing Required Header for CSRF Vulnerability Protection"); + } + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, + final FilterChain chain) throws IOException, ServletException { + final HttpServletRequest httpRequest = (HttpServletRequest)request; + final HttpServletResponse httpResponse = (HttpServletResponse)response; + handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, + httpResponse, chain)); + } + + @Override + public void destroy() { + } + + /** + * Constructs a mapping of configuration properties to be used for filter + * initialization. 
The mapping includes all properties that start with the + * specified configuration prefix. Property names in the mapping are trimmed + * to remove the configuration prefix. + * + * @param conf configuration to read + * @param confPrefix configuration prefix + * @return mapping of configuration properties to be used for filter + * initialization + */ + public static Map getFilterParams(Configuration conf, String confPrefix) { + Map filterConfigMap = new HashMap<>(); + for (Map.Entry entry : conf) { + String name = entry.getKey(); + if (name.startsWith(confPrefix)) { + String value = conf.get(name); + name = name.substring(confPrefix.length()); + filterConfigMap.put(name, value); + } + } + return filterConfigMap; + } + + /** + * {@link HttpInteraction} implementation for use in the servlet filter. + */ + private static final class ServletFilterHttpInteraction implements HttpInteraction { + private final FilterChain chain; + private final HttpServletRequest httpRequest; + private final HttpServletResponse httpResponse; + + /** + * Creates a new ServletFilterHttpInteraction. + * + * @param httpRequest request to process + * @param httpResponse response to process + * @param chain filter chain to forward to if HTTP interaction is allowed + */ + public ServletFilterHttpInteraction(HttpServletRequest httpRequest, + HttpServletResponse httpResponse, FilterChain chain) { + this.httpRequest = httpRequest; + this.httpResponse = httpResponse; + this.chain = chain; + } + + @Override + public String getHeader(String header) { + return httpRequest.getHeader(header); + } + + @Override + public String getMethod() { + return httpRequest.getMethod(); + } + + @Override + public void proceed() throws IOException, ServletException { + chain.doFilter(httpRequest, httpResponse); + } + + @Override + public void sendError(int code, String message) throws IOException { + httpResponse.sendError(code, message); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/AdminAuthorizedFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/AdminAuthorizedFilter.java new file mode 100755 index 00000000..9f248241 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/AdminAuthorizedFilter.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
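RestCsrfPreventionFilter above exposes handleHttpInteraction(HttpInteraction) precisely so the check can run outside a servlet container; a small sketch of that path follows. The header values and the no-op FilterConfig are invented for illustration.

import java.io.IOException;
import java.util.Collections;
import java.util.Enumeration;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;

public class CsrfInteractionSketch {
  public static void main(String[] args) throws IOException, ServletException {
    RestCsrfPreventionFilter filter = new RestCsrfPreventionFilter();
    // No init parameters, so the filter falls back to its documented defaults
    // (X-XSRF-HEADER, GET/OPTIONS/HEAD/TRACE ignored, browser user-agent regexes).
    filter.init(new FilterConfig() {
      @Override public String getFilterName() { return "csrf"; }
      @Override public ServletContext getServletContext() { return null; }
      @Override public String getInitParameter(String name) { return null; }
      @Override public Enumeration<String> getInitParameterNames() {
        return Collections.emptyEnumeration();
      }
    });

    filter.handleHttpInteraction(new RestCsrfPreventionFilter.HttpInteraction() {
      @Override public String getHeader(String header) {
        // Pretend a browser client that did include the CSRF header.
        if ("User-Agent".equals(header)) {
          return "Mozilla/5.0";
        }
        return "X-XSRF-HEADER".equals(header) ? "1" : null;
      }
      @Override public String getMethod() { return "PUT"; }
      @Override public void proceed() { System.out.println("request allowed"); }
      @Override public void sendError(int code, String message) {
        System.out.println("rejected: " + code + " " + message);
      }
    });
  }
}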
+ */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.yetus.audience.InterfaceAudience; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; + +@InterfaceAudience.Private +public class AdminAuthorizedFilter implements Filter { + + private Configuration conf; + private AccessControlList adminsAcl; + + @Override public void init(FilterConfig filterConfig) throws ServletException { + adminsAcl = (AccessControlList) filterConfig.getServletContext().getAttribute( + HttpServer.ADMINS_ACL); + conf = (Configuration) filterConfig.getServletContext().getAttribute( + HttpServer.CONF_CONTEXT_ATTRIBUTE); + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + if (!(request instanceof HttpServletRequest) || !(response instanceof HttpServletResponse)) { + throw new UnsupportedOperationException("Only accepts HTTP"); + } + HttpServletRequest httpReq = (HttpServletRequest) request; + HttpServletResponse httpResp = (HttpServletResponse) response; + + if (!HttpServer.hasAdministratorAccess(conf, adminsAcl, httpReq, httpResp)) { + return; + } + + chain.doFilter(request, response); + } + + @Override public void destroy() {} +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ClickjackingPreventionFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ClickjackingPreventionFilter.java new file mode 100755 index 00000000..0db32b2e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ClickjackingPreventionFilter.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class ClickjackingPreventionFilter implements Filter {
+  private FilterConfig filterConfig;
+  private static final String DEFAULT_XFRAMEOPTIONS = "DENY";
+
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+    this.filterConfig = filterConfig;
+  }
+
+  @Override
+  public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
+      throws IOException, ServletException {
+    HttpServletResponse httpRes = (HttpServletResponse) res;
+    httpRes.addHeader("X-Frame-Options", filterConfig.getInitParameter("xframeoptions"));
+    chain.doFilter(req, res);
+  }
+
+  @Override
+  public void destroy() {
+  }
+
+  public static Map<String, String> getDefaultParameters(Configuration conf) {
+    Map<String, String> params = new HashMap<>();
+    params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode",
+        DEFAULT_XFRAMEOPTIONS));
+    return params;
+  }
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HtmlQuoting.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HtmlQuoting.java
new file mode 100755
index 00000000..b47357a3
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HtmlQuoting.java
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * This class is responsible for quoting HTML characters.
+ */
+@InterfaceAudience.Private
+public final class HtmlQuoting {
+  private static final byte[] ampBytes = Bytes.toBytes("&amp;");
+  private static final byte[] aposBytes = Bytes.toBytes("&apos;");
+  private static final byte[] gtBytes = Bytes.toBytes("&gt;");
+  private static final byte[] ltBytes = Bytes.toBytes("&lt;");
+  private static final byte[] quotBytes = Bytes.toBytes("&quot;");
+
+  /**
+   * Does the given string need to be quoted?
+ * @param data the string to check + * @param off the starting position + * @param len the number of bytes to check + * @return does the string contain any of the active html characters? + */ + public static boolean needsQuoting(byte[] data, int off, int len) { + if (off+len > data.length) { + throw new IllegalStateException("off+len=" + off+len + " should be lower" + + " than data length=" + data.length); + } + for(int i=off; i< off+len; ++i) { + switch(data[i]) { + case '&': + case '<': + case '>': + case '\'': + case '"': + return true; + default: + break; + } + } + return false; + } + + /** + * Does the given string need to be quoted? + * @param str the string to check + * @return does the string contain any of the active html characters? + */ + public static boolean needsQuoting(String str) { + if (str == null) { + return false; + } + byte[] bytes = Bytes.toBytes(str); + return needsQuoting(bytes, 0 , bytes.length); + } + + /** + * Quote all of the active HTML characters in the given string as they + * are added to the buffer. + * @param output the stream to write the output to + * @param buffer the byte array to take the characters from + * @param off the index of the first byte to quote + * @param len the number of bytes to quote + */ + public static void quoteHtmlChars(OutputStream output, byte[] buffer, int off, int len) + throws IOException { + for(int i=off; i < off+len; i++) { + switch (buffer[i]) { + case '&': + output.write(ampBytes); + break; + case '<': + output.write(ltBytes); + break; + case '>': + output.write(gtBytes); + break; + case '\'': + output.write(aposBytes); + break; + case '"': + output.write(quotBytes); + break; + default: + output.write(buffer, i, 1); + break; + } + } + } + + /** + * Quote the given item to make it html-safe. + * @param item the string to quote + * @return the quoted string + */ + public static String quoteHtmlChars(String item) { + if (item == null) { + return null; + } + byte[] bytes = Bytes.toBytes(item); + if (needsQuoting(bytes, 0, bytes.length)) { + ByteArrayOutputStream buffer = new ByteArrayOutputStream(); + try { + quoteHtmlChars(buffer, bytes, 0, bytes.length); + } catch (IOException ioe) { + // Won't happen, since it is a bytearrayoutputstream + } + return buffer.toString(); + } else { + return item; + } + } + + /** + * Return an output stream that quotes all of the output. + * @param out the stream to write the quoted output to + * @return a new stream that the application show write to + */ + public static OutputStream quoteOutputStream(final OutputStream out) { + return new OutputStream() { + private byte[] data = new byte[1]; + @Override + public void write(byte[] data, int off, int len) throws IOException { + quoteHtmlChars(out, data, off, len); + } + + @Override + public void write(int b) throws IOException { + data[0] = (byte) b; + quoteHtmlChars(out, data, 0, 1); + } + + @Override + public void flush() throws IOException { + out.flush(); + } + + @Override + public void close() throws IOException { + out.close(); + } + }; + } + + /** + * Remove HTML quoting from a string. 
+   * @param item the string to unquote
+   * @return the unquoted string
+   */
+  public static String unquoteHtmlChars(String item) {
+    if (item == null) {
+      return null;
+    }
+    int next = item.indexOf('&');
+    // nothing was quoted
+    if (next == -1) {
+      return item;
+    }
+    int len = item.length();
+    int posn = 0;
+    StringBuilder buffer = new StringBuilder();
+    while (next != -1) {
+      buffer.append(item.substring(posn, next));
+      if (item.startsWith("&amp;", next)) {
+        buffer.append('&');
+        next += 5;
+      } else if (item.startsWith("&apos;", next)) {
+        buffer.append('\'');
+        next += 6;
+      } else if (item.startsWith("&gt;", next)) {
+        buffer.append('>');
+        next += 4;
+      } else if (item.startsWith("&lt;", next)) {
+        buffer.append('<');
+        next += 4;
+      } else if (item.startsWith("&quot;", next)) {
+        buffer.append('"');
+        next += 6;
+      } else {
+        int end = item.indexOf(';', next)+1;
+        if (end == 0) {
+          end = len;
+        }
+        throw new IllegalArgumentException("Bad HTML quoting for " +
+            item.substring(next,end));
+      }
+      posn = next;
+      next = item.indexOf('&', posn);
+    }
+    buffer.append(item.substring(posn, len));
+    return buffer.toString();
+  }
+
+  public static void main(String[] args) {
+    if (args.length == 0) {
+      throw new IllegalArgumentException("Please provide some arguments");
+    }
+    for(String arg:args) {
+      System.out.println("Original: " + arg);
+      String quoted = quoteHtmlChars(arg);
+      System.out.println("Quoted: "+ quoted);
+      String unquoted = unquoteHtmlChars(quoted);
+      System.out.println("Unquoted: " + unquoted);
+      System.out.println();
+    }
+  }
+
+  private HtmlQuoting() {}
+}
diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpConfig.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpConfig.java
new file mode 100755
index 00000000..e27b0900
--- /dev/null
+++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpConfig.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest.http;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+/**
+ * Statics to get access to HTTP-related configuration.
+ */ +@InterfaceAudience.Private +@InterfaceStability.Unstable +public class HttpConfig { + private Policy policy; + public enum Policy { + HTTP_ONLY, + HTTPS_ONLY, + HTTP_AND_HTTPS; + + public Policy fromString(String value) { + if (HTTPS_ONLY.name().equalsIgnoreCase(value)) { + return HTTPS_ONLY; + } else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) { + return HTTP_AND_HTTPS; + } + return HTTP_ONLY; + } + + public boolean isHttpEnabled() { + return this == HTTP_ONLY || this == HTTP_AND_HTTPS; + } + + public boolean isHttpsEnabled() { + return this == HTTPS_ONLY || this == HTTP_AND_HTTPS; + } + } + + public HttpConfig(final Configuration conf) { + boolean sslEnabled = conf.getBoolean( + ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, + ServerConfigurationKeys.HBASE_SSL_ENABLED_DEFAULT); + policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY; + if (sslEnabled) { + conf.addResource("ssl-server.xml"); + conf.addResource("ssl-client.xml"); + } + } + + public void setPolicy(Policy policy) { + this.policy = policy; + } + + public boolean isSecure() { + return policy == Policy.HTTPS_ONLY; + } + + public String getSchemePrefix() { + return (isSecure()) ? "https://" : "http://"; + } + + public String getScheme(Policy policy) { + return policy == Policy.HTTPS_ONLY ? "https://" : "http://"; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServer.java new file mode 100755 index 00000000..61c3e76d --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServer.java @@ -0,0 +1,1508 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
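A short, illustrative use of the HttpConfig helper above. HttpConfig references ServerConfigurationKeys without an import, so the sketch assumes that class lives in the same org.apache.hadoop.hbase.rest.http package.

package org.apache.hadoop.hbase.rest.http;

import org.apache.hadoop.conf.Configuration;

public class HttpConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With SSL enabled the policy becomes HTTPS_ONLY and the constructor also
    // adds ssl-server.xml / ssl-client.xml as configuration resources.
    conf.setBoolean(ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, true);

    HttpConfig httpConfig = new HttpConfig(conf);
    System.out.println(httpConfig.isSecure());         // true
    System.out.println(httpConfig.getSchemePrefix());  // https://
  }
}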
+ */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.rest.http.conf.ConfServlet; +import org.apache.hadoop.hbase.rest.http.jmx.JMXJsonServlet; +import org.apache.hadoop.hbase.rest.http.log.LogLevel; +import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.http.AdminAuthorizedServlet; +import org.apache.hadoop.http.FilterContainer; +import org.apache.hadoop.http.FilterInitializer; +import org.apache.hadoop.http.HttpRequestLog; +import org.apache.hadoop.http.NoCacheFilter; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.Shell; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.eclipse.jetty.http.HttpVersion; +import org.eclipse.jetty.server.Handler; +import org.eclipse.jetty.server.HttpConfiguration; +import org.eclipse.jetty.server.HttpConnectionFactory; +import org.eclipse.jetty.server.RequestLog; +import org.eclipse.jetty.server.SecureRequestCustomizer; +import org.eclipse.jetty.server.Server; +import org.eclipse.jetty.server.ServerConnector; +import org.eclipse.jetty.server.SslConnectionFactory; +import org.eclipse.jetty.server.handler.ContextHandlerCollection; +import org.eclipse.jetty.server.handler.HandlerCollection; +import org.eclipse.jetty.server.handler.RequestLogHandler; +import org.eclipse.jetty.servlet.DefaultServlet; +import org.eclipse.jetty.servlet.FilterHolder; +import org.eclipse.jetty.servlet.FilterMapping; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.servlet.ServletHolder; +import org.eclipse.jetty.util.MultiException; +import org.eclipse.jetty.util.ssl.SslContextFactory; +import org.eclipse.jetty.util.thread.QueuedThreadPool; +import org.eclipse.jetty.webapp.WebAppContext; +import org.glassfish.jersey.server.ResourceConfig; +import org.glassfish.jersey.servlet.ServletContainer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequestWrapper; +import javax.servlet.http.HttpServletResponse; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.io.PrintStream; +import java.net.BindException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import 
java.util.ArrayList; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Create a Jetty embedded server to answer http requests. The primary goal + * is to serve up status information for the server. + * There are three contexts: + * "/logs/" -> points to the log directory + * "/static/" -> points to common static files (src/webapps/static) + * "/" -> the jsp server code from (src/webapps/<name>) + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class HttpServer implements FilterContainer { + private static final Logger LOG = LoggerFactory.getLogger(HttpServer.class); + private static final String EMPTY_STRING = ""; + + private static final int DEFAULT_MAX_HEADER_SIZE = 64 * 1024; // 64K + + static final String FILTER_INITIALIZERS_PROPERTY + = "hbase.http.filter.initializers"; + static final String HTTP_MAX_THREADS = "hbase.http.max.threads"; + + public static final String HTTP_UI_AUTHENTICATION = "hbase.security.authentication.ui"; + static final String HTTP_AUTHENTICATION_PREFIX = "hbase.security.authentication."; + static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX + + "spnego."; + static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX = "kerberos.principal"; + public static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY = + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX; + static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX = "kerberos.keytab"; + public static final String HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY = + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX; + static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules"; + public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY = + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX; + static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable"; + public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY = + HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX; + public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; + static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = + "signature.secret.file"; + public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY = + HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX; + public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY = + HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.users"; + public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY = + HTTP_SPNEGO_AUTHENTICATION_PREFIX + "admin.groups"; + public static final String HTTP_PRIVILEGED_CONF_KEY = + "hbase.security.authentication.ui.config.protected"; + public static final boolean HTTP_PRIVILEGED_CONF_DEFAULT = false; + + // The ServletContext attribute where the daemon Configuration + // gets stored. 
+ public static final String CONF_CONTEXT_ATTRIBUTE = "hbase.conf"; + public static final String ADMINS_ACL = "admins.acl"; + public static final String BIND_ADDRESS = "bind.address"; + public static final String SPNEGO_FILTER = "SpnegoFilter"; + public static final String SPNEGO_PROXYUSER_FILTER = "SpnegoProxyUserFilter"; + public static final String NO_CACHE_FILTER = "NoCacheFilter"; + public static final String APP_DIR = "webapps"; + + private final AccessControlList adminsAcl; + + protected final Server webServer; + protected String appDir; + protected String logDir; + + private static final class ListenerInfo { + /** + * Boolean flag to determine whether the HTTP server should clean up the + * listener in stop(). + */ + private final boolean isManaged; + private final ServerConnector listener; + private ListenerInfo(boolean isManaged, ServerConnector listener) { + this.isManaged = isManaged; + this.listener = listener; + } + } + + private final List listeners = Lists.newArrayList(); + + @VisibleForTesting + public List getServerConnectors() { + return listeners.stream().map(info -> info.listener).collect(Collectors.toList()); + } + + protected final WebAppContext webAppContext; + protected final boolean findPort; + protected final Map defaultContexts = new HashMap<>(); + protected final List filterNames = new ArrayList<>(); + protected final boolean authenticationEnabled; + static final String STATE_DESCRIPTION_ALIVE = " - alive"; + static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; + + /** + * Class to construct instances of HTTP server with specific options. + */ + public static class Builder { + private ArrayList endpoints = Lists.newArrayList(); + private Configuration conf; + private String[] pathSpecs; + private AccessControlList adminsAcl; + private boolean securityEnabled = false; + private String usernameConfKey; + private String keytabConfKey; + private boolean needsClientAuth; + + private String hostName; + private String appDir = APP_DIR; + private String logDir; + private boolean findPort; + + private String trustStore; + private String trustStorePassword; + private String trustStoreType; + + private String keyStore; + private String keyStorePassword; + private String keyStoreType; + + // The -keypass option in keytool + private String keyPassword; + + private String kerberosNameRulesKey; + private String signatureSecretFileKey; + + /** + * @see #setAppDir(String) + * @deprecated Since 0.99.0. Use builder pattern via {@link #setAppDir(String)} instead. + */ + @Deprecated + private String name; + /** + * @see #addEndpoint(URI) + * @deprecated Since 0.99.0. Use builder pattern via {@link #addEndpoint(URI)} instead. + */ + @Deprecated + private String bindAddress; + /** + * @see #addEndpoint(URI) + * @deprecated Since 0.99.0. Use builder pattern via {@link #addEndpoint(URI)} instead. + */ + @Deprecated + private int port = -1; + + /** + * Add an endpoint that the HTTP server should listen to. + * + * @param endpoint + * the endpoint of that the HTTP server should listen to. The + * scheme specifies the protocol (i.e. HTTP / HTTPS), the host + * specifies the binding address, and the port specifies the + * listening port. Unspecified or zero port means that the server + * can listen to any port. + */ + public Builder addEndpoint(URI endpoint) { + endpoints.add(endpoint); + return this; + } + + /** + * Set the hostname of the http server. The host name is used to resolve the + * _HOST field in Kerberos principals. 
The hostname of the first listener + * will be used if the name is unspecified. + */ + public Builder hostName(String hostName) { + this.hostName = hostName; + return this; + } + + public Builder trustStore(String location, String password, String type) { + this.trustStore = location; + this.trustStorePassword = password; + this.trustStoreType = type; + return this; + } + + public Builder keyStore(String location, String password, String type) { + this.keyStore = location; + this.keyStorePassword = password; + this.keyStoreType = type; + return this; + } + + public Builder keyPassword(String password) { + this.keyPassword = password; + return this; + } + + /** + * Specify whether the server should authorize the client in SSL + * connections. + */ + public Builder needsClientAuth(boolean value) { + this.needsClientAuth = value; + return this; + } + + /** + * @see #setAppDir(String) + * @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead. + */ + @Deprecated + public Builder setName(String name){ + this.name = name; + return this; + } + + /** + * @see #addEndpoint(URI) + * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. + */ + @Deprecated + public Builder setBindAddress(String bindAddress){ + this.bindAddress = bindAddress; + return this; + } + + /** + * @see #addEndpoint(URI) + * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. + */ + @Deprecated + public Builder setPort(int port) { + this.port = port; + return this; + } + + public Builder setFindPort(boolean findPort) { + this.findPort = findPort; + return this; + } + + public Builder setConf(Configuration conf) { + this.conf = conf; + return this; + } + + public Builder setPathSpec(String[] pathSpec) { + this.pathSpecs = pathSpec; + return this; + } + + public Builder setACL(AccessControlList acl) { + this.adminsAcl = acl; + return this; + } + + public Builder setSecurityEnabled(boolean securityEnabled) { + this.securityEnabled = securityEnabled; + return this; + } + + public Builder setUsernameConfKey(String usernameConfKey) { + this.usernameConfKey = usernameConfKey; + return this; + } + + public Builder setKeytabConfKey(String keytabConfKey) { + this.keytabConfKey = keytabConfKey; + return this; + } + + public Builder setKerberosNameRulesKey(String kerberosNameRulesKey) { + this.kerberosNameRulesKey = kerberosNameRulesKey; + return this; + } + + public Builder setSignatureSecretFileKey(String signatureSecretFileKey) { + this.signatureSecretFileKey = signatureSecretFileKey; + return this; + } + + public Builder setAppDir(String appDir) { + this.appDir = appDir; + return this; + } + + public Builder setLogDir(String logDir) { + this.logDir = logDir; + return this; + } + + public HttpServer build() throws IOException { + + // Do we still need to assert this non null name if it is deprecated? 
+ if (this.name == null) { + throw new HadoopIllegalArgumentException("name is not set"); + } + + // Make the behavior compatible with deprecated interfaces + if (bindAddress != null && port != -1) { + try { + endpoints.add(0, new URI("http", "", bindAddress, port, "", "", "")); + } catch (URISyntaxException e) { + throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e); + } + } + + if (endpoints.isEmpty()) { + throw new HadoopIllegalArgumentException("No endpoints specified"); + } + + if (hostName == null) { + hostName = endpoints.get(0).getHost(); + } + + if (this.conf == null) { + conf = new Configuration(); + } + + HttpServer server = new HttpServer(this); + + for (URI ep : endpoints) { + ServerConnector listener = null; + String scheme = ep.getScheme(); + HttpConfiguration httpConfig = new HttpConfiguration(); + httpConfig.setSecureScheme("https"); + httpConfig.setHeaderCacheSize(DEFAULT_MAX_HEADER_SIZE); + httpConfig.setResponseHeaderSize(DEFAULT_MAX_HEADER_SIZE); + httpConfig.setRequestHeaderSize(DEFAULT_MAX_HEADER_SIZE); + + if ("http".equals(scheme)) { + listener = new ServerConnector(server.webServer, new HttpConnectionFactory(httpConfig)); + } else if ("https".equals(scheme)) { + HttpConfiguration httpsConfig = new HttpConfiguration(httpConfig); + httpsConfig.addCustomizer(new SecureRequestCustomizer()); + SslContextFactory sslCtxFactory = new SslContextFactory(); + sslCtxFactory.setNeedClientAuth(needsClientAuth); + sslCtxFactory.setKeyManagerPassword(keyPassword); + + if (keyStore != null) { + sslCtxFactory.setKeyStorePath(keyStore); + sslCtxFactory.setKeyStoreType(keyStoreType); + sslCtxFactory.setKeyStorePassword(keyStorePassword); + } + + if (trustStore != null) { + sslCtxFactory.setTrustStorePath(trustStore); + sslCtxFactory.setTrustStoreType(trustStoreType); + sslCtxFactory.setTrustStorePassword(trustStorePassword); + + } + listener = new ServerConnector(server.webServer, new SslConnectionFactory(sslCtxFactory, + HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig)); + } else { + throw new HadoopIllegalArgumentException( + "unknown scheme for endpoint:" + ep); + } + + // default settings for connector + listener.setAcceptQueueSize(128); + if (Shell.WINDOWS) { + // result of setting the SO_REUSEADDR flag is different on Windows + // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx + // without this 2 NN's can start on the same machine and listen on + // the same port with indeterminate routing of incoming requests to them + listener.setReuseAddress(false); + } + + listener.setHost(ep.getHost()); + listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort()); + server.addManagedListener(listener); + } + + server.loadListeners(); + return server; + + } + + } + + /** + * @see #HttpServer(String, String, int, boolean, Configuration) + * @deprecated Since 0.99.0 + */ + @Deprecated + public HttpServer(String name, String bindAddress, int port, boolean findPort) + throws IOException { + this(name, bindAddress, port, findPort, new Configuration()); + } + + /** + * Create a status server on the given port. Allows you to specify the + * path specifications that this server will be serving so that they will be + * added to the filters properly. + * + * @param name The name of the server + * @param bindAddress The address for this server + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and + * increment by 1 until it finds a free port. 
+ * @param conf Configuration + * @param pathSpecs Path specifications that this httpserver will be serving. + * These will be added to any filters. + * @deprecated Since 0.99.0 + */ + @Deprecated + public HttpServer(String name, String bindAddress, int port, + boolean findPort, Configuration conf, String[] pathSpecs) throws IOException { + this(name, bindAddress, port, findPort, conf, null, pathSpecs); + } + + /** + * Create a status server on the given port. + * The jsp scripts are taken from src/webapps/<name>. + * @param name The name of the server + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and + * increment by 1 until it finds a free port. + * @param conf Configuration + * @deprecated Since 0.99.0 + */ + @Deprecated + public HttpServer(String name, String bindAddress, int port, + boolean findPort, Configuration conf) throws IOException { + this(name, bindAddress, port, findPort, conf, null, null); + } + + /** + * Creates a status server on the given port. The JSP scripts are taken + * from src/webapp<name>. + * + * @param name the name of the server + * @param bindAddress the address for this server + * @param port the port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port + * @param conf the configuration to use + * @param adminsAcl {@link AccessControlList} of the admins + * @throws IOException when creating the server fails + * @deprecated Since 0.99.0 + */ + @Deprecated + public HttpServer(String name, String bindAddress, int port, + boolean findPort, Configuration conf, AccessControlList adminsAcl) + throws IOException { + this(name, bindAddress, port, findPort, conf, adminsAcl, null); + } + + /** + * Create a status server on the given port. + * The jsp scripts are taken from src/webapps/<name>. + * @param name The name of the server + * @param bindAddress The address for this server + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and + * increment by 1 until it finds a free port. + * @param conf Configuration + * @param adminsAcl {@link AccessControlList} of the admins + * @param pathSpecs Path specifications that this httpserver will be serving. + * These will be added to any filters. + * @deprecated Since 0.99.0 + */ + @Deprecated + public HttpServer(String name, String bindAddress, int port, + boolean findPort, Configuration conf, AccessControlList adminsAcl, + String[] pathSpecs) throws IOException { + this(new Builder().setName(name) + .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setFindPort(findPort).setConf(conf).setACL(adminsAcl) + .setPathSpec(pathSpecs)); + } + + private HttpServer(final Builder b) throws IOException { + this.appDir = b.appDir; + this.logDir = b.logDir; + final String appDir = getWebAppsPath(b.name); + + + int maxThreads = b.conf.getInt(HTTP_MAX_THREADS, 16); + // If HTTP_MAX_THREADS is less than or equal to 0, QueueThreadPool() will use the + // default value (currently 200). + QueuedThreadPool threadPool = maxThreads <= 0 ? 
new QueuedThreadPool() + : new QueuedThreadPool(maxThreads); + threadPool.setDaemon(true); + this.webServer = new Server(threadPool); + + this.adminsAcl = b.adminsAcl; + this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir); + this.findPort = b.findPort; + this.authenticationEnabled = b.securityEnabled; + initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs, b); + } + + private void initializeWebServer(String name, String hostName, + Configuration conf, String[] pathSpecs, HttpServer.Builder b) + throws FileNotFoundException, IOException { + + Preconditions.checkNotNull(webAppContext); + + HandlerCollection handlerCollection = new HandlerCollection(); + + ContextHandlerCollection contexts = new ContextHandlerCollection(); + RequestLog requestLog = HttpRequestLog.getRequestLog(name); + + if (requestLog != null) { + RequestLogHandler requestLogHandler = new RequestLogHandler(); + requestLogHandler.setRequestLog(requestLog); + handlerCollection.addHandler(requestLogHandler); + } + + final String appDir = getWebAppsPath(name); + + handlerCollection.addHandler(contexts); + handlerCollection.addHandler(webAppContext); + + webServer.setHandler(handlerCollection); + + webAppContext.setAttribute(ADMINS_ACL, adminsAcl); + + // Default apps need to be set first, so that all filters are applied to them. + // Because they're added to defaultContexts, we need them there before we start + // adding filters + addDefaultApps(contexts, appDir, conf); + + addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); + + addGlobalFilter("clickjackingprevention", + ClickjackingPreventionFilter.class.getName(), + ClickjackingPreventionFilter.getDefaultParameters(conf)); + + addGlobalFilter("securityheaders", + SecurityHeadersFilter.class.getName(), + SecurityHeadersFilter.getDefaultParameters(conf)); + + // But security needs to be enabled prior to adding the other servlets + if (authenticationEnabled) { + initSpnego(conf, hostName, b.usernameConfKey, b.keytabConfKey, b.kerberosNameRulesKey, + b.signatureSecretFileKey); + } + + final FilterInitializer[] initializers = getFilterInitializers(conf); + if (initializers != null) { + conf = new Configuration(conf); + conf.set(BIND_ADDRESS, hostName); + for (FilterInitializer c : initializers) { + c.initFilter(this, conf); + } + } + + addDefaultServlets(contexts, conf); + + if (pathSpecs != null) { + for (String path : pathSpecs) { + LOG.info("adding path spec: " + path); + addFilterPathMapping(path, webAppContext); + } + } + } + + private void addManagedListener(ServerConnector connector) { + listeners.add(new ListenerInfo(true, connector)); + } + + private static WebAppContext createWebAppContext(String name, + Configuration conf, AccessControlList adminsAcl, final String appDir) { + WebAppContext ctx = new WebAppContext(); + ctx.setDisplayName(name); + ctx.setContextPath("/"); + ctx.setWar(appDir + "/" + name); + ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); + // for org.apache.hadoop.metrics.MetricsServlet + ctx.getServletContext().setAttribute( + org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf); + ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); + addNoCacheFilter(ctx); + return ctx; + } + + private static void addNoCacheFilter(WebAppContext ctxt) { + defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(), + Collections. 
emptyMap(), new String[] { "/*" }); + } + + /** Get an array of FilterConfiguration specified in the conf */ + private static FilterInitializer[] getFilterInitializers(Configuration conf) { + if (conf == null) { + return null; + } + + Class[] classes = conf.getClasses(FILTER_INITIALIZERS_PROPERTY); + if (classes == null) { + return null; + } + + FilterInitializer[] initializers = new FilterInitializer[classes.length]; + for(int i = 0; i < classes.length; i++) { + initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(classes[i]); + } + return initializers; + } + + /** + * Add default apps. + * @param appDir The application directory + */ + protected void addDefaultApps(ContextHandlerCollection parent, + final String appDir, Configuration conf) { + // set up the context for "/logs/" if "hadoop.log.dir" property is defined. + String logDir = this.logDir; + if (logDir == null) { + logDir = System.getProperty("hadoop.log.dir"); + } + if (logDir != null) { + ServletContextHandler logContext = new ServletContextHandler(parent, "/logs"); + logContext.addServlet(AdminAuthorizedServlet.class, "/*"); + logContext.setResourceBase(logDir); + + if (conf.getBoolean( + ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, + ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) { + Map params = logContext.getInitParams(); + params.put( + "org.mortbay.jetty.servlet.Default.aliases", "true"); + } + logContext.setDisplayName("logs"); + setContextAttributes(logContext, conf); + defaultContexts.put(logContext, true); + } + // set up the context for "/static/*" + ServletContextHandler staticContext = new ServletContextHandler(parent, "/static"); + staticContext.setResourceBase(appDir + "/static"); + staticContext.addServlet(DefaultServlet.class, "/*"); + staticContext.setDisplayName("static"); + setContextAttributes(staticContext, conf); + defaultContexts.put(staticContext, true); + } + + private void setContextAttributes(ServletContextHandler context, Configuration conf) { + context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); + context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); + } + + /** + * Add default servlets. + */ + protected void addDefaultServlets( + ContextHandlerCollection contexts, Configuration conf) throws IOException { + // set up default servlets + addPrivilegedServlet("stacks", "/stacks", StackServlet.class); + addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class); + // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's + // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. + // Remove when we drop support for hbase on hadoop2.x. + try { + Class clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet"); + addPrivilegedServlet("metrics", "/metrics", clz.asSubclass(HttpServlet.class)); + } catch (Exception e) { + // do nothing + } + addPrivilegedServlet("jmx", "/jmx", JMXJsonServlet.class); + // While we don't expect users to have sensitive information in their configuration, they + // might. Give them an option to not expose the service configuration to all users. 
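+    // In other words: with the privileged-conf flag enabled, /conf is registered admin-only;
+    // otherwise any authenticated user can read the service configuration.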
+ if (conf.getBoolean(HTTP_PRIVILEGED_CONF_KEY, HTTP_PRIVILEGED_CONF_DEFAULT)) { + addPrivilegedServlet("conf", "/conf", ConfServlet.class); + } else { + addUnprivilegedServlet("conf", "/conf", ConfServlet.class); + } + final String asyncProfilerHome = ProfileServlet.getAsyncProfilerHome(); + if (asyncProfilerHome != null && !asyncProfilerHome.trim().isEmpty()) { + addPrivilegedServlet("prof", "/prof", ProfileServlet.class); + Path tmpDir = Paths.get(ProfileServlet.OUTPUT_DIR); + if (Files.notExists(tmpDir)) { + Files.createDirectories(tmpDir); + } + ServletContextHandler genCtx = new ServletContextHandler(contexts, "/prof-output"); + genCtx.addServlet(ProfileOutputServlet.class, "/*"); + genCtx.setResourceBase(tmpDir.toAbsolutePath().toString()); + genCtx.setDisplayName("prof-output"); + } else { + addUnprivilegedServlet("prof", "/prof", ProfileServlet.DisabledServlet.class); + LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + + "not specified. Disabling /prof endpoint."); + } + } + + /** + * Set a value in the webapp context. These values are available to the jsp + * pages as "application.getAttribute(name)". + * @param name The name of the attribute + * @param value The value of the attribute + */ + public void setAttribute(String name, Object value) { + webAppContext.setAttribute(name, value); + } + + /** + * Add a Jersey resource package. + * @param packageName The Java package name containing the Jersey resource. + * @param pathSpec The path spec for the servlet + */ + public void addJerseyResourcePackage(final String packageName, + final String pathSpec) { + LOG.info("addJerseyResourcePackage: packageName=" + packageName + + ", pathSpec=" + pathSpec); + + ResourceConfig application = new ResourceConfig().packages(packageName); + final ServletHolder sh = new ServletHolder(new ServletContainer(application)); + webAppContext.addServlet(sh, pathSpec); + } + + /** + * Adds a servlet in the server that any user can access. This method differs from + * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user + * can interact with the servlet added by this method. + * @param name The name of the servlet (can be passed as null) + * @param pathSpec The path spec for the servlet + * @param clazz The servlet class + */ + public void addUnprivilegedServlet(String name, String pathSpec, + Class clazz) { + addServletWithAuth(name, pathSpec, clazz, false); + } + + /** + * Adds a servlet in the server that only administrators can access. This method differs from + * {@link #addUnprivilegedServlet(String, String, Class)} in that only those authenticated user + * who are identified as administrators can interact with the servlet added by this method. + */ + public void addPrivilegedServlet(String name, String pathSpec, + Class clazz) { + addServletWithAuth(name, pathSpec, clazz, true); + } + + /** + * Internal method to add a servlet to the HTTP server. Developers should not call this method + * directly, but invoke it via {@link #addUnprivilegedServlet(String, String, Class)} or + * {@link #addPrivilegedServlet(String, String, Class)}. + */ + void addServletWithAuth(String name, String pathSpec, + Class clazz, boolean requireAuthz) { + addInternalServlet(name, pathSpec, clazz, requireAuthz); + addFilterPathMapping(pathSpec, webAppContext); + } + + /** + * Add an internal servlet in the server, specifying whether or not to + * protect with Kerberos authentication. 
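+   * For example, the default servlets registered above reach this method via
+   * {@code addPrivilegedServlet("jmx", "/jmx", JMXJsonServlet.class)} and
+   * {@link #addServletWithAuth(String, String, Class, boolean)}.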
+ * Note: This method is to be used for adding servlets that facilitate + * internal communication and not for user facing functionality. For + * servlets added using this method, filters (except internal Kerberos + * filters) are not enabled. + * + * @param name The name of the servlet (can be passed as null) + * @param pathSpec The path spec for the servlet + * @param clazz The servlet class + * @param requireAuthz Require Kerberos authenticate to access servlet + */ + void addInternalServlet(String name, String pathSpec, + Class clazz, boolean requireAuthz) { + ServletHolder holder = new ServletHolder(clazz); + if (name != null) { + holder.setName(name); + } + if (authenticationEnabled && requireAuthz) { + FilterHolder filter = new FilterHolder(AdminAuthorizedFilter.class); + filter.setName(AdminAuthorizedFilter.class.getSimpleName()); + FilterMapping fmap = new FilterMapping(); + fmap.setPathSpec(pathSpec); + fmap.setDispatches(FilterMapping.ALL); + fmap.setFilterName(AdminAuthorizedFilter.class.getSimpleName()); + webAppContext.getServletHandler().addFilter(filter, fmap); + } + webAppContext.addServlet(holder, pathSpec); + } + + @Override + public void addFilter(String name, String classname, Map parameters) { + final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; + defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); + LOG.info("Added filter " + name + " (class=" + classname + + ") to context " + webAppContext.getDisplayName()); + final String[] ALL_URLS = { "/*" }; + for (Map.Entry e : defaultContexts.entrySet()) { + if (e.getValue()) { + ServletContextHandler handler = e.getKey(); + defineFilter(handler, name, classname, parameters, ALL_URLS); + LOG.info("Added filter " + name + " (class=" + classname + + ") to context " + handler.getDisplayName()); + } + } + filterNames.add(name); + } + + @Override + public void addGlobalFilter(String name, String classname, Map parameters) { + final String[] ALL_URLS = { "/*" }; + defineFilter(webAppContext, name, classname, parameters, ALL_URLS); + for (ServletContextHandler ctx : defaultContexts.keySet()) { + defineFilter(ctx, name, classname, parameters, ALL_URLS); + } + LOG.info("Added global filter '" + name + "' (class=" + classname + ")"); + } + + /** + * Define a filter for a context and set up default url mappings. + */ + public static void defineFilter(ServletContextHandler handler, String name, + String classname, Map parameters, String[] urls) { + FilterHolder holder = new FilterHolder(); + holder.setName(name); + holder.setClassName(classname); + if (parameters != null) { + holder.setInitParameters(parameters); + } + FilterMapping fmap = new FilterMapping(); + fmap.setPathSpecs(urls); + fmap.setDispatches(FilterMapping.ALL); + fmap.setFilterName(name); + handler.getServletHandler().addFilter(holder, fmap); + } + + /** + * Add the path spec to the filter path mapping. + * @param pathSpec The path spec + * @param webAppCtx The WebApplicationContext to add to + */ + protected void addFilterPathMapping(String pathSpec, + WebAppContext webAppCtx) { + for(String name : filterNames) { + FilterMapping fmap = new FilterMapping(); + fmap.setPathSpec(pathSpec); + fmap.setFilterName(name); + fmap.setDispatches(FilterMapping.ALL); + webAppCtx.getServletHandler().addFilterMapping(fmap); + } + } + + /** + * Get the value in the webapp context. 
+ * @param name The name of the attribute + * @return The value of the attribute + */ + public Object getAttribute(String name) { + return webAppContext.getAttribute(name); + } + + public WebAppContext getWebAppContext(){ + return this.webAppContext; + } + + public String getWebAppsPath(String appName) throws FileNotFoundException { + return getWebAppsPath(this.appDir, appName); + } + + /** + * Get the pathname to the webapps files. + * @param appName eg "secondary" or "datanode" + * @return the pathname as a URL + * @throws FileNotFoundException if 'webapps' directory cannot be found on CLASSPATH. + */ + protected String getWebAppsPath(String webapps, String appName) throws FileNotFoundException { + URL url = getClass().getClassLoader().getResource(webapps + "/" + appName); + + if (url == null) { + throw new FileNotFoundException(webapps + "/" + appName + + " not found in CLASSPATH"); + } + + String urlString = url.toString(); + return urlString.substring(0, urlString.lastIndexOf('/')); + } + + /** + * Get the port that the server is on + * @return the port + * @deprecated Since 0.99.0 + */ + @Deprecated + public int getPort() { + return ((ServerConnector)webServer.getConnectors()[0]).getLocalPort(); + } + + /** + * Get the address that corresponds to a particular connector. + * + * @return the corresponding address for the connector, or null if there's no + * such connector or the connector is not bounded. + */ + public InetSocketAddress getConnectorAddress(int index) { + Preconditions.checkArgument(index >= 0); + + if (index > webServer.getConnectors().length) { + return null; + } + + ServerConnector c = (ServerConnector)webServer.getConnectors()[index]; + if (c.getLocalPort() == -1 || c.getLocalPort() == -2) { + // -1 if the connector has not been opened + // -2 if it has been closed + return null; + } + + return new InetSocketAddress(c.getHost(), c.getLocalPort()); + } + + /** + * Set the min, max number of worker threads (simultaneous connections). 
+   */
+  public void setThreads(int min, int max) {
+    QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool();
+    pool.setMinThreads(min);
+    pool.setMaxThreads(max);
+  }
+
+  private void initSpnego(Configuration conf, String hostName,
+      String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey,
+      String signatureSecretKeyFileKey) throws IOException {
+    Map<String, String> params = new HashMap<>();
+    String principalInConf = getOrEmptyString(conf, usernameConfKey);
+    if (!principalInConf.isEmpty()) {
+      params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal(
+          principalInConf, hostName));
+    }
+    String httpKeytab = getOrEmptyString(conf, keytabConfKey);
+    if (!httpKeytab.isEmpty()) {
+      params.put(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX, httpKeytab);
+    }
+    String kerberosNameRule = getOrEmptyString(conf, kerberosNameRuleKey);
+    if (!kerberosNameRule.isEmpty()) {
+      params.put(HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX, kerberosNameRule);
+    }
+    String signatureSecretKeyFile = getOrEmptyString(conf, signatureSecretKeyFileKey);
+    if (!signatureSecretKeyFile.isEmpty()) {
+      params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX,
+          signatureSecretKeyFile);
+    }
+    params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+
+    // Verify that the required options were provided
+    if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) ||
+        isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) {
+      throw new IllegalArgumentException(usernameConfKey + " and " +
+          keytabConfKey + " are both required in the configuration " +
+          "to enable SPNEGO/Kerberos authentication for the Web UI");
+    }
+
+    if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY,
+        HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) {
+      // Copy/rename standard hadoop proxyuser settings to filter
+      for (Map.Entry<String, String> proxyEntry :
+          conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) {
+        params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(),
+            proxyEntry.getValue());
+      }
+      addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), params);
+    } else {
+      addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params);
+    }
+  }
+
+  /**
+   * Returns true if the argument is null or consists only of whitespace.
+   */
+  private boolean isMissing(String value) {
+    if (null == value) {
+      return true;
+    }
+    return value.trim().isEmpty();
+  }
+
+  /**
+   * Extracts the value for the given key from the configuration, or returns a string of
+   * zero length if the key or its value is unset.
+   */
+  private String getOrEmptyString(Configuration conf, String key) {
+    if (null == key) {
+      return EMPTY_STRING;
+    }
+    final String value = conf.get(key.trim());
+    return null == value ? EMPTY_STRING : value;
+  }
+
+  /**
+   * Start the server. Does not wait for the server to start.
+   */
+  public void start() throws IOException {
+    try {
+      try {
+        openListeners();
+        webServer.start();
+      } catch (IOException ex) {
+        LOG.info("HttpServer.start() threw a non Bind IOException", ex);
+        throw ex;
+      } catch (MultiException ex) {
+        LOG.info("HttpServer.start() threw a MultiException", ex);
+        throw ex;
+      }
+      // Make sure there are no handler failures.
+      Handler[] handlers = webServer.getHandlers();
+      for (int i = 0; i < handlers.length; i++) {
+        if (handlers[i].isFailed()) {
+          throw new IOException(
+              "Problem in starting http server. Server handlers failed");
+        }
+      }
+      // Make sure there are no errors initializing the context.
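+      // (Jetty records a failed servlet or filter initialization as the context's
+      // "unavailable" exception rather than throwing from start(), so it has to be
+      // checked for explicitly here.)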
+ Throwable unavailableException = webAppContext.getUnavailableException(); + if (unavailableException != null) { + // Have to stop the webserver, or else its non-daemon threads + // will hang forever. + webServer.stop(); + throw new IOException("Unable to initialize WebAppContext", + unavailableException); + } + } catch (IOException e) { + throw e; + } catch (InterruptedException e) { + throw (IOException) new InterruptedIOException( + "Interrupted while starting HTTP server").initCause(e); + } catch (Exception e) { + throw new IOException("Problem starting http server", e); + } + } + + private void loadListeners() { + for (ListenerInfo li : listeners) { + webServer.addConnector(li.listener); + } + } + + /** + * Open the main listener for the server + * @throws Exception if the listener cannot be opened or the appropriate port is already in use + */ + @VisibleForTesting + void openListeners() throws Exception { + for (ListenerInfo li : listeners) { + ServerConnector listener = li.listener; + if (!li.isManaged || (li.listener.getLocalPort() != -1 && li.listener.getLocalPort() != -2)) { + // This listener is either started externally, or has not been opened, or has been closed + continue; + } + int port = listener.getPort(); + while (true) { + // jetty has a bug where you can't reopen a listener that previously + // failed to open w/o issuing a close first, even if the port is changed + try { + listener.close(); + listener.open(); + LOG.info("Jetty bound to port " + listener.getLocalPort()); + break; + } catch (IOException ex) { + if(!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { + throw ex; + } + if (port == 0 || !findPort) { + BindException be = new BindException("Port in use: " + + listener.getHost() + ":" + listener.getPort()); + be.initCause(ex); + throw be; + } + } + // try the next port number + listener.setPort(++port); + Thread.sleep(100); + } + } + } + + /** + * stop the server + */ + public void stop() throws Exception { + MultiException exception = null; + for (ListenerInfo li : listeners) { + if (!li.isManaged) { + continue; + } + + try { + li.listener.close(); + } catch (Exception e) { + LOG.error( + "Error while stopping listener for webapp" + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); + } + } + + try { + // clear & stop webAppContext attributes to avoid memory leaks. 
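+      // (As with the listeners above, failures are collected into a single MultiException
+      // so every component gets a stop attempt before anything is rethrown.)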
+ webAppContext.clearAttributes(); + webAppContext.stop(); + } catch (Exception e) { + LOG.error("Error while stopping web app context for webapp " + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); + } + + try { + webServer.stop(); + } catch (Exception e) { + LOG.error("Error while stopping web server for webapp " + + webAppContext.getDisplayName(), e); + exception = addMultiException(exception, e); + } + + if (exception != null) { + exception.ifExceptionThrow(); + } + + } + + private MultiException addMultiException(MultiException exception, Exception e) { + if(exception == null){ + exception = new MultiException(); + } + exception.add(e); + return exception; + } + + public void join() throws InterruptedException { + webServer.join(); + } + + /** + * Test for the availability of the web server + * @return true if the web server is started, false otherwise + */ + public boolean isAlive() { + return webServer != null && webServer.isStarted(); + } + + /** + * Return the host and port of the HttpServer, if live + * @return the classname and any HTTP URL + */ + @Override + public String toString() { + if (listeners.isEmpty()) { + return "Inactive HttpServer"; + } else { + StringBuilder sb = new StringBuilder("HttpServer (") + .append(isAlive() ? STATE_DESCRIPTION_ALIVE : + STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); + for (ListenerInfo li : listeners) { + ServerConnector l = li.listener; + sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); + } + return sb.toString(); + } + } + + /** + * Checks the user has privileges to access to instrumentation servlets. + *
+ * If hadoop.security.instrumentation.requires.admin is set to FALSE + * (default value) it always returns TRUE. + *
+   * If hadoop.security.instrumentation.requires.admin is set to TRUE
+   * it will check whether the current user is in the admin ACLs. If the user is
+   * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+   *
+ * + * @param servletContext the servlet context. + * @param request the servlet request. + * @param response the servlet response. + * @return TRUE/FALSE based on the logic decribed above. + */ + public static boolean isInstrumentationAccessAllowed( + ServletContext servletContext, HttpServletRequest request, + HttpServletResponse response) throws IOException { + Configuration conf = + (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + + boolean access = true; + boolean adminAccess = conf.getBoolean( + CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, + false); + if (adminAccess) { + access = hasAdministratorAccess(servletContext, request, response); + } + return access; + } + + /** + * Does the user sending the HttpServletRequest has the administrator ACLs? If + * it isn't the case, response will be modified to send an error to the user. + * + * @param servletContext the {@link ServletContext} to use + * @param request the {@link HttpServletRequest} to check + * @param response used to send the error response if user does not have admin access. + * @return true if admin-authorized, false otherwise + * @throws IOException if an unauthenticated or unauthorized user tries to access the page + */ + public static boolean hasAdministratorAccess( + ServletContext servletContext, HttpServletRequest request, + HttpServletResponse response) throws IOException { + Configuration conf = + (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + AccessControlList acl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); + + return hasAdministratorAccess(conf, acl, request, response); + } + + public static boolean hasAdministratorAccess(Configuration conf, AccessControlList acl, + HttpServletRequest request, HttpServletResponse response) throws IOException { + // If there is no authorization, anybody has administrator access. + if (!conf.getBoolean( + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { + return true; + } + + String remoteUser = request.getRemoteUser(); + if (remoteUser == null) { + response.sendError(HttpServletResponse.SC_UNAUTHORIZED, + "Unauthenticated users are not " + + "authorized to access this page."); + return false; + } + + if (acl != null && !userHasAdministratorAccess(acl, remoteUser)) { + response.sendError(HttpServletResponse.SC_FORBIDDEN, "User " + + remoteUser + " is unauthorized to access this page."); + return false; + } + + return true; + } + + /** + * Get the admin ACLs from the given ServletContext and check if the given + * user is in the ACL. + * + * @param servletContext the context containing the admin ACL. + * @param remoteUser the remote user to check for. + * @return true if the user is present in the ACL, false if no ACL is set or + * the user is not present + */ + public static boolean userHasAdministratorAccess(ServletContext servletContext, + String remoteUser) { + AccessControlList adminsAcl = (AccessControlList) servletContext + .getAttribute(ADMINS_ACL); + return userHasAdministratorAccess(adminsAcl, remoteUser); + } + + public static boolean userHasAdministratorAccess(AccessControlList acl, String remoteUser) { + UserGroupInformation remoteUserUGI = + UserGroupInformation.createRemoteUser(remoteUser); + return acl != null && acl.isUserAllowed(remoteUserUGI); + } + + /** + * A very simple servlet to serve up a text representation of the current + * stack traces. It both returns the stacks to the caller and logs them. 
+ * Currently the stack traces are done sequentially rather than exactly the + * same data. + */ + public static class StackServlet extends HttpServlet { + private static final long serialVersionUID = -6284183679759467039L; + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { + return; + } + response.setContentType("text/plain; charset=UTF-8"); + try (PrintStream out = new PrintStream( + response.getOutputStream(), false, "UTF-8")) { + Threads.printThreadInfo(out, ""); + out.flush(); + } + ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1); + } + } + + /** + * A Servlet input filter that quotes all HTML active characters in the + * parameter names and values. The goal is to quote the characters to make + * all of the servlets resistant to cross-site scripting attacks. + */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) + public static class QuotingInputFilter implements Filter { + private FilterConfig config; + + public static class RequestQuoter extends HttpServletRequestWrapper { + private final HttpServletRequest rawRequest; + public RequestQuoter(HttpServletRequest rawRequest) { + super(rawRequest); + this.rawRequest = rawRequest; + } + + /** + * Return the set of parameter names, quoting each name. + */ + @Override + public Enumeration getParameterNames() { + return new Enumeration() { + private Enumeration rawIterator = + rawRequest.getParameterNames(); + @Override + public boolean hasMoreElements() { + return rawIterator.hasMoreElements(); + } + + @Override + public String nextElement() { + return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement()); + } + }; + } + + /** + * Unquote the name and quote the value. + */ + @Override + public String getParameter(String name) { + return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter( + HtmlQuoting.unquoteHtmlChars(name))); + } + + @Override + public String[] getParameterValues(String name) { + String unquoteName = HtmlQuoting.unquoteHtmlChars(name); + String[] unquoteValue = rawRequest.getParameterValues(unquoteName); + if (unquoteValue == null) { + return null; + } + String[] result = new String[unquoteValue.length]; + for(int i=0; i < result.length; ++i) { + result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]); + } + return result; + } + + @Override + public Map getParameterMap() { + Map result = new HashMap<>(); + Map raw = rawRequest.getParameterMap(); + for (Map.Entry item: raw.entrySet()) { + String[] rawValue = item.getValue(); + String[] cookedValue = new String[rawValue.length]; + for(int i=0; i< rawValue.length; ++i) { + cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]); + } + result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue); + } + return result; + } + + /** + * Quote the url so that users specifying the HOST HTTP header + * can't inject attacks. + */ + @Override + public StringBuffer getRequestURL(){ + String url = rawRequest.getRequestURL().toString(); + return new StringBuffer(HtmlQuoting.quoteHtmlChars(url)); + } + + /** + * Quote the server name so that users specifying the HOST HTTP header + * can't inject attacks. 
+ */ + @Override + public String getServerName() { + return HtmlQuoting.quoteHtmlChars(rawRequest.getServerName()); + } + } + + @Override + public void init(FilterConfig config) throws ServletException { + this.config = config; + } + + @Override + public void destroy() { + } + + @Override + public void doFilter(ServletRequest request, + ServletResponse response, + FilterChain chain + ) throws IOException, ServletException { + HttpServletRequestWrapper quoted = + new RequestQuoter((HttpServletRequest) request); + HttpServletResponse httpResponse = (HttpServletResponse) response; + + String mime = inferMimeType(request); + if (mime == null) { + httpResponse.setContentType("text/plain; charset=utf-8"); + } else if (mime.startsWith("text/html")) { + // HTML with unspecified encoding, we want to + // force HTML with utf-8 encoding + // This is to avoid the following security issue: + // http://openmya.hacker.jp/hasegawa/security/utf7cs.html + httpResponse.setContentType("text/html; charset=utf-8"); + } else if (mime.startsWith("application/xml")) { + httpResponse.setContentType("text/xml; charset=utf-8"); + } + chain.doFilter(quoted, httpResponse); + } + + /** + * Infer the mime type for the response based on the extension of the request + * URI. Returns null if unknown. + */ + private String inferMimeType(ServletRequest request) { + String path = ((HttpServletRequest)request).getRequestURI(); + ServletContext context = config.getServletContext(); + return context.getMimeType(path); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServerUtil.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServerUtil.java new file mode 100755 index 00000000..a043ba6c --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/HttpServerUtil.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.yetus.audience.InterfaceAudience; +import org.eclipse.jetty.security.ConstraintMapping; +import org.eclipse.jetty.security.ConstraintSecurityHandler; +import org.eclipse.jetty.servlet.ServletContextHandler; +import org.eclipse.jetty.util.security.Constraint; + +/** + * HttpServer utility. + */ +@InterfaceAudience.Private +public final class HttpServerUtil { + /** + * Add constraints to a Jetty Context to disallow undesirable Http methods. 
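+   * A typical call (illustrative, not prescriptive) is
+   * {@code HttpServerUtil.constrainHttpMethods(ctx, false)}, which rejects both TRACE and
+   * OPTIONS on every path of the supplied context.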
+ * @param ctxHandler The context to modify + * @param allowOptionsMethod if true then OPTIONS method will not be set in constraint mapping + */ + public static void constrainHttpMethods(ServletContextHandler ctxHandler, + boolean allowOptionsMethod) { + Constraint c = new Constraint(); + c.setAuthenticate(true); + + ConstraintMapping cmt = new ConstraintMapping(); + cmt.setConstraint(c); + cmt.setMethod("TRACE"); + cmt.setPathSpec("/*"); + + ConstraintSecurityHandler securityHandler = new ConstraintSecurityHandler(); + + if (!allowOptionsMethod) { + ConstraintMapping cmo = new ConstraintMapping(); + cmo.setConstraint(c); + cmo.setMethod("OPTIONS"); + cmo.setPathSpec("/*"); + securityHandler.setConstraintMappings(new ConstraintMapping[] { cmt, cmo }); + } else { + securityHandler.setConstraintMappings(new ConstraintMapping[] { cmt }); + } + + ctxHandler.setSecurityHandler(securityHandler); + } + + private HttpServerUtil() {} +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/InfoServer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/InfoServer.java new file mode 100755 index 00000000..33079740 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/InfoServer.java @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.security.authorize.AccessControlList; +import org.apache.yetus.audience.InterfaceAudience; + +import javax.servlet.ServletContext; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import java.io.IOException; +import java.net.URI; + +/** + * Create a Jetty embedded server to answer http requests. The primary goal + * is to serve up status information for the server. + * There are three contexts: + * "/stacks/" -> points to stack trace + * "/static/" -> points to common static files (src/hbase-webapps/static) + * "/" -> the jsp server code from (src/hbase-webapps/<name>) + */ +@InterfaceAudience.Private +public class InfoServer { + private static final String HBASE_APP_DIR = "hbase-webapps"; + private final HttpServer httpServer; + + /** + * Create a status server on the given port. + * The jsp scripts are taken from src/hbase-webapps/name. + * @param name The name of the server + * @param bindAddress address to bind to + * @param port The port to use on the server + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. 
+ * @param c the {@link Configuration} to build the server + * @throws IOException if getting one of the password fails or the server cannot be created + */ + public InfoServer(String name, String bindAddress, int port, boolean findPort, + final Configuration c) throws IOException { + HttpConfig httpConfig = new HttpConfig(c); + HttpServer.Builder builder = + new HttpServer.Builder(); + + builder.setName(name).addEndpoint(URI.create(httpConfig.getSchemePrefix() + + bindAddress + ":" + + port)).setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); + String logDir = System.getProperty("hbase.log.dir"); + if (logDir != null) { + builder.setLogDir(logDir); + } + if (httpConfig.isSecure()) { + builder.keyPassword(HBaseConfiguration + .getPassword(c, "ssl.server.keystore.keypassword", null)) + .keyStore(c.get("ssl.server.keystore.location"), + HBaseConfiguration.getPassword(c,"ssl.server.keystore.password", null), + c.get("ssl.server.keystore.type", "jks")) + .trustStore(c.get("ssl.server.truststore.location"), + HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), + c.get("ssl.server.truststore.type", "jks")); + } + // Enable SPNEGO authentication + if ("kerberos".equalsIgnoreCase(c.get(HttpServer.HTTP_UI_AUTHENTICATION, null))) { + builder.setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) + .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY) + .setSignatureSecretFileKey( + HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) + .setSecurityEnabled(true); + + // Set an admin ACL on sensitive webUI endpoints + AccessControlList acl = buildAdminAcl(c); + builder.setACL(acl); + } + this.httpServer = builder.build(); + } + + /** + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI + * which are meant only for administrators. + */ + AccessControlList buildAdminAcl(Configuration conf) { + final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); + final String adminGroups = conf.get( + HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + if (userGroups == null && adminGroups == null) { + // Backwards compatibility - if the user doesn't have anything set, allow all users in. + return new AccessControlList("*", null); + } + return new AccessControlList(userGroups, adminGroups); + } + + /** + * Explicitly invoke {@link #addPrivilegedServlet(String, String, Class)} or + * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. + * This method will add a servlet which any authenticated user can access. + * + * @deprecated Use {@link #addUnprivilegedServlet(String, String, Class)} or + * {@link #addPrivilegedServlet(String, String, Class)} instead of this + * method which does not state outwardly what kind of authz rules will + * be applied to this servlet. 
+ */ + @Deprecated + public void addServlet(String name, String pathSpec, + Class clazz) { + addUnprivilegedServlet(name, pathSpec, clazz); + } + + /** + * @see HttpServer#addUnprivilegedServlet(String, String, Class) + */ + public void addUnprivilegedServlet(String name, String pathSpec, + Class clazz) { + this.httpServer.addUnprivilegedServlet(name, pathSpec, clazz); + } + + /** + * @see HttpServer#addPrivilegedServlet(String, String, Class) + */ + public void addPrivilegedServlet(String name, String pathSpec, + Class clazz) { + this.httpServer.addPrivilegedServlet(name, pathSpec, clazz); + } + + public void setAttribute(String name, Object value) { + this.httpServer.setAttribute(name, value); + } + + public void start() throws IOException { + this.httpServer.start(); + } + + /** + * @return the port of the info server + * @deprecated Since 0.99.0 + */ + @Deprecated + public int getPort() { + return this.httpServer.getPort(); + } + + public void stop() throws Exception { + this.httpServer.stop(); + } + + + /** + * Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled, + * and the requesting user is defined as an administrator. If the UI is set to readonly, this + * method always returns false. + */ + public static boolean canUserModifyUI( + HttpServletRequest req, ServletContext ctx, Configuration conf) { + if (conf.getBoolean("hbase.master.ui.readonly", false)) { + return false; + } + String remoteUser = req.getRemoteUser(); + if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && + conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && + remoteUser != null) { + return HttpServer.userHasAdministratorAccess(ctx, remoteUser); + } + return false; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileOutputServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileOutputServlet.java new file mode 100755 index 00000000..c867b727 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileOutputServlet.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.yetus.audience.InterfaceAudience; +import org.eclipse.jetty.servlet.DefaultServlet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.File; +import java.io.IOException; +import java.util.regex.Pattern; + +/** + * Servlet to serve files generated by {@link ProfileServlet} + */ +@InterfaceAudience.Private +public class ProfileOutputServlet extends DefaultServlet { + private static final long serialVersionUID = 1L; + private static final Logger LOG = LoggerFactory.getLogger(ProfileOutputServlet.class); + private static final int REFRESH_PERIOD = 2; + // Alphanumeric characters, plus percent (url-encoding), equals, ampersand, dot and hyphen + private static final Pattern ALPHA_NUMERIC = Pattern.compile("[a-zA-Z0-9%=&.\\-]*"); + + @Override + protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) + throws ServletException, IOException { + String absoluteDiskPath = getServletContext().getRealPath(req.getPathInfo()); + File requestedFile = new File(absoluteDiskPath); + // async-profiler version 1.4 writes 'Started [cpu] profiling' to output file when profiler is + // running which gets replaced by final output. If final output is not ready yet, the file size + // will be <100 bytes (in all modes). + if (requestedFile.length() < 100) { + LOG.info(requestedFile + " is incomplete. Sending auto-refresh header."); + String refreshUrl = req.getRequestURI(); + // Rebuild the query string (if we have one) + if (req.getQueryString() != null) { + refreshUrl += "?" + sanitize(req.getQueryString()); + } + ProfileServlet.setResponseHeader(resp); + resp.setHeader("Refresh", REFRESH_PERIOD + ";" + refreshUrl); + resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + + " seconds until the output file is ready. Redirecting to " + refreshUrl); + } else { + super.doGet(req, resp); + } + } + + static String sanitize(String input) { + // Basic test to try to avoid any XSS attacks or HTML content showing up. + // Duplicates HtmlQuoting a little, but avoid destroying ampersand. + if (ALPHA_NUMERIC.matcher(input).matches()) { + return input; + } + throw new RuntimeException("Non-alphanumeric data found in input, aborting."); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileServlet.java new file mode 100755 index 00000000..1cb3d68a --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProfileServlet.java @@ -0,0 +1,398 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.hadoop.hbase.rest.util.ProcessUtils; +import org.apache.hbase.thirdparty.com.google.common.base.Joiner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +/** + * Servlet that runs async-profiler as web-endpoint. + * Following options from async-profiler can be specified as query paramater. + * // -e event profiling event: cpu|alloc|lock|cache-misses etc. + * // -d duration run profiling for 'duration' seconds (integer) + * // -i interval sampling interval in nanoseconds (long) + * // -j jstackdepth maximum Java stack depth (integer) + * // -b bufsize frame buffer size (long) + * // -t profile different threads separately + * // -s simple class names instead of FQN + * // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr + * // --width px SVG width pixels (integer) + * // --height px SVG frame height pixels (integer) + * // --minwidth px skip frames smaller than px (double) + * // --reverse generate stack-reversed FlameGraph / Call tree + * Example: + * - To collect 30 second CPU profile of current process (returns FlameGraph svg) + * curl "http://localhost:10002/prof" + * - To collect 1 minute CPU profile of current process and output in tree format (html) + * curl "http://localhost:10002/prof?output=tree&duration=60" + * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg) + * curl "http://localhost:10002/prof?event=alloc" + * - To collect lock contention profile of current process (returns FlameGraph svg) + * curl "http://localhost:10002/prof?event=lock" + * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events) + * // Perf events: + * // cpu + * // page-faults + * // context-switches + * // cycles + * // instructions + * // cache-references + * // cache-misses + * // branches + * // branch-misses + * // bus-cycles + * // L1-dcache-load-misses + * // LLC-load-misses + * // dTLB-load-misses + * // mem:breakpoint + * // trace:tracepoint + * // Java events: + * // alloc + * // lock + */ +@InterfaceAudience.Private +public class ProfileServlet extends HttpServlet { + + private static final long serialVersionUID = 1L; + private static final Logger LOG = LoggerFactory.getLogger(ProfileServlet.class); + + private static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods"; + private static final String ALLOWED_METHODS = "GET"; + private static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"; + private static final String CONTENT_TYPE_TEXT = "text/plain; charset=utf-8"; + private static final String ASYNC_PROFILER_HOME_ENV = "ASYNC_PROFILER_HOME"; + private static final String ASYNC_PROFILER_HOME_SYSTEM_PROPERTY = "async.profiler.home"; + private static final String PROFILER_SCRIPT = "/profiler.sh"; + private static final int DEFAULT_DURATION_SECONDS = 10; + private static final AtomicInteger 
ID_GEN = new AtomicInteger(0); + static final String OUTPUT_DIR = System.getProperty("java.io.tmpdir") + "/prof-output"; + + enum Event { + CPU("cpu"), + ALLOC("alloc"), + LOCK("lock"), + PAGE_FAULTS("page-faults"), + CONTEXT_SWITCHES("context-switches"), + CYCLES("cycles"), + INSTRUCTIONS("instructions"), + CACHE_REFERENCES("cache-references"), + CACHE_MISSES("cache-misses"), + BRANCHES("branches"), + BRANCH_MISSES("branch-misses"), + BUS_CYCLES("bus-cycles"), + L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"), + LLC_LOAD_MISSES("LLC-load-misses"), + DTLB_LOAD_MISSES("dTLB-load-misses"), + MEM_BREAKPOINT("mem:breakpoint"), + TRACE_TRACEPOINT("trace:tracepoint"),; + + private final String internalName; + + Event(final String internalName) { + this.internalName = internalName; + } + + public String getInternalName() { + return internalName; + } + + public static Event fromInternalName(final String name) { + for (Event event : values()) { + if (event.getInternalName().equalsIgnoreCase(name)) { + return event; + } + } + + return null; + } + } + + enum Output { + SUMMARY, + TRACES, + FLAT, + COLLAPSED, + SVG, + TREE, + JFR + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED", + justification = "This class is never serialized nor restored.") + private transient Lock profilerLock = new ReentrantLock(); + private transient volatile Process process; + private String asyncProfilerHome; + private Integer pid; + + public ProfileServlet() { + this.asyncProfilerHome = getAsyncProfilerHome(); + this.pid = ProcessUtils.getPid(); + LOG.info("Servlet process PID: " + pid + " asyncProfilerHome: " + asyncProfilerHome); + } + + @Override + protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) + throws IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), req, resp)) { + resp.setStatus(HttpServletResponse.SC_UNAUTHORIZED); + setResponseHeader(resp); + resp.getWriter().write("Unauthorized: Instrumentation access is not allowed!"); + return; + } + + // make sure async profiler home is set + if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + setResponseHeader(resp); + resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. 
For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); + return; + } + + // if pid is explicitly specified, use it else default to current process + pid = getInteger(req, "pid", pid); + + // if pid is not specified in query param and if current process pid cannot be determined + if (pid == null) { + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + setResponseHeader(resp); + resp.getWriter().write( + "'pid' query parameter unspecified or unable to determine PID of current process."); + return; + } + + final int duration = getInteger(req, "duration", DEFAULT_DURATION_SECONDS); + final Output output = getOutput(req); + final Event event = getEvent(req); + final Long interval = getLong(req, "interval"); + final Integer jstackDepth = getInteger(req, "jstackdepth", null); + final Long bufsize = getLong(req, "bufsize"); + final boolean thread = req.getParameterMap().containsKey("thread"); + final boolean simple = req.getParameterMap().containsKey("simple"); + final Integer width = getInteger(req, "width", null); + final Integer height = getInteger(req, "height", null); + final Double minwidth = getMinWidth(req); + final boolean reverse = req.getParameterMap().containsKey("reverse"); + + if (process == null || !process.isAlive()) { + try { + int lockTimeoutSecs = 3; + if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) { + try { + File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + + event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() + "." + + output.name().toLowerCase()); + List cmd = new ArrayList<>(); + cmd.add(asyncProfilerHome + PROFILER_SCRIPT); + cmd.add("-e"); + cmd.add(event.getInternalName()); + cmd.add("-d"); + cmd.add("" + duration); + cmd.add("-o"); + cmd.add(output.name().toLowerCase()); + cmd.add("-f"); + cmd.add(outputFile.getAbsolutePath()); + if (interval != null) { + cmd.add("-i"); + cmd.add(interval.toString()); + } + if (jstackDepth != null) { + cmd.add("-j"); + cmd.add(jstackDepth.toString()); + } + if (bufsize != null) { + cmd.add("-b"); + cmd.add(bufsize.toString()); + } + if (thread) { + cmd.add("-t"); + } + if (simple) { + cmd.add("-s"); + } + if (width != null) { + cmd.add("--width"); + cmd.add(width.toString()); + } + if (height != null) { + cmd.add("--height"); + cmd.add(height.toString()); + } + if (minwidth != null) { + cmd.add("--minwidth"); + cmd.add(minwidth.toString()); + } + if (reverse) { + cmd.add("--reverse"); + } + cmd.add(pid.toString()); + process = ProcessUtils.runCmdAsync(cmd); + + // set response and set refresh header to output location + setResponseHeader(resp); + resp.setStatus(HttpServletResponse.SC_ACCEPTED); + String relativeUrl = "/prof-output/" + outputFile.getName(); + resp.getWriter().write( + "Started [" + event.getInternalName() + + "] profiling. This page will automatically redirect to " + + relativeUrl + " after " + duration + " seconds. " + + "If empty diagram and Linux 4.6+, see 'Basic Usage' section on the Async " + + "Profiler Home Page, https://github.com/jvm-profiling-tools/async-profiler." 
+ + "\n\nCommand:\n" + + Joiner.on(" ").join(cmd)); + + // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified + // via url param + int refreshDelay = getInteger(req, "refreshDelay", 0); + + // instead of sending redirect, set auto-refresh so that browsers will refresh + // with redirected url + resp.setHeader("Refresh", (duration + refreshDelay) + ";" + relativeUrl); + resp.getWriter().flush(); + } finally { + profilerLock.unlock(); + } + } else { + setResponseHeader(resp); + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + resp.getWriter().write( + "Unable to acquire lock. Another instance of profiler might be running."); + LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + + " seconds. Another instance of profiler might be running."); + } + } catch (InterruptedException e) { + LOG.warn("Interrupted while acquiring profile lock.", e); + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + } + } else { + setResponseHeader(resp); + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + resp.getWriter().write("Another instance of profiler is already running."); + } + } + + private Integer getInteger(final HttpServletRequest req, final String param, + final Integer defaultValue) { + final String value = req.getParameter(param); + if (value != null) { + try { + return Integer.valueOf(value); + } catch (NumberFormatException e) { + return defaultValue; + } + } + return defaultValue; + } + + private Long getLong(final HttpServletRequest req, final String param) { + final String value = req.getParameter(param); + if (value != null) { + try { + return Long.valueOf(value); + } catch (NumberFormatException e) { + return null; + } + } + return null; + } + + private Double getMinWidth(final HttpServletRequest req) { + final String value = req.getParameter("minwidth"); + if (value != null) { + try { + return Double.valueOf(value); + } catch (NumberFormatException e) { + return null; + } + } + return null; + } + + private Event getEvent(final HttpServletRequest req) { + final String eventArg = req.getParameter("event"); + if (eventArg != null) { + Event event = Event.fromInternalName(eventArg); + return event == null ? 
Event.CPU : event; + } + return Event.CPU; + } + + private Output getOutput(final HttpServletRequest req) { + final String outputArg = req.getParameter("output"); + if (req.getParameter("output") != null) { + try { + return Output.valueOf(outputArg.trim().toUpperCase()); + } catch (IllegalArgumentException e) { + return Output.SVG; + } + } + return Output.SVG; + } + + static void setResponseHeader(final HttpServletResponse response) { + response.setHeader(ACCESS_CONTROL_ALLOW_METHODS, ALLOWED_METHODS); + response.setHeader(ACCESS_CONTROL_ALLOW_ORIGIN, "*"); + response.setContentType(CONTENT_TYPE_TEXT); + } + + static String getAsyncProfilerHome() { + String asyncProfilerHome = System.getenv(ASYNC_PROFILER_HOME_ENV); + // if ENV is not set, see if -Dasync.profiler.home=/path/to/async/profiler/home is set + if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { + asyncProfilerHome = System.getProperty(ASYNC_PROFILER_HOME_SYSTEM_PROPERTY); + } + + return asyncProfilerHome; + } + + public static class DisabledServlet extends HttpServlet { + + private static final long serialVersionUID = 1L; + + @Override + protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) + throws IOException { + resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + setResponseHeader(resp); + resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" + + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); + return; + } + + } + +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProxyUserAuthenticationFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProxyUserAuthenticationFilter.java new file mode 100755 index 00000000..55e774a2 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ProxyUserAuthenticationFilter.java @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequestWrapper; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.security.Principal; +import java.util.ArrayList; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * This file has been copied directly (changing only the package name and and the ASF license + * text format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase + * depends on doesn't have it yet + * (as of 2020 Apr 24, there is no Hadoop release that has it either). + * + * Hadoop version: + * unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 + * + * Haddop path: + * hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/ + * server/ProxyUserAuthenticationFilter.java + * + * AuthenticationFilter which adds support to perform operations + * using end user instead of proxy user. Fetches the end user from + * doAs Query Parameter. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProxyUserAuthenticationFilter extends AuthenticationFilter { + + private static final Logger LOG = LoggerFactory.getLogger( + ProxyUserAuthenticationFilter.class); + + private static final String DO_AS = "doas"; + public static final String PROXYUSER_PREFIX = "proxyuser"; + + @Override + public void init(FilterConfig filterConfig) throws ServletException { + Configuration conf = getProxyuserConfiguration(filterConfig); + ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX); + super.init(filterConfig); + } + + @Override + protected void doFilter(FilterChain filterChain, HttpServletRequest request, + HttpServletResponse response) throws IOException, ServletException { + final HttpServletRequest lowerCaseRequest = toLowerCase(request); + String doAsUser = lowerCaseRequest.getParameter(DO_AS); + + if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) { + LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", + doAsUser, request.getRemoteUser(), request.getRemoteAddr()); + UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ? 
+ UserGroupInformation.createRemoteUser(request.getRemoteUser()) + : null; + if (requestUgi != null) { + requestUgi = UserGroupInformation.createProxyUser(doAsUser, + requestUgi); + try { + ProxyUsers.authorize(requestUgi, request.getRemoteAddr()); + + final UserGroupInformation ugiF = requestUgi; + request = new HttpServletRequestWrapper(request) { + @Override + public String getRemoteUser() { + return ugiF.getShortUserName(); + } + + @Override + public Principal getUserPrincipal() { + return new Principal() { + @Override + public String getName() { + return ugiF.getUserName(); + } + }; + } + }; + LOG.debug("Proxy user Authentication successful"); + } catch (AuthorizationException ex) { + HttpExceptionUtils.createServletExceptionResponse(response, + HttpServletResponse.SC_FORBIDDEN, ex); + LOG.warn("Proxy user Authentication exception", ex); + return; + } + } + } + super.doFilter(filterChain, request, response); + } + + protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) + throws ServletException { + Configuration conf = new Configuration(false); + Enumeration names = filterConfig.getInitParameterNames(); + while (names.hasMoreElements()) { + String name = (String) names.nextElement(); + if (name.startsWith(PROXYUSER_PREFIX + ".")) { + String value = filterConfig.getInitParameter(name); + conf.set(name, value); + } + } + return conf; + } + + static boolean containsUpperCase(final Iterable strings) { + for(String s : strings) { + for(int i = 0; i < s.length(); i++) { + if (Character.isUpperCase(s.charAt(i))) { + return true; + } + } + } + return false; + } + + public static HttpServletRequest toLowerCase( + final HttpServletRequest request) { + @SuppressWarnings("unchecked") + final Map original = (Map) + request.getParameterMap(); + if (!containsUpperCase(original.keySet())) { + return request; + } + + final Map> m = new HashMap>(); + for (Map.Entry entry : original.entrySet()) { + final String key = StringUtils.toLowerCase(entry.getKey()); + List strings = m.get(key); + if (strings == null) { + strings = new ArrayList(); + m.put(key, strings); + } + for (String v : entry.getValue()) { + strings.add(v); + } + } + + return new HttpServletRequestWrapper(request) { + private Map parameters = null; + + @Override + public Map getParameterMap() { + if (parameters == null) { + parameters = new HashMap(); + for (Map.Entry> entry : m.entrySet()) { + final List a = entry.getValue(); + parameters.put(entry.getKey(), a.toArray(new String[a.size()])); + } + } + return parameters; + } + + @Override + public String getParameter(String name) { + final List a = m.get(name); + return a == null ? 
null : a.get(0); + } + + @Override + public String[] getParameterValues(String name) { + return getParameterMap().get(name); + } + + @Override + public Enumeration getParameterNames() { + final Iterator i = m.keySet().iterator(); + return new Enumeration() { + @Override + public boolean hasMoreElements() { + return i.hasNext(); + } + + @Override + public String nextElement() { + return i.next(); + } + }; + } + }; + } + +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/SecurityHeadersFilter.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/SecurityHeadersFilter.java new file mode 100755 index 00000000..b2cddc0c --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/SecurityHeadersFilter.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.http; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) +public class SecurityHeadersFilter implements Filter { + private static final Logger LOG = + LoggerFactory.getLogger(SecurityHeadersFilter.class); + private static final String DEFAULT_HSTS = ""; + private static final String DEFAULT_CSP = ""; + private FilterConfig filterConfig; + + @Override + public void init(FilterConfig filterConfig) throws ServletException { + this.filterConfig = filterConfig; + LOG.info("Added security headers filter"); + } + + @Override + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + HttpServletResponse httpResponse = (HttpServletResponse) response; + httpResponse.addHeader("X-Content-Type-Options", "nosniff"); + httpResponse.addHeader("X-XSS-Protection", "1; mode=block"); + String hsts = filterConfig.getInitParameter("hsts"); + if (StringUtils.isNotBlank(hsts)) { + httpResponse.addHeader("Strict-Transport-Security", hsts); + } + String csp = filterConfig.getInitParameter("csp"); + if (StringUtils.isNotBlank(csp)) { + httpResponse.addHeader("Content-Security-Policy", csp); + } + chain.doFilter(request, response); + } + + @Override + public void destroy() { + } + + public static Map getDefaultParameters(Configuration conf) { + Map params = new HashMap<>(); + params.put("hsts", conf.get("hbase.http.filter.hsts.value", + DEFAULT_HSTS)); + params.put("csp", conf.get("hbase.http.filter.csp.value", + DEFAULT_CSP)); + return params; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ServerConfigurationKeys.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ServerConfigurationKeys.java new file mode 100755 index 00000000..8b8f9aeb --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/ServerConfigurationKeys.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.http; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +/** + * This interface contains constants for configuration keys used + * in the hbase http server code. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface ServerConfigurationKeys { + + /** Enable/Disable ssl for http server */ + public static final String HBASE_SSL_ENABLED_KEY = "hbase.ssl.enabled"; + + public static final boolean HBASE_SSL_ENABLED_DEFAULT = false; + + /** Enable/Disable aliases serving from jetty */ + public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = + "hbase.jetty.logs.serve.aliases"; + + public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = + true; + + public static final String HBASE_HTTP_STATIC_USER = "hbase.http.staticuser.user"; + + public static final String DEFAULT_HBASE_HTTP_STATIC_USER = "dr.stack"; + +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/conf/ConfServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/conf/ConfServlet.java new file mode 100755 index 00000000..f3c69f3b --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/conf/ConfServlet.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.http.conf; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.rest.http.HttpServer; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.io.Writer; + +/** + * A servlet to print out the running configuration data. + */ +@InterfaceAudience.LimitedPrivate({"HBase"}) +@InterfaceStability.Unstable +public class ConfServlet extends HttpServlet { + private static final long serialVersionUID = 1L; + + private static final String FORMAT_JSON = "json"; + private static final String FORMAT_XML = "xml"; + private static final String FORMAT_PARAM = "format"; + + /** + * Return the Configuration of the daemon hosting this servlet. 
+ * This is populated when the HttpServer starts. + */ + private Configuration getConfFromContext() { + Configuration conf = (Configuration)getServletContext().getAttribute( + HttpServer.CONF_CONTEXT_ATTRIBUTE); + assert conf != null; + return conf; + } + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), + request, response)) { + return; + } + + String format = request.getParameter(FORMAT_PARAM); + if (null == format) { + format = FORMAT_XML; + } + + if (FORMAT_XML.equals(format)) { + response.setContentType("text/xml; charset=utf-8"); + } else if (FORMAT_JSON.equals(format)) { + response.setContentType("application/json; charset=utf-8"); + } + + Writer out = response.getWriter(); + try { + writeResponse(getConfFromContext(), out, format); + } catch (BadFormatException bfe) { + response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage()); + } + out.close(); + } + + /** + * Guts of the servlet - extracted for easy testing. + */ + static void writeResponse(Configuration conf, Writer out, String format) + throws IOException, BadFormatException { + if (FORMAT_JSON.equals(format)) { + Configuration.dumpConfiguration(conf, out); + } else if (FORMAT_XML.equals(format)) { + conf.writeXml(out); + } else { + throw new BadFormatException("Bad format: " + format); + } + } + + public static class BadFormatException extends Exception { + private static final long serialVersionUID = 1L; + + public BadFormatException(String msg) { + super(msg); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/jmx/JMXJsonServlet.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/jmx/JMXJsonServlet.java new file mode 100755 index 00000000..4b93ab5e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/jmx/JMXJsonServlet.java @@ -0,0 +1,245 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.http.jmx; + +import org.apache.hadoop.hbase.rest.http.HttpServer; +import org.apache.hadoop.hbase.rest.util.JSONBean; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.TabularData; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.io.PrintWriter; +import java.lang.management.ManagementFactory; + +/* + * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has + * been rewritten to be read only and to output in a JSON format so it is not + * really that close to the original. + */ + +/** + * Provides Read only web access to JMX. + *
+ * This servlet generally will be placed under the /jmx URL for each
+ * HttpServer. It provides read only
+ * access to JMX metrics. The optional qry parameter
+ * may be used to query only a subset of the JMX Beans. This query
+ * functionality is provided through the
+ * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
+ * method.
+ *
+ * For example http://.../jmx?qry=Hadoop:* will return
+ * all hadoop metrics exposed through JMX.
+ *
+ * The optional get parameter is used to query a specific
+ * attribute of a JMX bean. The format of the URL is
+ * http://.../jmx?get=MXBeanName::AttributeName
+ *
+ * For example
+ * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
+ * will return the cluster id of the namenode mxbean.
+ *
+ * If the qry or the get parameter is not formatted
+ * correctly then a 400 BAD REQUEST http response code will be returned.
+ *
+ * If a resource such as an mbean or attribute cannot be found,
+ * a 404 SC_NOT_FOUND http response code will be returned.
+ *
+ * The return format is JSON and in the form
+ *
+ *  {
+ *    "beans" : [
+ *      {
+ *        "name":"bean-name"
+ *        ...
+ *      }
+ *    ]
+ *  }
+ *
+ * The servlet attempts to convert the JMXBeans into JSON. Each
+ * bean's attributes will be converted to a JSON object member.
+ *
+ * If the attribute is a boolean, a number, a string, or an array
+ * it will be converted to the JSON equivalent.
+ *
+ * If the value is a {@link CompositeData} then it will be converted
+ * to a JSON object with the keys as the name of the JSON member and
+ * the value is converted following these same rules.
+ *
+ * If the value is a {@link TabularData} then it will be converted
+ * to an array of the {@link CompositeData} elements that it contains.
+ *
+ * All other objects will be converted to a string and output as such.
+ *
+ * The bean's name and modelerType will be returned for all beans.
+ *
+ * Optional parameter "callback" should be used to deliver a JSONP response.
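+ *
+ * As an additional illustration (the host and port below are hypothetical, not
+ * values asserted by this class), a request such as
+ * http://localhost:8085/jmx?get=java.lang:type=Memory::HeapMemoryUsage
+ * would return only the heap usage attribute of the standard JVM memory bean,
+ * while http://localhost:8085/jmx?qry=java.lang:* would return all java.lang beans.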
+ * + */ +@InterfaceAudience.Private +public class JMXJsonServlet extends HttpServlet { + private static final Logger LOG = LoggerFactory.getLogger( + JMXJsonServlet.class); + + private static final long serialVersionUID = 1L; + + private static final String CALLBACK_PARAM = "callback"; + /** + * If query string includes 'description', then we will emit bean and attribute descriptions to + * output IFF they are not null and IFF the description is not the same as the attribute name: + * i.e. specify a URL like so: /jmx?description=true + */ + private static final String INCLUDE_DESCRIPTION = "description"; + + /** + * MBean server. + */ + protected transient MBeanServer mBeanServer; + + protected transient JSONBean jsonBeanWriter; + + /** + * Initialize this servlet. + */ + @Override + public void init() throws ServletException { + // Retrieve the MBean server + mBeanServer = ManagementFactory.getPlatformMBeanServer(); + this.jsonBeanWriter = new JSONBean(); + } + + /** + * Process a GET request for the specified resource. + * + * @param request + * The servlet request we are processing + * @param response + * The servlet response we are creating + */ + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { + try { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { + return; + } + String jsonpcb = null; + PrintWriter writer = null; + JSONBean.Writer beanWriter = null; + try { + jsonpcb = checkCallbackName(request.getParameter(CALLBACK_PARAM)); + writer = response.getWriter(); + + // "callback" parameter implies JSONP outpout + if (jsonpcb != null) { + response.setContentType("application/javascript; charset=utf8"); + writer.write(jsonpcb + "("); + } else { + response.setContentType("application/json; charset=utf8"); + } + beanWriter = this.jsonBeanWriter.open(writer); + // Should we output description on each attribute and bean? 
+ String tmpStr = request.getParameter(INCLUDE_DESCRIPTION); + boolean description = tmpStr != null && tmpStr.length() > 0; + + // query per mbean attribute + String getmethod = request.getParameter("get"); + if (getmethod != null) { + String[] splitStrings = getmethod.split("\\:\\:"); + if (splitStrings.length != 2) { + beanWriter.write("result", "ERROR"); + beanWriter.write("message", "query format is not as expected."); + beanWriter.flush(); + response.setStatus(HttpServletResponse.SC_BAD_REQUEST); + return; + } + if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), + splitStrings[1], description) != 0) { + beanWriter.flush(); + response.setStatus(HttpServletResponse.SC_BAD_REQUEST); + } + return; + } + + // query per mbean + String qry = request.getParameter("qry"); + if (qry == null) { + qry = "*:*"; + } + if (beanWriter.write(this.mBeanServer, new ObjectName(qry), null, description) != 0) { + beanWriter.flush(); + response.setStatus(HttpServletResponse.SC_BAD_REQUEST); + } + } finally { + if (beanWriter != null) { + beanWriter.close(); + } + if (jsonpcb != null) { + writer.write(");"); + } + if (writer != null) { + writer.close(); + } + } + } catch (IOException e) { + LOG.error("Caught an exception while processing JMX request", e); + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); + } catch (MalformedObjectNameException e) { + LOG.error("Caught an exception while processing JMX request", e); + response.sendError(HttpServletResponse.SC_BAD_REQUEST); + } + } + + /** + * Verifies that the callback property, if provided, is purely alphanumeric. + * This prevents a malicious callback name (that is javascript code) from being + * returned by the UI to an unsuspecting user. + * + * @param callbackName The callback name, can be null. + * @return The callback name + * @throws IOException If the name is disallowed. + */ + private String checkCallbackName(String callbackName) throws IOException { + if (null == callbackName) { + return null; + } + if (callbackName.matches("[A-Za-z0-9_]+")) { + return callbackName; + } + throw new IOException("'callback' must be alphanumeric"); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/Log4jUtils.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/Log4jUtils.java new file mode 100755 index 00000000..368167e1 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/Log4jUtils.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.http.log; + +import org.apache.yetus.audience.InterfaceAudience; + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.Set; + +/** + * A bridge class for operating on log4j, such as changing log level, etc. + *
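+ *
+ * A minimal usage sketch (the logger name below is illustrative only):
+ *
+ *   Log4jUtils.setLogLevel("org.apache.hadoop.hbase.rest", "DEBUG");
+ *   String level = Log4jUtils.getEffectiveLevel("org.apache.hadoop.hbase.rest");
+ *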
+ * Will call the methods in {@link InternalLog4jUtils} to actually operate on the log4j stuff. + */ +@InterfaceAudience.Private +public final class Log4jUtils { + + private static final String INTERNAL_UTILS_CLASS_NAME = + "org.apache.hadoop.hbase.logging.InternalLog4jUtils"; + + private Log4jUtils() { + } + + // load class when calling to avoid introducing class not found exception on log4j when loading + // this class even without calling any of the methods below. + private static Method getMethod(String methodName, Class... args) { + try { + Class clazz = Class.forName(INTERNAL_UTILS_CLASS_NAME); + return clazz.getDeclaredMethod(methodName, args); + } catch (ClassNotFoundException | NoSuchMethodException e) { + throw new AssertionError("should not happen", e); + } + } + + private static void throwUnchecked(Throwable throwable) { + if (throwable instanceof RuntimeException) { + throw (RuntimeException) throwable; + } + if (throwable instanceof Error) { + throw (Error) throwable; + } + } + + public static void setLogLevel(String loggerName, String levelName) { + Method method = getMethod("setLogLevel", String.class, String.class); + try { + method.invoke(null, loggerName, levelName); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("should not happen", e.getCause()); + } + } + + public static String getEffectiveLevel(String loggerName) { + Method method = getMethod("getEffectiveLevel", String.class); + try { + return (String) method.invoke(null, loggerName); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + throwUnchecked(e.getCause()); + throw new AssertionError("should not happen", e.getCause()); + } + } + + @SuppressWarnings("unchecked") + public static Set getActiveLogFiles() throws IOException { + Method method = getMethod("getActiveLogFiles"); + try { + return (Set) method.invoke(null); + } catch (IllegalAccessException e) { + throw new AssertionError("should not happen", e); + } catch (InvocationTargetException e) { + Throwable cause = e.getCause(); + throwUnchecked(cause); + if (cause instanceof IOException) { + throw (IOException) cause; + } + throw new AssertionError("should not happen", cause); + } + } + + /** + * Disables Zk- and HBase client logging + */ + public static void disableZkAndClientLoggers() { + // disable zookeeper log to avoid it mess up command output + setLogLevel("org.apache.zookeeper", "OFF"); + // disable hbase zookeeper tool log to avoid it mess up command output + setLogLevel("org.apache.hadoop.hbase.zookeeper", "OFF"); + // disable hbase client log to avoid it mess up command output + setLogLevel("org.apache.hadoop.hbase.client", "OFF"); + } + + /** + * Switches the logger for the given class to DEBUG level. + * @param clazz The class for which to switch to debug logging. + */ + public static void enableDebug(Class clazz) { + setLogLevel(clazz.getName(), "DEBUG"); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/LogLevel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/LogLevel.java new file mode 100755 index 00000000..26a0c72d --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/http/log/LogLevel.java @@ -0,0 +1,401 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.http.log; + +import org.apache.hadoop.HadoopIllegalArgumentException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.rest.http.HttpServer; +import org.apache.hadoop.security.authentication.client.AuthenticatedURL; +import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; +import org.apache.hadoop.security.ssl.SSLFactory; +import org.apache.hadoop.util.ServletUtil; +import org.apache.hadoop.util.Tool; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.common.base.Charsets; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSocketFactory; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.BufferedReader; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.PrintWriter; +import java.net.URL; +import java.net.URLConnection; +import java.util.Objects; +import java.util.regex.Pattern; + +/** + * Change log level in runtime. + */ +@InterfaceAudience.Private +public final class LogLevel { + private static final String USAGES = "\nUsage: General options are:\n" + + "\t[-getlevel [-protocol (http|https)]\n" + + "\t[-setlevel [-protocol (http|https)]"; + + public static final String PROTOCOL_HTTP = "http"; + public static final String PROTOCOL_HTTPS = "https"; + + /** + * A command line implementation + */ + public static void main(String[] args) throws Exception { + CLI cli = new CLI(new Configuration()); + System.exit(cli.run(args)); + } + + /** + * Valid command line options. + */ + private enum Operations { + GETLEVEL, + SETLEVEL, + UNKNOWN + } + + private static void printUsage() { + System.err.println(USAGES); + System.exit(-1); + } + + public static boolean isValidProtocol(String protocol) { + return ((protocol.equals(PROTOCOL_HTTP) || + protocol.equals(PROTOCOL_HTTPS))); + } + + @VisibleForTesting + static class CLI extends Configured implements Tool { + private Operations operation = Operations.UNKNOWN; + private String protocol; + private String hostName; + private String className; + private String level; + + CLI(Configuration conf) { + setConf(conf); + } + + @Override + public int run(String[] args) throws Exception { + try { + parseArguments(args); + sendLogLevelRequest(); + } catch (HadoopIllegalArgumentException e) { + printUsage(); + } + return 0; + } + + /** + * Send HTTP request to the daemon. 
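+ * For illustration only (the host:port below is hypothetical), a set-level run
+ * would be invoked with arguments such as:
+ *
+ *   -setlevel host.example.com:8085 org.apache.hadoop.hbase.rest DEBUG -protocol http
+ *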
+ * @throws HadoopIllegalArgumentException if arguments are invalid. + * @throws Exception if unable to connect + */ + private void sendLogLevelRequest() + throws HadoopIllegalArgumentException, Exception { + switch (operation) { + case GETLEVEL: + doGetLevel(); + break; + case SETLEVEL: + doSetLevel(); + break; + default: + throw new HadoopIllegalArgumentException( + "Expect either -getlevel or -setlevel"); + } + } + + public void parseArguments(String[] args) throws HadoopIllegalArgumentException { + if (args.length == 0) { + throw new HadoopIllegalArgumentException("No arguments specified"); + } + int nextArgIndex = 0; + while (nextArgIndex < args.length) { + switch (args[nextArgIndex]) { + case "-getlevel": + nextArgIndex = parseGetLevelArgs(args, nextArgIndex); + break; + case "-setlevel": + nextArgIndex = parseSetLevelArgs(args, nextArgIndex); + break; + case "-protocol": + nextArgIndex = parseProtocolArgs(args, nextArgIndex); + break; + default: + throw new HadoopIllegalArgumentException( + "Unexpected argument " + args[nextArgIndex]); + } + } + + // if operation is never specified in the arguments + if (operation == Operations.UNKNOWN) { + throw new HadoopIllegalArgumentException( + "Must specify either -getlevel or -setlevel"); + } + + // if protocol is unspecified, set it as http. + if (protocol == null) { + protocol = PROTOCOL_HTTP; + } + } + + private int parseGetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { + // fail if multiple operations are specified in the arguments + if (operation != Operations.UNKNOWN) { + throw new HadoopIllegalArgumentException("Redundant -getlevel command"); + } + // check number of arguments is sufficient + if (index + 2 >= args.length) { + throw new HadoopIllegalArgumentException("-getlevel needs two parameters"); + } + operation = Operations.GETLEVEL; + hostName = args[index + 1]; + className = args[index + 2]; + return index + 3; + } + + private int parseSetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { + // fail if multiple operations are specified in the arguments + if (operation != Operations.UNKNOWN) { + throw new HadoopIllegalArgumentException("Redundant -setlevel command"); + } + // check number of arguments is sufficient + if (index + 3 >= args.length) { + throw new HadoopIllegalArgumentException("-setlevel needs three parameters"); + } + operation = Operations.SETLEVEL; + hostName = args[index + 1]; + className = args[index + 2]; + level = args[index + 3]; + return index + 4; + } + + private int parseProtocolArgs(String[] args, int index) throws HadoopIllegalArgumentException { + // make sure only -protocol is specified + if (protocol != null) { + throw new HadoopIllegalArgumentException( + "Redundant -protocol command"); + } + // check number of arguments is sufficient + if (index + 1 >= args.length) { + throw new HadoopIllegalArgumentException( + "-protocol needs one parameter"); + } + // check protocol is valid + protocol = args[index + 1]; + if (!isValidProtocol(protocol)) { + throw new HadoopIllegalArgumentException( + "Invalid protocol: " + protocol); + } + return index + 2; + } + + /** + * Send HTTP request to get log level. + * + * @throws HadoopIllegalArgumentException if arguments are invalid. + * @throws Exception if unable to connect + */ + private void doGetLevel() throws Exception { + process(protocol + "://" + hostName + "/logLevel?log=" + className); + } + + /** + * Send HTTP request to set log level. 
+ * + * @throws HadoopIllegalArgumentException if arguments are invalid. + * @throws Exception if unable to connect + */ + private void doSetLevel() throws Exception { + process(protocol + "://" + hostName + "/logLevel?log=" + className + + "&level=" + level); + } + + /** + * Connect to the URL. Supports HTTP and supports SPNEGO + * authentication. It falls back to simple authentication if it fails to + * initiate SPNEGO. + * + * @param url the URL address of the daemon servlet + * @return a connected connection + * @throws Exception if it can not establish a connection. + */ + private URLConnection connect(URL url) throws Exception { + AuthenticatedURL.Token token = new AuthenticatedURL.Token(); + AuthenticatedURL aUrl; + SSLFactory clientSslFactory; + URLConnection connection; + // If https is chosen, configures SSL client. + if (PROTOCOL_HTTPS.equals(url.getProtocol())) { + clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, this.getConf()); + clientSslFactory.init(); + SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory(); + + aUrl = new AuthenticatedURL(new KerberosAuthenticator(), clientSslFactory); + connection = aUrl.openConnection(url, token); + HttpsURLConnection httpsConn = (HttpsURLConnection) connection; + httpsConn.setSSLSocketFactory(sslSocketF); + } else { + aUrl = new AuthenticatedURL(new KerberosAuthenticator()); + connection = aUrl.openConnection(url, token); + } + connection.connect(); + return connection; + } + + /** + * Configures the client to send HTTP request to the URL. + * Supports SPENGO for authentication. + * @param urlString URL and query string to the daemon's web UI + * @throws Exception if unable to connect + */ + private void process(String urlString) throws Exception { + URL url = new URL(urlString); + System.out.println("Connecting to " + url); + + URLConnection connection = connect(url); + + // read from the servlet + + try (InputStreamReader streamReader = + new InputStreamReader(connection.getInputStream(), Charsets.UTF_8); + BufferedReader bufferedReader = new BufferedReader(streamReader)) { + bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)) + .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); + } catch (IOException ioe) { + System.err.println("" + ioe); + } + } + } + + private static final String MARKER = ""; + private static final Pattern TAG = Pattern.compile("<[^>]*>"); + + /** + * A servlet implementation + */ + @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) + @InterfaceStability.Unstable + public static class Servlet extends HttpServlet { + private static final long serialVersionUID = 1L; + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws ServletException, IOException { + // Do the authorization + if (!HttpServer.hasAdministratorAccess(getServletContext(), request, + response)) { + return; + } + // Disallow modification of the LogLevel if explicitly set to readonly + Configuration conf = (Configuration) getServletContext().getAttribute( + HttpServer.CONF_CONTEXT_ATTRIBUTE); + if (conf.getBoolean("hbase.master.ui.readonly", false)) { + response.sendError(HttpServletResponse.SC_FORBIDDEN, "Modification of HBase via" + + " the UI is disallowed in configuration."); + return; + } + response.setContentType("text/html"); + PrintWriter out; + try { + String headerPath = "header.jsp?pageTitle=Log Level"; + request.getRequestDispatcher(headerPath).include(request, response); + out = response.getWriter(); + } 
catch (FileNotFoundException e) { + // in case file is not found fall back to old design + out = ServletUtil.initHTML(response, "Log Level"); + } + out.println(FORMS); + + String logName = ServletUtil.getParameter(request, "log"); + String level = ServletUtil.getParameter(request, "level"); + + if (logName != null) { + out.println("

Results:

"); + out.println(MARKER + + "Submitted Log Name: " + logName + "
"); + + Logger log = LoggerFactory.getLogger(logName); + out.println(MARKER + + "Log Class: " + log.getClass().getName() +"
"); + if (level != null) { + out.println(MARKER + "Submitted Level: " + level + "
"); + } + process(log, level, out); + } + + try { + String footerPath = "footer.jsp"; + out.println(""); + request.getRequestDispatcher(footerPath).include(request, response); + } catch (FileNotFoundException e) { + out.println(ServletUtil.HTML_TAIL); + } + out.close(); + } + + static final String FORMS = "
\n" + + "
\n" + "\n" + "
\n" + "Actions:" + "

" + + "

\n" + "\n" + "\n" + + "\n" + "\n" + "\n" + "\n" + "\n" + + "\n" + "\n" + "\n" + "\n" + + "\n" + "\n" + "\n" + + "\n" + "
\n" + + "\n" + "\n" + + "\n" + "" + + "Get the current log level for the specified log name." + "
\n" + + "\n" + "\n" + + "\n" + + "\n" + "" + + "Set the specified log level for the specified log name." + "
\n" + "
\n" + "

\n" + "
\n"; + + private static void process(Logger logger, String levelName, PrintWriter out) { + if (levelName != null) { + try { + Log4jUtils.setLogLevel(logger.getName(), levelName); + out.println(MARKER + "
" + "Setting Level to " + + levelName + " ...
" + "
"); + } catch (IllegalArgumentException e) { + out.println(MARKER + "
" + "Bad level : " + levelName + + "
" + "
"); + } + } + out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + + "
"); + } + } + + private LogLevel() {} +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java new file mode 100755 index 00000000..128be02b --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -0,0 +1,248 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import com.fasterxml.jackson.annotation.JsonProperty; +import java.io.IOException; +import java.io.Serializable; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlValue; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; +/** + * Representation of a cell. A cell is a single value associated a column and + * optional qualifier, and either the timestamp when it was stored or the user- + * provided timestamp if one was explicitly supplied. + * + *
+ * <complexType name="Cell">
+ *   <sequence>
+ *     <element name="value" maxOccurs="1" minOccurs="1">
+ *       <simpleType>
+ *         <restriction base="base64Binary"/>
+ *       </simpleType>
+ *     </element>
+ *   </sequence>
+ *   <attribute name="column" type="base64Binary" />
+ *   <attribute name="timestamp" type="int" />
+ * </complexType>
+ * 
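+ * A minimal construction sketch (the family, qualifier and value below are
+ * illustrative; Bytes refers to org.apache.hadoop.hbase.util.Bytes):
+ *
+ *   CellModel cell = new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q"),
+ *       Bytes.toBytes("data"));
+ *   byte[] pb = cell.createProtobufOutput();
+ *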
+ */ +@XmlRootElement(name="Cell") +@XmlAccessorType(XmlAccessType.FIELD) +@InterfaceAudience.Private +public class CellModel implements ProtobufMessageHandler, Serializable { + private static final long serialVersionUID = 1L; + + @JsonProperty("column") + @XmlAttribute + private byte[] column; + + @JsonProperty("timestamp") + @XmlAttribute + private long timestamp = HConstants.LATEST_TIMESTAMP; + + @JsonProperty("$") + @XmlValue + private byte[] value; + + /** + * Default constructor + */ + public CellModel() {} + + /** + * Constructor + * @param column + * @param value + */ + public CellModel(byte[] column, byte[] value) { + this(column, HConstants.LATEST_TIMESTAMP, value); + } + + /** + * Constructor + * @param column + * @param qualifier + * @param value + */ + public CellModel(byte[] column, byte[] qualifier, byte[] value) { + this(column, qualifier, HConstants.LATEST_TIMESTAMP, value); + } + + /** + * Constructor from KeyValue + * @param cell + */ + public CellModel(org.apache.hadoop.hbase.Cell cell) { + this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil + .cloneValue(cell)); + } + + /** + * Constructor + * @param column + * @param timestamp + * @param value + */ + public CellModel(byte[] column, long timestamp, byte[] value) { + this.column = column; + this.timestamp = timestamp; + this.value = value; + } + + /** + * Constructor + * @param column + * @param qualifier + * @param timestamp + * @param value + */ + public CellModel(byte[] column, byte[] qualifier, long timestamp, + byte[] value) { + this.column = CellUtil.makeColumn(column, qualifier); + this.timestamp = timestamp; + this.value = value; + } + + /** + * @return the column + */ + public byte[] getColumn() { + return column; + } + + /** + * @param column the column to set + */ + public void setColumn(byte[] column) { + this.column = column; + } + + /** + * @return true if the timestamp property has been specified by the + * user + */ + public boolean hasUserTimestamp() { + return timestamp != HConstants.LATEST_TIMESTAMP; + } + + /** + * @return the timestamp + */ + public long getTimestamp() { + return timestamp; + } + + /** + * @param timestamp the timestamp to set + */ + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + /** + * @return the value + */ + public byte[] getValue() { + return value; + } + + /** + * @param value the value to set + */ + public void setValue(byte[] value) { + this.value = value; + } + + @Override + public byte[] createProtobufOutput() { + Cell.Builder builder = Cell.newBuilder(); + builder.setColumn(UnsafeByteOperations.unsafeWrap(getColumn())); + builder.setData(UnsafeByteOperations.unsafeWrap(getValue())); + if (hasUserTimestamp()) { + builder.setTimestamp(getTimestamp()); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + Cell.Builder builder = Cell.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + setColumn(builder.getColumn().toByteArray()); + setValue(builder.getData().toByteArray()); + if (builder.hasTimestamp()) { + setTimestamp(builder.getTimestamp()); + } + return this; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj == this) { + return true; + } + if (obj.getClass() != getClass()) { + return false; + } + CellModel cellModel = (CellModel) obj; + return new EqualsBuilder(). + append(column, cellModel.column). 
+ append(timestamp, cellModel.timestamp). + append(value, cellModel.value). + isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(). + append(column). + append(timestamp). + append(value). + toHashCode(); + } + + @Override + public String toString() { + return new ToStringBuilder(this). + append("column", column). + append("timestamp", timestamp). + append("value", value). + toString(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java new file mode 100755 index 00000000..ebb2b183 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -0,0 +1,154 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellMessage.Cell; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.CellSetMessage.CellSet; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Representation of a grouping of cells. May contain cells from more than + * one row. Encapsulates RowModel and CellModel models. + * + *
+ * <complexType name="CellSet">
+ *   <sequence>
+ *     <element name="row" type="tns:Row" maxOccurs="unbounded"
+ *       minOccurs="1"></element>
+ *   </sequence>
+ * </complexType>
+ *
+ * <complexType name="Row">
+ *   <sequence>
+ *     <element name="key" type="base64Binary"></element>
+ *     <element name="cell" type="tns:Cell"
+ *       maxOccurs="unbounded" minOccurs="1"></element>
+ *   </sequence>
+ * </complexType>
+ *
+ * <complexType name="Cell">
+ *   <sequence>
+ *     <element name="value" maxOccurs="1" minOccurs="1">
+ *       <simpleType>
+ *         <restriction base="base64Binary"/>
+ *       </simpleType>
+ *     </element>
+ *   </sequence>
+ *   <attribute name="column" type="base64Binary" />
+ *   <attribute name="timestamp" type="int" />
+ * </complexType>
+ * 
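+ * A minimal construction sketch (row key, family, qualifier and value are
+ * illustrative; Bytes refers to org.apache.hadoop.hbase.util.Bytes):
+ *
+ *   CellSetModel cellSet = new CellSetModel();
+ *   RowModel row = new RowModel(Bytes.toBytes("row1"));
+ *   row.addCell(new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q"),
+ *       Bytes.toBytes("data")));
+ *   cellSet.addRow(row);
+ *   byte[] pb = cellSet.createProtobufOutput();
+ *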
+ */ +@XmlRootElement(name="CellSet") +@XmlAccessorType(XmlAccessType.FIELD) +@InterfaceAudience.Private +public class CellSetModel implements Serializable, ProtobufMessageHandler { + private static final long serialVersionUID = 1L; + + @XmlElement(name="Row") + private List rows; + + /** + * Constructor + */ + public CellSetModel() { + this.rows = new ArrayList<>(); + } + + /** + * @param rows the rows + */ + public CellSetModel(List rows) { + super(); + this.rows = rows; + } + + /** + * Add a row to this cell set + * @param row the row + */ + public void addRow(RowModel row) { + rows.add(row); + } + + /** + * @return the rows + */ + public List getRows() { + return rows; + } + + @Override + public byte[] createProtobufOutput() { + CellSet.Builder builder = CellSet.newBuilder(); + for (RowModel row : getRows()) { + CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder(); + rowBuilder.setKey(UnsafeByteOperations.unsafeWrap(row.getKey())); + for (CellModel cell : row.getCells()) { + Cell.Builder cellBuilder = Cell.newBuilder(); + cellBuilder.setColumn(UnsafeByteOperations.unsafeWrap(cell.getColumn())); + cellBuilder.setData(UnsafeByteOperations.unsafeWrap(cell.getValue())); + if (cell.hasUserTimestamp()) { + cellBuilder.setTimestamp(cell.getTimestamp()); + } + rowBuilder.addValues(cellBuilder); + } + builder.addRows(rowBuilder); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + CellSet.Builder builder = CellSet.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + for (CellSet.Row row : builder.getRowsList()) { + RowModel rowModel = new RowModel(row.getKey().toByteArray()); + for (Cell cell : row.getValuesList()) { + long timestamp = HConstants.LATEST_TIMESTAMP; + if (cell.hasTimestamp()) { + timestamp = cell.getTimestamp(); + } + rowModel.addCell( + new CellModel(cell.getColumn().toByteArray(), timestamp, + cell.getData().toByteArray())); + } + addRow(rowModel); + } + return this; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java new file mode 100755 index 00000000..967f6ba2 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java @@ -0,0 +1,242 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.Serializable; +import java.util.LinkedHashMap; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAnyAttribute; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.namespace.QName; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; + +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonAnySetter; + +/** + * Representation of a column family schema. + * + *
+ * <complexType name="ColumnSchema">
+ *   <attribute name="name" type="string"></attribute>
+ *   <anyAttribute></anyAttribute>
+ * </complexType>
+ * 
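+ *
+ * A usage sketch; the family name and attribute values below are illustrative:
+ * <pre>
+ * ColumnSchemaModel family = new ColumnSchemaModel();
+ * family.setName("info");
+ * family.__setVersions(3);
+ * family.addAttribute("COMPRESSION", "GZ");
+ * String compression = family.getAttribute("COMPRESSION");  // "GZ"
+ * </pre>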
+ */ +@XmlRootElement(name="ColumnSchema") +@InterfaceAudience.Private +public class ColumnSchemaModel implements Serializable { + private static final long serialVersionUID = 1L; + private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE); + private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE); + private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER); + private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION); + private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY); + private static QName TTL = new QName(HColumnDescriptor.TTL); + private static QName VERSIONS = new QName(HConstants.VERSIONS); + + private String name; + private Map attrs = new LinkedHashMap<>(); + + /** + * Default constructor + */ + public ColumnSchemaModel() {} + + /** + * Add an attribute to the column family schema + * @param name the attribute name + * @param value the attribute value + */ + @JsonAnySetter + public void addAttribute(String name, Object value) { + attrs.put(new QName(name), value); + } + + /** + * @param name the attribute name + * @return the attribute value + */ + public String getAttribute(String name) { + Object o = attrs.get(new QName(name)); + return o != null ? o.toString(): null; + } + + /** + * @return the column name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @return the map for holding unspecified (user) attributes + */ + @XmlAnyAttribute + @JsonAnyGetter + public Map getAny() { + return attrs; + } + + /** + * @param name the table name + */ + public void setName(String name) { + this.name = name; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{ NAME => '"); + sb.append(name); + sb.append('\''); + for (Map.Entry e: attrs.entrySet()) { + sb.append(", "); + sb.append(e.getKey().getLocalPart()); + sb.append(" => '"); + sb.append(e.getValue().toString()); + sb.append('\''); + } + sb.append(" }"); + return sb.toString(); + } + + // getters and setters for common schema attributes + + // cannot be standard bean type getters and setters, otherwise this would + // confuse JAXB + + /** + * @return true if the BLOCKCACHE attribute is present and true + */ + public boolean __getBlockcache() { + Object o = attrs.get(BLOCKCACHE); + return o != null ? + Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE; + } + + /** + * @return the value of the BLOCKSIZE attribute or its default if it is unset + */ + public int __getBlocksize() { + Object o = attrs.get(BLOCKSIZE); + return o != null ? + Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE; + } + + /** + * @return the value of the BLOOMFILTER attribute or its default if unset + */ + public String __getBloomfilter() { + Object o = attrs.get(BLOOMFILTER); + return o != null ? o.toString() : HColumnDescriptor.DEFAULT_BLOOMFILTER; + } + + /** + * @return the value of the COMPRESSION attribute or its default if unset + */ + public String __getCompression() { + Object o = attrs.get(COMPRESSION); + return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION; + } + + /** + * @return true if the IN_MEMORY attribute is present and true + */ + public boolean __getInMemory() { + Object o = attrs.get(IN_MEMORY); + return o != null ? 
+ Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY; + } + + /** + * @return the value of the TTL attribute or its default if it is unset + */ + public int __getTTL() { + Object o = attrs.get(TTL); + return o != null ? + Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL; + } + + /** + * @return the value of the VERSIONS attribute or its default if it is unset + */ + public int __getVersions() { + Object o = attrs.get(VERSIONS); + return o != null ? + Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS; + } + + /** + * @param value the desired value of the BLOCKSIZE attribute + */ + public void __setBlocksize(int value) { + attrs.put(BLOCKSIZE, Integer.toString(value)); + } + + /** + * @param value the desired value of the BLOCKCACHE attribute + */ + public void __setBlockcache(boolean value) { + attrs.put(BLOCKCACHE, Boolean.toString(value)); + } + + public void __setBloomfilter(String value) { + attrs.put(BLOOMFILTER, value); + } + + /** + * @param value the desired value of the COMPRESSION attribute + */ + public void __setCompression(String value) { + attrs.put(COMPRESSION, value); + } + + /** + * @param value the desired value of the IN_MEMORY attribute + */ + public void __setInMemory(boolean value) { + attrs.put(IN_MEMORY, Boolean.toString(value)); + } + + /** + * @param value the desired value of the TTL attribute + */ + public void __setTTL(int value) { + attrs.put(TTL, Integer.toString(value)); + } + + /** + * @param value the desired value of the VERSIONS attribute + */ + public void __setVersions(int value) { + attrs.put(VERSIONS, Integer.toString(value)); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java new file mode 100755 index 00000000..aa7df1e9 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -0,0 +1,171 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlTransient; + +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; + +import org.apache.hadoop.hbase.shaded.rest.protobuf + .generated.NamespacePropertiesMessage.NamespaceProperties; + +/** + * List a HBase namespace's key/value properties. + *
    + *
+ * <ul>
+ * <li>NamespaceProperties: outer element</li>
+ * <li>properties: sequence property elements</li>
+ * <li>entry</li>
+ * <li>key: property key</li>
+ * <li>value: property value</li>
+ * </ul>
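+ *
+ * A usage sketch; the namespace and property names below are illustrative:
+ * <pre>
+ * NamespacesInstanceModel model = new NamespacesInstanceModel("testNamespace");
+ * model.addProperty("key1", "value1");
+ * byte[] pb = model.createProtobufOutput();
+ * </pre>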
+ */ +@XmlRootElement(name="NamespaceProperties") +@XmlAccessorType(XmlAccessType.FIELD) +@InterfaceAudience.Private +public class NamespacesInstanceModel implements Serializable, ProtobufMessageHandler { + + private static final long serialVersionUID = 1L; + + // JAX-RS automatically converts Map to XMLAnyElement. + private Map properties = null; + + @XmlTransient + private String namespaceName; + + /** + * Default constructor. Do not use. + */ + public NamespacesInstanceModel() {} + + /** + * Constructor to use if namespace does not exist in HBASE. + * @param namespaceName the namespace name. + * @throws IOException + */ + public NamespacesInstanceModel(String namespaceName) throws IOException { + this(null, namespaceName); + } + + /** + * Constructor + * @param admin the administrative API + * @param namespaceName the namespace name. + * @throws IOException + */ + public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException { + this.namespaceName = namespaceName; + if(admin == null) { return; } + + NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName); + + // For properly formed JSON, if no properties, field has to be null (not just no elements). + if(nd.getConfiguration().isEmpty()){ return; } + + properties = new HashMap<>(); + properties.putAll(nd.getConfiguration()); + } + + /** + * Add property to the namespace. + * @param key attribute name + * @param value attribute value + */ + public void addProperty(String key, String value) { + if(properties == null){ + properties = new HashMap<>(); + } + properties.put(key, value); + } + + /** + * @return The map of uncategorized namespace properties. + */ + public Map getProperties() { + if(properties == null){ + properties = new HashMap<>(); + } + return properties; + } + + public String getNamespaceName(){ + return namespaceName; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{NAME => \'"); + sb.append(namespaceName); + sb.append("\'"); + if(properties != null){ + for (Map.Entry entry : properties.entrySet()) { + sb.append(", "); + sb.append(entry.getKey()); + sb.append(" => '"); + sb.append(entry.getValue()); + sb.append("\'"); + } + } + sb.append("}"); + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); + if(properties != null){ + for (Map.Entry entry : properties.entrySet()) { + String key = entry.getKey(); + NamespaceProperties.Property.Builder property = NamespaceProperties.Property.newBuilder(); + property.setKey(key); + property.setValue(entry.getValue()); + builder.addProps(property); + } + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); + builder.mergeFrom(message); + List properties = builder.getPropsList(); + for(NamespaceProperties.Property property: properties){ + addProperty(property.getKey(), property.getValue()); + } + return this; + } + +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java new file mode 100755 index 00000000..0be558d2 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -0,0 +1,118 @@ +/* 
+ * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; + +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.NamespacesMessage.Namespaces; + +import com.fasterxml.jackson.annotation.JsonProperty; + + +/** + * A list of HBase namespaces. + *
    + *
+ * <ul>
+ * <li>Namespace: namespace name</li>
+ * </ul>
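+ *
+ * A usage sketch, assuming an already-connected {@code Admin} instance named {@code admin}:
+ * <pre>
+ * NamespacesModel model = new NamespacesModel(admin);
+ * for (String namespace : model.getNamespaces()) {
+ *   System.out.println(namespace);
+ * }
+ * </pre>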
+ */ +@XmlRootElement(name="Namespaces") +@XmlAccessorType(XmlAccessType.FIELD) +@InterfaceAudience.Private +public class NamespacesModel implements Serializable, ProtobufMessageHandler { + + private static final long serialVersionUID = 1L; + + @JsonProperty("Namespace") + @XmlElement(name="Namespace") + private List namespaces = new ArrayList<>(); + + /** + * Default constructor. Do not use. + */ + public NamespacesModel() {} + + /** + * Constructor + * @param admin the administrative API + * @throws IOException + */ + public NamespacesModel(Admin admin) throws IOException { + NamespaceDescriptor[] nds = admin.listNamespaceDescriptors(); + namespaces = new ArrayList<>(nds.length); + for (NamespaceDescriptor nd : nds) { + namespaces.add(nd.getName()); + } + } + + /** + * @return all namespaces + */ + public List getNamespaces() { + return namespaces; + } + + /** + * @param namespaces the namespace name array + */ + public void setNamespaces(List namespaces) { + this.namespaces = namespaces; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for (String namespace : namespaces) { + sb.append(namespace); + sb.append("\n"); + } + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + Namespaces.Builder builder = Namespaces.newBuilder(); + builder.addAllNamespace(namespaces); + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + Namespaces.Builder builder = Namespaces.newBuilder(); + builder.mergeFrom(message); + namespaces = builder.getNamespaceList(); + return this; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java new file mode 100755 index 00000000..b560f697 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -0,0 +1,190 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rest.model; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Representation of a row. A row is a related set of cells, grouped by common + * row key. RowModels do not appear in results by themselves. They are always + * encapsulated within CellSetModels. + * + *
+ * <complexType name="Row">
+ *   <sequence>
+ *     <element name="key" type="base64Binary"></element>
+ *     <element name="cell" type="tns:Cell" 
+ *       maxOccurs="unbounded" minOccurs="1"></element>
+ *   </sequence>
+ * </complexType>
+ * 
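+ *
+ * A construction sketch; the row key and cell are illustrative, and CellModel's
+ * two-argument (column, value) constructor is assumed:
+ * <pre>
+ * RowModel row = new RowModel("testrow1");
+ * row.addCell(new CellModel(Bytes.toBytes("cf:a"), Bytes.toBytes("value1")));
+ * byte[] key = row.getKey();
+ * </pre>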
+ */ +@XmlRootElement(name="Row") +@XmlAccessorType(XmlAccessType.FIELD) +@InterfaceAudience.Private +public class RowModel implements ProtobufMessageHandler, Serializable { + private static final long serialVersionUID = 1L; + + @JsonProperty("key") + @XmlAttribute + private byte[] key; + + @JsonProperty("Cell") + @XmlElement(name="Cell") + private List cells = new ArrayList<>(); + + + /** + * Default constructor + */ + public RowModel() { } + + /** + * Constructor + * @param key the row key + */ + public RowModel(final String key) { + this(Bytes.toBytes(key)); + } + + /** + * Constructor + * @param key the row key + */ + public RowModel(final byte[] key) { + this.key = key; + cells = new ArrayList<>(); + } + + /** + * Constructor + * @param key the row key + * @param cells the cells + */ + public RowModel(final String key, final List cells) { + this(Bytes.toBytes(key), cells); + } + + /** + * Constructor + * @param key the row key + * @param cells the cells + */ + public RowModel(final byte[] key, final List cells) { + this.key = key; + this.cells = cells; + } + + /** + * Adds a cell to the list of cells for this row + * @param cell the cell + */ + public void addCell(CellModel cell) { + cells.add(cell); + } + + /** + * @return the row key + */ + public byte[] getKey() { + return key; + } + + /** + * @param key the row key + */ + public void setKey(byte[] key) { + this.key = key; + } + + /** + * @return the cells + */ + public List getCells() { + return cells; + } + + @Override + public byte[] createProtobufOutput() { + // there is no standalone row protobuf message + throw new UnsupportedOperationException( + "no protobuf equivalent to RowModel"); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + // there is no standalone row protobuf message + throw new UnsupportedOperationException( + "no protobuf equivalent to RowModel"); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj == this) { + return true; + } + if (obj.getClass() != getClass()) { + return false; + } + RowModel rowModel = (RowModel) obj; + return new EqualsBuilder(). + append(key, rowModel.key). + append(cells, rowModel.cells). + isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(). + append(key). + append(cells). + toHashCode(); + } + + @Override + public String toString() { + return new ToStringBuilder(this). + append("key", key). + append("cells", cells). + toString(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java new file mode 100755 index 00000000..c0822c6e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -0,0 +1,902 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; + +import javax.ws.rs.core.MediaType; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.HConstants; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.BinaryPrefixComparator; +import org.apache.hadoop.hbase.filter.BitComparator; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; +import org.apache.hadoop.hbase.filter.ColumnPaginationFilter; +import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; +import org.apache.hadoop.hbase.filter.ColumnRangeFilter; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.filter.DependentColumnFilter; +import org.apache.hadoop.hbase.filter.FamilyFilter; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.InclusiveStopFilter; +import org.apache.hadoop.hbase.filter.KeyOnlyFilter; +import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; +import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange; +import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter; +import org.apache.hadoop.hbase.filter.NullComparator; +import org.apache.hadoop.hbase.filter.PageFilter; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.filter.QualifierFilter; +import org.apache.hadoop.hbase.filter.RandomRowFilter; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueExcludeFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.filter.SkipFilter; +import org.apache.hadoop.hbase.filter.SubstringComparator; +import org.apache.hadoop.hbase.filter.TimestampsFilter; +import org.apache.hadoop.hbase.filter.ValueFilter; +import org.apache.hadoop.hbase.filter.WhileMatchFilter; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.security.visibility.Authorizations; +import org.apache.hadoop.hbase.util.Bytes; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.ScannerMessage.Scanner; + +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; + +/** + * A 
representation of Scanner parameters. + * + *
+ * <complexType name="Scanner">
+ *   <sequence>
+ *     <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/>
+ *     <element name="filter" type="string" minOccurs="0" maxOccurs="1"></element>
+ *   </sequence>
+ *   <attribute name="startRow" type="base64Binary"></attribute>
+ *   <attribute name="endRow" type="base64Binary"></attribute>
+ *   <attribute name="batch" type="int"></attribute>
+ *   <attribute name="caching" type="int"></attribute>
+ *   <attribute name="startTime" type="int"></attribute>
+ *   <attribute name="endTime" type="int"></attribute>
+ *   <attribute name="maxVersions" type="int"></attribute>
+ * </complexType>
+ * 
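+ *
+ * A usage sketch; the column family and row prefix below are illustrative:
+ * <pre>
+ * Scan scan = new Scan();
+ * scan.addFamily(Bytes.toBytes("info"));
+ * scan.setFilter(new PrefixFilter(Bytes.toBytes("row-")));
+ * ScannerModel model = ScannerModel.fromScan(scan);
+ * byte[] pb = model.createProtobufOutput();
+ * </pre>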
+ */ +@XmlRootElement(name="Scanner") +@JsonInclude(JsonInclude.Include.NON_NULL) +@InterfaceAudience.Private +public class ScannerModel implements ProtobufMessageHandler, Serializable { + + private static final long serialVersionUID = 1L; + + private byte[] startRow = HConstants.EMPTY_START_ROW; + private byte[] endRow = HConstants.EMPTY_END_ROW; + private List columns = new ArrayList<>(); + private int batch = Integer.MAX_VALUE; + private long startTime = 0; + private long endTime = Long.MAX_VALUE; + private String filter = null; + private int maxVersions = Integer.MAX_VALUE; + private int caching = -1; + private List labels = new ArrayList<>(); + private boolean cacheBlocks = true; + private int limit = -1; + + /** + * Implement lazily-instantiated singleton as per recipe + * here: http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ + */ + private static class JaxbJsonProviderHolder { + static final JacksonJaxbJsonProvider INSTANCE = new JacksonJaxbJsonProvider(); + } + + @XmlRootElement + static class FilterModel { + + @XmlRootElement + static class ByteArrayComparableModel { + @XmlAttribute public String type; + @XmlAttribute public String value; + @XmlAttribute public String op; + + static enum ComparatorType { + BinaryComparator, + BinaryPrefixComparator, + BitComparator, + NullComparator, + RegexStringComparator, + SubstringComparator + } + + public ByteArrayComparableModel() { } + + public ByteArrayComparableModel( + ByteArrayComparable comparator) { + String typeName = comparator.getClass().getSimpleName(); + ComparatorType type = ComparatorType.valueOf(typeName); + this.type = typeName; + switch (type) { + case BinaryComparator: + case BinaryPrefixComparator: + this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue())); + break; + case BitComparator: + this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue())); + this.op = ((BitComparator)comparator).getOperator().toString(); + break; + case NullComparator: + break; + case RegexStringComparator: + case SubstringComparator: + this.value = Bytes.toString(comparator.getValue()); + break; + default: + throw new RuntimeException("unhandled filter type: " + type); + } + } + + public ByteArrayComparable build() { + ByteArrayComparable comparator; + switch (ComparatorType.valueOf(type)) { + case BinaryComparator: + comparator = new BinaryComparator(Base64.getDecoder().decode(value)); + break; + case BinaryPrefixComparator: + comparator = new BinaryPrefixComparator(Base64.getDecoder().decode(value)); + break; + case BitComparator: + comparator = new BitComparator(Base64.getDecoder().decode(value), + BitComparator.BitwiseOp.valueOf(op)); + break; + case NullComparator: + comparator = new NullComparator(); + break; + case RegexStringComparator: + comparator = new RegexStringComparator(value); + break; + case SubstringComparator: + comparator = new SubstringComparator(value); + break; + default: + throw new RuntimeException("unhandled comparator type: " + type); + } + return comparator; + } + + } + + // A grab bag of fields, would have been a union if this were C. + // These are null by default and will only be serialized if set (non null). 
+ @XmlAttribute public String type; + @XmlAttribute public String op; + @XmlElement ByteArrayComparableModel comparator; + @XmlAttribute public String value; + @XmlElement public List filters; + @XmlAttribute public Integer limit; + @XmlAttribute public Integer offset; + @XmlAttribute public String family; + @XmlAttribute public String qualifier; + @XmlAttribute public Boolean ifMissing; + @XmlAttribute public Boolean latestVersion; + @XmlAttribute public String minColumn; + @XmlAttribute public Boolean minColumnInclusive; + @XmlAttribute public String maxColumn; + @XmlAttribute public Boolean maxColumnInclusive; + @XmlAttribute public Boolean dropDependentColumn; + @XmlAttribute public Float chance; + @XmlElement public List prefixes; + @XmlElement private List ranges; + @XmlElement public List timestamps; + + static enum FilterType { + ColumnCountGetFilter, + ColumnPaginationFilter, + ColumnPrefixFilter, + ColumnRangeFilter, + DependentColumnFilter, + FamilyFilter, + FilterList, + FirstKeyOnlyFilter, + InclusiveStopFilter, + KeyOnlyFilter, + MultipleColumnPrefixFilter, + MultiRowRangeFilter, + PageFilter, + PrefixFilter, + QualifierFilter, + RandomRowFilter, + RowFilter, + SingleColumnValueExcludeFilter, + SingleColumnValueFilter, + SkipFilter, + TimestampsFilter, + ValueFilter, + WhileMatchFilter + } + + public FilterModel() { } + + public FilterModel(Filter filter) { + String typeName = filter.getClass().getSimpleName(); + FilterType type = FilterType.valueOf(typeName); + this.type = typeName; + switch (type) { + case ColumnCountGetFilter: + this.limit = ((ColumnCountGetFilter)filter).getLimit(); + break; + case ColumnPaginationFilter: + this.limit = ((ColumnPaginationFilter)filter).getLimit(); + this.offset = ((ColumnPaginationFilter)filter).getOffset(); + break; + case ColumnPrefixFilter: + byte[] src = ((ColumnPrefixFilter)filter).getPrefix(); + this.value = Bytes.toString(Base64.getEncoder().encode(src)); + break; + case ColumnRangeFilter: + ColumnRangeFilter crf = (ColumnRangeFilter)filter; + this.minColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMinColumn())); + this.minColumnInclusive = crf.getMinColumnInclusive(); + this.maxColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMaxColumn())); + this.maxColumnInclusive = crf.getMaxColumnInclusive(); + break; + case DependentColumnFilter: { + DependentColumnFilter dcf = (DependentColumnFilter)filter; + this.family = Bytes.toString(Base64.getEncoder().encode(dcf.getFamily())); + byte[] qualifier = dcf.getQualifier(); + if (qualifier != null) { + this.qualifier = Bytes.toString(Base64.getEncoder().encode(qualifier)); + } + this.op = dcf.getCompareOperator().toString(); + this.comparator = new ByteArrayComparableModel(dcf.getComparator()); + this.dropDependentColumn = dcf.dropDependentColumn(); + } break; + case FilterList: + this.op = ((FilterList)filter).getOperator().toString(); + this.filters = new ArrayList<>(); + for (Filter child: ((FilterList)filter).getFilters()) { + this.filters.add(new FilterModel(child)); + } + break; + case FirstKeyOnlyFilter: + case KeyOnlyFilter: + break; + case InclusiveStopFilter: + this.value = Bytes.toString(Base64.getEncoder().encode( + ((InclusiveStopFilter)filter).getStopRowKey())); + break; + case MultipleColumnPrefixFilter: + this.prefixes = new ArrayList<>(); + for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) { + this.prefixes.add(Bytes.toString(Base64.getEncoder().encode(prefix))); + } + break; + case MultiRowRangeFilter: + this.ranges = new ArrayList<>(); 
+ for(RowRange range : ((MultiRowRangeFilter)filter).getRowRanges()) { + this.ranges.add(new RowRange(range.getStartRow(), range.isStartRowInclusive(), + range.getStopRow(), range.isStopRowInclusive())); + } + break; + case PageFilter: + this.value = Long.toString(((PageFilter)filter).getPageSize()); + break; + case PrefixFilter: + this.value = Bytes.toString(Base64.getEncoder().encode( + ((PrefixFilter)filter).getPrefix())); + break; + case FamilyFilter: + case QualifierFilter: + case RowFilter: + case ValueFilter: + this.op = ((CompareFilter)filter).getCompareOperator().toString(); + this.comparator = + new ByteArrayComparableModel( + ((CompareFilter)filter).getComparator()); + break; + case RandomRowFilter: + this.chance = ((RandomRowFilter)filter).getChance(); + break; + case SingleColumnValueExcludeFilter: + case SingleColumnValueFilter: { + SingleColumnValueFilter scvf = (SingleColumnValueFilter) filter; + this.family = Bytes.toString(Base64.getEncoder().encode(scvf.getFamily())); + byte[] qualifier = scvf.getQualifier(); + if (qualifier != null) { + this.qualifier = Bytes.toString(Base64.getEncoder().encode(qualifier)); + } + this.op = scvf.getCompareOperator().toString(); + this.comparator = + new ByteArrayComparableModel(scvf.getComparator()); + if (scvf.getFilterIfMissing()) { + this.ifMissing = true; + } + if (scvf.getLatestVersionOnly()) { + this.latestVersion = true; + } + } break; + case SkipFilter: + this.filters = new ArrayList<>(); + this.filters.add(new FilterModel(((SkipFilter)filter).getFilter())); + break; + case TimestampsFilter: + this.timestamps = ((TimestampsFilter)filter).getTimestamps(); + break; + case WhileMatchFilter: + this.filters = new ArrayList<>(); + this.filters.add( + new FilterModel(((WhileMatchFilter)filter).getFilter())); + break; + default: + throw new RuntimeException("unhandled filter type " + type); + } + } + + public Filter build() { + Filter filter; + switch (FilterType.valueOf(type)) { + case ColumnCountGetFilter: + filter = new ColumnCountGetFilter(limit); + break; + case ColumnPaginationFilter: + filter = new ColumnPaginationFilter(limit, offset); + break; + case ColumnPrefixFilter: + filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); + break; + case ColumnRangeFilter: + filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), + minColumnInclusive, Base64.getDecoder().decode(maxColumn), + maxColumnInclusive); + break; + case DependentColumnFilter: + filter = new DependentColumnFilter(Base64.getDecoder().decode(family), + qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, + dropDependentColumn, CompareOperator.valueOf(op), comparator.build()); + break; + case FamilyFilter: + filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case FilterList: { + List list = new ArrayList<>(filters.size()); + for (FilterModel model: filters) { + list.add(model.build()); + } + filter = new FilterList(FilterList.Operator.valueOf(op), list); + } break; + case FirstKeyOnlyFilter: + filter = new FirstKeyOnlyFilter(); + break; + case InclusiveStopFilter: + filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); + break; + case KeyOnlyFilter: + filter = new KeyOnlyFilter(); + break; + case MultipleColumnPrefixFilter: { + byte[][] values = new byte[prefixes.size()][]; + for (int i = 0; i < prefixes.size(); i++) { + values[i] = Base64.getDecoder().decode(prefixes.get(i)); + } + filter = new MultipleColumnPrefixFilter(values); + } break; + case MultiRowRangeFilter: { + filter = new MultiRowRangeFilter(ranges); + } break; + case PageFilter: + filter = new PageFilter(Long.parseLong(value)); + break; + case PrefixFilter: + filter = new PrefixFilter(Base64.getDecoder().decode(value)); + break; + case QualifierFilter: + filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case RandomRowFilter: + filter = new RandomRowFilter(chance); + break; + case RowFilter: + filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case SingleColumnValueFilter: + filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion); + } + break; + case SingleColumnValueExcludeFilter: + filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion); + } + break; + case SkipFilter: + filter = new SkipFilter(filters.get(0).build()); + break; + case TimestampsFilter: + filter = new TimestampsFilter(timestamps); + break; + case ValueFilter: + filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case WhileMatchFilter: + filter = new WhileMatchFilter(filters.get(0).build()); + break; + default: + throw new RuntimeException("unhandled filter type: " + type); + } + return filter; + } + + } + + /** + * Get the JacksonJaxbJsonProvider instance; + * + * @return A JacksonJaxbJsonProvider. 
+ */ + private static JacksonJaxbJsonProvider getJasonProvider() { + return JaxbJsonProviderHolder.INSTANCE; + } + + /** + * @param s the JSON representation of the filter + * @return the filter + * @throws Exception + */ + public static Filter buildFilter(String s) throws Exception { + FilterModel model = getJasonProvider().locateMapper(FilterModel.class, + MediaType.APPLICATION_JSON_TYPE).readValue(s, FilterModel.class); + return model.build(); + } + + /** + * @param filter the filter + * @return the JSON representation of the filter + * @throws Exception + */ + public static String stringifyFilter(final Filter filter) throws Exception { + return getJasonProvider().locateMapper(FilterModel.class, + MediaType.APPLICATION_JSON_TYPE).writeValueAsString(new FilterModel(filter)); + } + + private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":"); + + /** + * @param scan the scan specification + * @throws Exception + */ + public static ScannerModel fromScan(Scan scan) throws Exception { + ScannerModel model = new ScannerModel(); + model.setStartRow(scan.getStartRow()); + model.setEndRow(scan.getStopRow()); + Map> families = scan.getFamilyMap(); + if (families != null) { + for (Map.Entry> entry : families.entrySet()) { + if (entry.getValue() != null) { + for (byte[] qualifier: entry.getValue()) { + model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier)); + } + } else { + model.addColumn(entry.getKey()); + } + } + } + model.setStartTime(scan.getTimeRange().getMin()); + model.setEndTime(scan.getTimeRange().getMax()); + int caching = scan.getCaching(); + if (caching > 0) { + model.setCaching(caching); + } + int batch = scan.getBatch(); + if (batch > 0) { + model.setBatch(batch); + } + int maxVersions = scan.getMaxVersions(); + if (maxVersions > 0) { + model.setMaxVersions(maxVersions); + } + if (scan.getLimit() > 0) { + model.setLimit(scan.getLimit()); + } + Filter filter = scan.getFilter(); + if (filter != null) { + model.setFilter(stringifyFilter(filter)); + } + // Add the visbility labels if found in the attributes + Authorizations authorizations = scan.getAuthorizations(); + if (authorizations != null) { + List labels = authorizations.getLabels(); + for (String label : labels) { + model.addLabel(label); + } + } + return model; + } + + /** + * Default constructor + */ + public ScannerModel() {} + + /** + * Constructor + * @param startRow the start key of the row-range + * @param endRow the end key of the row-range + * @param columns the columns to scan + * @param batch the number of values to return in batch + * @param caching the number of rows that the scanner will fetch at once + * @param endTime the upper bound on timestamps of values of interest + * @param maxVersions the maximum number of versions to return + * @param filter a filter specification + * (values with timestamps later than this are excluded) + */ + public ScannerModel(byte[] startRow, byte[] endRow, List columns, + int batch, int caching, long endTime, int maxVersions, String filter) { + super(); + this.startRow = startRow; + this.endRow = endRow; + this.columns = columns; + this.batch = batch; + this.caching = caching; + this.endTime = endTime; + this.maxVersions = maxVersions; + this.filter = filter; + } + + /** + * Constructor + * @param startRow the start key of the row-range + * @param endRow the end key of the row-range + * @param columns the columns to scan + * @param batch the number of values to return in batch + * @param caching the number of rows that the scanner will fetch at once + * @param 
startTime the lower bound on timestamps of values of interest + * (values with timestamps earlier than this are excluded) + * @param endTime the upper bound on timestamps of values of interest + * (values with timestamps later than this are excluded) + * @param filter a filter specification + */ + public ScannerModel(byte[] startRow, byte[] endRow, List columns, + int batch, int caching, long startTime, long endTime, String filter) { + super(); + this.startRow = startRow; + this.endRow = endRow; + this.columns = columns; + this.batch = batch; + this.caching = caching; + this.startTime = startTime; + this.endTime = endTime; + this.filter = filter; + } + + /** + * Add a column to the column set + * @param column the column name, as <column>(:<qualifier>)? + */ + public void addColumn(byte[] column) { + columns.add(column); + } + + /** + * Add a visibility label to the scan + */ + public void addLabel(String label) { + labels.add(label); + } + /** + * @return true if a start row was specified + */ + public boolean hasStartRow() { + return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW); + } + + /** + * @return start row + */ + @XmlAttribute + public byte[] getStartRow() { + return startRow; + } + + /** + * @return true if an end row was specified + */ + public boolean hasEndRow() { + return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW); + } + + /** + * @return end row + */ + @XmlAttribute + public byte[] getEndRow() { + return endRow; + } + + /** + * @return list of columns of interest in column:qualifier format, or empty for all + */ + @XmlElement(name="column") + public List getColumns() { + return columns; + } + + @XmlElement(name="labels") + public List getLabels() { + return labels; + } + + /** + * @return the number of cells to return in batch + */ + @XmlAttribute + public int getBatch() { + return batch; + } + + /** + * @return the number of rows that the scanner to fetch at once + */ + @XmlAttribute + public int getCaching() { + return caching; + } + + /** + * @return the limit specification + */ + @XmlAttribute + public int getLimit() { + return limit; + } + + /** + * @return true if HFile blocks should be cached on the servers for this scan, false otherwise + */ + @XmlAttribute + public boolean getCacheBlocks() { + return cacheBlocks; + } + + /** + * @return the lower bound on timestamps of items of interest + */ + @XmlAttribute + public long getStartTime() { + return startTime; + } + + /** + * @return the upper bound on timestamps of items of interest + */ + @XmlAttribute + public long getEndTime() { + return endTime; + } + + /** + * @return maximum number of versions to return + */ + @XmlAttribute + public int getMaxVersions() { + return maxVersions; + } + + /** + * @return the filter specification + */ + @XmlElement + public String getFilter() { + return filter; + } + + /** + * @param startRow start row + */ + public void setStartRow(byte[] startRow) { + this.startRow = startRow; + } + + /** + * @param endRow end row + */ + public void setEndRow(byte[] endRow) { + this.endRow = endRow; + } + + /** + * @param columns list of columns of interest in column:qualifier format, or empty for all + */ + public void setColumns(List columns) { + this.columns = columns; + } + + /** + * @param batch the number of cells to return in batch + */ + public void setBatch(int batch) { + this.batch = batch; + } + + /** + * @param caching the number of rows to fetch at once + */ + public void setCaching(int caching) { + this.caching = caching; + } + + /** + * @param value true if HFile blocks 
should be cached on the servers for this scan, false otherwise + */ + public void setCacheBlocks(boolean value) { + this.cacheBlocks = value; + } + + /** + * @param limit the number of rows can fetch of each scanner at lifetime + */ + public void setLimit(int limit) { + this.limit = limit; + } + + /** + * @param maxVersions maximum number of versions to return + */ + public void setMaxVersions(int maxVersions) { + this.maxVersions = maxVersions; + } + + /** + * @param startTime the lower bound on timestamps of values of interest + */ + public void setStartTime(long startTime) { + this.startTime = startTime; + } + + /** + * @param endTime the upper bound on timestamps of values of interest + */ + public void setEndTime(long endTime) { + this.endTime = endTime; + } + + /** + * @param filter the filter specification + */ + public void setFilter(String filter) { + this.filter = filter; + } + + @Override + public byte[] createProtobufOutput() { + Scanner.Builder builder = Scanner.newBuilder(); + if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) { + builder.setStartRow(UnsafeByteOperations.unsafeWrap(startRow)); + } + if (!Bytes.equals(endRow, HConstants.EMPTY_START_ROW)) { + builder.setEndRow(UnsafeByteOperations.unsafeWrap(endRow)); + } + for (byte[] column: columns) { + builder.addColumns(UnsafeByteOperations.unsafeWrap(column)); + } + if (startTime != 0) { + builder.setStartTime(startTime); + } + if (endTime != 0) { + builder.setEndTime(endTime); + } + builder.setBatch(getBatch()); + if (caching > 0) { + builder.setCaching(caching); + } + if (limit > 0){ + builder.setLimit(limit); + } + builder.setMaxVersions(maxVersions); + if (filter != null) { + builder.setFilter(filter); + } + if (labels != null && labels.size() > 0) { + for (String label : labels) + builder.addLabels(label); + } + builder.setCacheBlocks(cacheBlocks); + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + Scanner.Builder builder = Scanner.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + if (builder.hasStartRow()) { + startRow = builder.getStartRow().toByteArray(); + } + if (builder.hasEndRow()) { + endRow = builder.getEndRow().toByteArray(); + } + for (ByteString column: builder.getColumnsList()) { + addColumn(column.toByteArray()); + } + if (builder.hasBatch()) { + batch = builder.getBatch(); + } + if (builder.hasCaching()) { + caching = builder.getCaching(); + } + if (builder.hasLimit()) { + limit = builder.getLimit(); + } + if (builder.hasStartTime()) { + startTime = builder.getStartTime(); + } + if (builder.hasEndTime()) { + endTime = builder.getEndTime(); + } + if (builder.hasMaxVersions()) { + maxVersions = builder.getMaxVersions(); + } + if (builder.hasFilter()) { + filter = builder.getFilter(); + } + if (builder.getLabelsList() != null) { + List labels = builder.getLabelsList(); + for(String label : labels) { + addLabel(label); + } + } + if (builder.hasCacheBlocks()) { + this.cacheBlocks = builder.getCacheBlocks(); + } + return this; + } + +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java new file mode 100755 index 00000000..6d646c04 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -0,0 +1,796 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlElementWrapper; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.util.Bytes; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Representation of the status of a storage cluster: + *
+ * <p>
+ * <ul>
+ * <li>regions: the total number of regions served by the cluster</li>
+ * <li>requests: the total number of requests per second handled by the
+ * cluster in the last reporting interval</li>
+ * <li>averageLoad: the average load of the region servers in the cluster</li>
+ * <li>liveNodes: detailed status of the live region servers</li>
+ * <li>deadNodes: the names of region servers declared dead</li>
+ * </ul>
+ * + *
+ * <complexType name="StorageClusterStatus">
+ *   <sequence>
+ *     <element name="liveNode" type="tns:Node"
+ *       maxOccurs="unbounded" minOccurs="0">
+ *     </element>
+ *     <element name="deadNode" type="string" maxOccurs="unbounded"
+ *       minOccurs="0">
+ *     </element>
+ *   </sequence>
+ *   <attribute name="regions" type="int"></attribute>
+ *   <attribute name="requests" type="int"></attribute>
+ *   <attribute name="averageLoad" type="float"></attribute>
+ * </complexType>
+ *
+ * <complexType name="Node">
+ *   <sequence>
+ *     <element name="region" type="tns:Region"
+ *       maxOccurs="unbounded" minOccurs="0"></element>
+ *   </sequence>
+ *   <attribute name="name" type="string"></attribute>
+ *   <attribute name="startCode" type="int"></attribute>
+ *   <attribute name="requests" type="int"></attribute>
+ *   <attribute name="heapSizeMB" type="int"></attribute>
+ *   <attribute name="maxHeapSizeMB" type="int"></attribute>
+ * </complexType>
+ *
+ * <complexType name="Region">
+ *   <attribute name="name" type="base64Binary"></attribute>
+ *   <attribute name="stores" type="int"></attribute>
+ *   <attribute name="storefiles" type="int"></attribute>
+ *   <attribute name="storefileSizeMB" type="int"></attribute>
+ *   <attribute name="memstoreSizeMB" type="int"></attribute>
+ *   <attribute name="storefileIndexSizeMB" type="int"></attribute>
+ *   <attribute name="readRequestsCount" type="int"></attribute>
+ *   <attribute name="cpRequestsCount" type="int"></attribute>
+ *   <attribute name="writeRequestsCount" type="int"></attribute>
+ *   <attribute name="rootIndexSizeKB" type="int"></attribute>
+ *   <attribute name="totalStaticIndexSizeKB" type="int"></attribute>
+ *   <attribute name="totalStaticBloomSizeKB" type="int"></attribute>
+ *   <attribute name="totalCompactingKVs" type="int"></attribute>
+ *   <attribute name="currentCompactedKVs" type="int"></attribute>
+ * </complexType>
+ * 
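+ *
+ * A construction sketch using the nested Node type; the server name, start code
+ * and region metrics below are illustrative:
+ * <pre>
+ * StorageClusterStatusModel.Node node =
+ *     new StorageClusterStatusModel.Node("regionserver1.example.org", 1245219839331L);
+ * node.addRegion(Bytes.toBytes("hbase:meta,,1"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1);
+ * </pre>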
+ */ +@XmlRootElement(name="ClusterStatus") +@InterfaceAudience.Private +public class StorageClusterStatusModel implements Serializable, ProtobufMessageHandler { + private static final long serialVersionUID = 1L; + + /** + * Represents a region server. + */ + public static class Node implements Serializable { + private static final long serialVersionUID = 1L; + + /** + * Represents a region hosted on a region server. + */ + public static class Region implements Serializable { + private static final long serialVersionUID = -1326683840086398193L; + + private byte[] name; + private int stores; + private int storefiles; + private int storefileSizeMB; + private int memstoreSizeMB; + private long storefileIndexSizeKB; + private long readRequestsCount; + private long writeRequestsCount; + private int rootIndexSizeKB; + private int totalStaticIndexSizeKB; + private int totalStaticBloomSizeKB; + private long totalCompactingKVs; + private long currentCompactedKVs; + + /** + * Default constructor + */ + public Region() { + } + + /** + * Constructor + * @param name the region name + */ + public Region(byte[] name) { + this.name = name; + } + + /** + * Constructor + * @param name the region name + * @param stores the number of stores + * @param storefiles the number of store files + * @param storefileSizeMB total size of store files, in MB + * @param memstoreSizeMB total size of memstore, in MB + * @param storefileIndexSizeKB total size of store file indexes, in KB + */ + public Region(byte[] name, int stores, int storefiles, + int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, + long readRequestsCount, long writeRequestsCount, + int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, + long totalCompactingKVs, long currentCompactedKVs) { + this.name = name; + this.stores = stores; + this.storefiles = storefiles; + this.storefileSizeMB = storefileSizeMB; + this.memstoreSizeMB = memstoreSizeMB; + this.storefileIndexSizeKB = storefileIndexSizeKB; + this.readRequestsCount = readRequestsCount; + this.writeRequestsCount = writeRequestsCount; + this.rootIndexSizeKB = rootIndexSizeKB; + this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; + this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; + this.totalCompactingKVs = totalCompactingKVs; + this.currentCompactedKVs = currentCompactedKVs; + } + + /** + * @return the region name + */ + @XmlAttribute + public byte[] getName() { + return name; + } + + /** + * @return the number of stores + */ + @XmlAttribute + public int getStores() { + return stores; + } + + /** + * @return the number of store files + */ + @XmlAttribute + public int getStorefiles() { + return storefiles; + } + + /** + * @return the total size of store files, in MB + */ + @XmlAttribute + public int getStorefileSizeMB() { + return storefileSizeMB; + } + + /** + * @return memstore size, in MB + */ + @XmlAttribute + public int getMemStoreSizeMB() { + return memstoreSizeMB; + } + + /** + * @return the total size of store file indexes, in KB + */ + @XmlAttribute + public long getStorefileIndexSizeKB() { + return storefileIndexSizeKB; + } + + /** + * @return the current total read requests made to region + */ + @XmlAttribute + public long getReadRequestsCount() { + return readRequestsCount; + } + + + /** + * @return the current total write requests made to region + */ + @XmlAttribute + public long getWriteRequestsCount() { + return writeRequestsCount; + } + + /** + * @return The current total size of root-level indexes for the region, in KB. 
+ */ + @XmlAttribute + public int getRootIndexSizeKB() { + return rootIndexSizeKB; + } + + /** + * @return The total size of static index, in KB + */ + @XmlAttribute + public int getTotalStaticIndexSizeKB() { + return totalStaticIndexSizeKB; + } + + /** + * @return The total size of static bloom, in KB + */ + @XmlAttribute + public int getTotalStaticBloomSizeKB() { + return totalStaticBloomSizeKB; + } + + /** + * @return The total number of compacting key-values + */ + @XmlAttribute + public long getTotalCompactingKVs() { + return totalCompactingKVs; + } + + /** + * @return The number of current compacted key-values + */ + @XmlAttribute + public long getCurrentCompactedKVs() { + return currentCompactedKVs; + } + + /** + * @param readRequestsCount The current total read requests made to region + */ + public void setReadRequestsCount(long readRequestsCount) { + this.readRequestsCount = readRequestsCount; + } + + /** + * @param rootIndexSizeKB The current total size of root-level indexes + * for the region, in KB + */ + public void setRootIndexSizeKB(int rootIndexSizeKB) { + this.rootIndexSizeKB = rootIndexSizeKB; + } + + /** + * @param writeRequestsCount The current total write requests made to region + */ + public void setWriteRequestsCount(long writeRequestsCount) { + this.writeRequestsCount = writeRequestsCount; + } + + /** + * @param currentCompactedKVs The completed count of key values + * in currently running compaction + */ + public void setCurrentCompactedKVs(long currentCompactedKVs) { + this.currentCompactedKVs = currentCompactedKVs; + } + + /** + * @param totalCompactingKVs The total compacting key values + * in currently running compaction + */ + public void setTotalCompactingKVs(long totalCompactingKVs) { + this.totalCompactingKVs = totalCompactingKVs; + } + + /** + * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, + * not just loaded into the block cache, in KB. + */ + public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) { + this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; + } + + /** + * @param totalStaticIndexSizeKB The total size of all index blocks, + * not just the root level, in KB. 
+ */ + public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) { + this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; + } + + /** + * @param name the region name + */ + public void setName(byte[] name) { + this.name = name; + } + + /** + * @param stores the number of stores + */ + public void setStores(int stores) { + this.stores = stores; + } + + /** + * @param storefiles the number of store files + */ + public void setStorefiles(int storefiles) { + this.storefiles = storefiles; + } + + /** + * @param storefileSizeMB total size of store files, in MB + */ + public void setStorefileSizeMB(int storefileSizeMB) { + this.storefileSizeMB = storefileSizeMB; + } + + /** + * @param memstoreSizeMB memstore size, in MB + */ + public void setMemStoreSizeMB(int memstoreSizeMB) { + this.memstoreSizeMB = memstoreSizeMB; + } + + /** + * @param storefileIndexSizeKB total size of store file indexes, in KB + */ + public void setStorefileIndexSizeKB(long storefileIndexSizeKB) { + this.storefileIndexSizeKB = storefileIndexSizeKB; + } + } + + private String name; + private long startCode; + private long requests; + private int heapSizeMB; + private int maxHeapSizeMB; + private List regions = new ArrayList<>(); + + /** + * Add a region name to the list + * @param name the region name + */ + public void addRegion(byte[] name, int stores, int storefiles, + int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB, + long readRequestsCount, long writeRequestsCount, + int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, + long totalCompactingKVs, long currentCompactedKVs) { + regions.add(new Region(name, stores, storefiles, storefileSizeMB, + memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, + writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, + totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); + } + + /** + * @param index the index + * @return the region name + */ + public Region getRegion(int index) { + return regions.get(index); + } + + /** + * Default constructor + */ + public Node() {} + + /** + * Constructor + * @param name the region server name + * @param startCode the region server's start code + */ + public Node(String name, long startCode) { + this.name = name; + this.startCode = startCode; + } + + /** + * @return the region server's name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @return the region server's start code + */ + @XmlAttribute + public long getStartCode() { + return startCode; + } + + /** + * @return the current heap size, in MB + */ + @XmlAttribute + public int getHeapSizeMB() { + return heapSizeMB; + } + + /** + * @return the maximum heap size, in MB + */ + @XmlAttribute + public int getMaxHeapSizeMB() { + return maxHeapSizeMB; + } + + /** + * @return the list of regions served by the region server + */ + @XmlElement(name="Region") + public List getRegions() { + return regions; + } + + /** + * @return the number of requests per second processed by the region server + */ + @XmlAttribute + public long getRequests() { + return requests; + } + + /** + * @param name the region server's hostname + */ + public void setName(String name) { + this.name = name; + } + + /** + * @param startCode the region server's start code + */ + public void setStartCode(long startCode) { + this.startCode = startCode; + } + + /** + * @param heapSizeMB the current heap size, in MB + */ + public void setHeapSizeMB(int heapSizeMB) { + this.heapSizeMB = heapSizeMB; + } + + /** + * @param maxHeapSizeMB the 
maximum heap size, in MB + */ + public void setMaxHeapSizeMB(int maxHeapSizeMB) { + this.maxHeapSizeMB = maxHeapSizeMB; + } + + /** + * @param regions a list of regions served by the region server + */ + public void setRegions(List regions) { + this.regions = regions; + } + + /** + * @param requests the number of requests per second processed by the region server + */ + public void setRequests(long requests) { + this.requests = requests; + } + } + + private List liveNodes = new ArrayList<>(); + private List deadNodes = new ArrayList<>(); + private int regions; + private long requests; + private double averageLoad; + + /** + * Add a live node to the cluster representation. + * @param name the region server name + * @param startCode the region server's start code + * @param heapSizeMB the current heap size, in MB + * @param maxHeapSizeMB the maximum heap size, in MB + */ + public Node addLiveNode(String name, long startCode, int heapSizeMB, int maxHeapSizeMB) { + Node node = new Node(name, startCode); + node.setHeapSizeMB(heapSizeMB); + node.setMaxHeapSizeMB(maxHeapSizeMB); + liveNodes.add(node); + return node; + } + + /** + * @param index the index + * @return the region server model + */ + public Node getLiveNode(int index) { + return liveNodes.get(index); + } + + /** + * Add a dead node to the cluster representation. + * @param node the dead region server's name + */ + public void addDeadNode(String node) { + deadNodes.add(node); + } + + /** + * @param index the index + * @return the dead region server's name + */ + public String getDeadNode(int index) { + return deadNodes.get(index); + } + + /** + * Default constructor + */ + public StorageClusterStatusModel() { + } + + /** + * @return the list of live nodes + */ + @XmlElement(name = "Node") + @XmlElementWrapper(name = "LiveNodes") + // workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192 + @JsonProperty("LiveNodes") + public List getLiveNodes() { + return liveNodes; + } + + /** + * @return the list of dead nodes + */ + @XmlElement(name = "Node") + @XmlElementWrapper(name = "DeadNodes") + // workaround https://github.com/FasterXML/jackson-dataformat-xml/issues/192 + @JsonProperty("DeadNodes") + public List getDeadNodes() { + return deadNodes; + } + + /** + * @return the total number of regions served by the cluster + */ + @XmlAttribute + public int getRegions() { + return regions; + } + + /** + * @return the total number of requests per second handled by the cluster in the last reporting + * interval + */ + @XmlAttribute + public long getRequests() { + return requests; + } + + /** + * @return the average load of the region servers in the cluster + */ + @XmlAttribute + public double getAverageLoad() { + return averageLoad; + } + + /** + * @param nodes the list of live node models + */ + public void setLiveNodes(List nodes) { + this.liveNodes = nodes; + } + + /** + * @param nodes the list of dead node names + */ + public void setDeadNodes(List nodes) { + this.deadNodes = nodes; + } + + /** + * @param regions the total number of regions served by the cluster + */ + public void setRegions(int regions) { + this.regions = regions; + } + + /** + * @param requests the total number of requests per second handled by the cluster + */ + public void setRequests(long requests) { + this.requests = requests; + } + + /** + * @param averageLoad the average load of region servers in the cluster + */ + public void setAverageLoad(double averageLoad) { + this.averageLoad = averageLoad; + } + + @Override + public String toString() { + 
StringBuilder sb = new StringBuilder(); + sb.append(String.format("%d live servers, %d dead servers, " + + "%.4f average load%n%n", liveNodes.size(), deadNodes.size(), + averageLoad)); + if (!liveNodes.isEmpty()) { + sb.append(liveNodes.size()); + sb.append(" live servers\n"); + for (Node node : liveNodes) { + sb.append(" "); + sb.append(node.name); + sb.append(' '); + sb.append(node.startCode); + sb.append("\n requests="); + sb.append(node.requests); + sb.append(", regions="); + sb.append(node.regions.size()); + sb.append("\n heapSizeMB="); + sb.append(node.heapSizeMB); + sb.append("\n maxHeapSizeMB="); + sb.append(node.maxHeapSizeMB); + sb.append("\n\n"); + for (Node.Region region : node.regions) { + sb.append(" "); + sb.append(Bytes.toString(region.name)); + sb.append("\n stores="); + sb.append(region.stores); + sb.append("\n storefiles="); + sb.append(region.storefiles); + sb.append("\n storefileSizeMB="); + sb.append(region.storefileSizeMB); + sb.append("\n memstoreSizeMB="); + sb.append(region.memstoreSizeMB); + sb.append("\n storefileIndexSizeKB="); + sb.append(region.storefileIndexSizeKB); + sb.append("\n readRequestsCount="); + sb.append(region.readRequestsCount); + sb.append("\n writeRequestsCount="); + sb.append(region.writeRequestsCount); + sb.append("\n rootIndexSizeKB="); + sb.append(region.rootIndexSizeKB); + sb.append("\n totalStaticIndexSizeKB="); + sb.append(region.totalStaticIndexSizeKB); + sb.append("\n totalStaticBloomSizeKB="); + sb.append(region.totalStaticBloomSizeKB); + sb.append("\n totalCompactingKVs="); + sb.append(region.totalCompactingKVs); + sb.append("\n currentCompactedKVs="); + sb.append(region.currentCompactedKVs); + sb.append('\n'); + } + sb.append('\n'); + } + } + if (!deadNodes.isEmpty()) { + sb.append('\n'); + sb.append(deadNodes.size()); + sb.append(" dead servers\n"); + for (String node : deadNodes) { + sb.append(" "); + sb.append(node); + sb.append('\n'); + } + } + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder(); + builder.setRegions(regions); + builder.setRequests(requests); + builder.setAverageLoad(averageLoad); + for (Node node : liveNodes) { + StorageClusterStatus.Node.Builder nodeBuilder = + StorageClusterStatus.Node.newBuilder(); + nodeBuilder.setName(node.name); + nodeBuilder.setStartCode(node.startCode); + nodeBuilder.setRequests(node.requests); + nodeBuilder.setHeapSizeMB(node.heapSizeMB); + nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB); + for (Node.Region region : node.regions) { + StorageClusterStatus.Region.Builder regionBuilder = + StorageClusterStatus.Region.newBuilder(); + regionBuilder.setName(UnsafeByteOperations.unsafeWrap(region.name)); + regionBuilder.setStores(region.stores); + regionBuilder.setStorefiles(region.storefiles); + regionBuilder.setStorefileSizeMB(region.storefileSizeMB); + regionBuilder.setMemStoreSizeMB(region.memstoreSizeMB); + regionBuilder.setStorefileIndexSizeKB(region.storefileIndexSizeKB); + regionBuilder.setReadRequestsCount(region.readRequestsCount); + regionBuilder.setWriteRequestsCount(region.writeRequestsCount); + regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB); + regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB); + regionBuilder.setTotalStaticBloomSizeKB(region.totalStaticBloomSizeKB); + regionBuilder.setTotalCompactingKVs(region.totalCompactingKVs); + regionBuilder.setCurrentCompactedKVs(region.currentCompactedKVs); +
nodeBuilder.addRegions(regionBuilder); + } + builder.addLiveNodes(nodeBuilder); + } + for (String node : deadNodes) { + builder.addDeadNodes(node); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { + StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + if (builder.hasRegions()) { + regions = builder.getRegions(); + } + if (builder.hasRequests()) { + requests = builder.getRequests(); + } + if (builder.hasAverageLoad()) { + averageLoad = builder.getAverageLoad(); + } + for (StorageClusterStatus.Node node : builder.getLiveNodesList()) { + long startCode = node.hasStartCode() ? node.getStartCode() : -1; + Node nodeModel = + addLiveNode(node.getName(), startCode, node.getHeapSizeMB(), + node.getMaxHeapSizeMB()); + long requests = node.hasRequests() ? node.getRequests() : 0; + nodeModel.setRequests(requests); + for (StorageClusterStatus.Region region : node.getRegionsList()) { + nodeModel.addRegion( + region.getName().toByteArray(), + region.getStores(), + region.getStorefiles(), + region.getStorefileSizeMB(), + region.getMemStoreSizeMB(), + region.getStorefileIndexSizeKB(), + region.getReadRequestsCount(), + region.getWriteRequestsCount(), + region.getRootIndexSizeKB(), + region.getTotalStaticIndexSizeKB(), + region.getTotalStaticBloomSizeKB(), + region.getTotalCompactingKVs(), + region.getCurrentCompactedKVs()); + } + } + for (String node : builder.getDeadNodesList()) { + addDeadNode(node); + } + return this; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java new file mode 100755 index 00000000..58409976 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterVersionModel.java @@ -0,0 +1,75 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.Serializable; + +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Simple representation of the version of the storage cluster + * + *
+ * <complexType name="StorageClusterVersion">
+ *   <attribute name="version" type="string"></attribute>
+ * </complexType>
+ * 
+ */ +@XmlRootElement(name="ClusterVersion") +@InterfaceAudience.Private +public class StorageClusterVersionModel implements Serializable { + private static final long serialVersionUID = 1L; + + private String version; + + /** + * @return the storage cluster version + */ + @XmlAttribute(name="Version") + public String getVersion() { + return version; + } + + /** + * @param version the storage cluster version + */ + public void setVersion(String version) { + this.version = version; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return version; + } + + //needed for jackson deserialization + private static StorageClusterVersionModel valueOf(String value) { + StorageClusterVersionModel versionModel + = new StorageClusterVersionModel(); + versionModel.setVersion(value); + return versionModel; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java new file mode 100755 index 00000000..32006251 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableInfoModel.java @@ -0,0 +1,162 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableInfoMessage.TableInfo; + +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + +/** + * Representation of a list of table regions. + * + *
+ * <complexType name="TableInfo">
+ *   <sequence>
+ *     <element name="region" type="tns:TableRegion"
+ *       maxOccurs="unbounded" minOccurs="1"></element>
+ *   </sequence>
+ *   <attribute name="name" type="string"></attribute>
+ * </complexType>
+ * 
+ */ +@XmlRootElement(name="TableInfo") +@InterfaceAudience.Private +public class TableInfoModel implements Serializable, ProtobufMessageHandler { + private static final long serialVersionUID = 1L; + + private String name; + private List regions = new ArrayList<>(); + + /** + * Default constructor + */ + public TableInfoModel() {} + + /** + * Constructor + * @param name + */ + public TableInfoModel(String name) { + this.name = name; + } + + /** + * Add a region model to the list + * @param region the region + */ + public void add(TableRegionModel region) { + regions.add(region); + } + + /** + * @param index the index + * @return the region model + */ + public TableRegionModel get(int index) { + return regions.get(index); + } + + /** + * @return the table name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @return the regions + */ + @XmlElement(name="Region") + public List getRegions() { + return regions; + } + + /** + * @param name the table name + */ + public void setName(String name) { + this.name = name; + } + + /** + * @param regions the regions to set + */ + public void setRegions(List regions) { + this.regions = regions; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for(TableRegionModel aRegion : regions) { + sb.append(aRegion.toString()); + sb.append('\n'); + } + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + TableInfo.Builder builder = TableInfo.newBuilder(); + builder.setName(name); + for (TableRegionModel aRegion: regions) { + TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder(); + regionBuilder.setName(aRegion.getName()); + regionBuilder.setId(aRegion.getId()); + regionBuilder.setStartKey(UnsafeByteOperations.unsafeWrap(aRegion.getStartKey())); + regionBuilder.setEndKey(UnsafeByteOperations.unsafeWrap(aRegion.getEndKey())); + regionBuilder.setLocation(aRegion.getLocation()); + builder.addRegions(regionBuilder); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + TableInfo.Builder builder = TableInfo.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + setName(builder.getName()); + for (TableInfo.Region region: builder.getRegionsList()) { + add(new TableRegionModel(builder.getName(), region.getId(), + region.getStartKey().toByteArray(), + region.getEndKey().toByteArray(), + region.getLocation())); + } + return this; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java new file mode 100755 index 00000000..8d3e1ab0 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableListModel.java @@ -0,0 +1,115 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import javax.xml.bind.annotation.XmlElementRef; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableListMessage.TableList; + +/** + * Simple representation of a list of table names. + */ +@XmlRootElement(name="TableList") +@InterfaceAudience.Private +public class TableListModel implements Serializable, ProtobufMessageHandler { + + private static final long serialVersionUID = 1L; + + private List tables = new ArrayList<>(); + + /** + * Default constructor + */ + public TableListModel() {} + + /** + * Add the table name model to the list + * @param table the table model + */ + public void add(TableModel table) { + tables.add(table); + } + + /** + * @param index the index + * @return the table model + */ + public TableModel get(int index) { + return tables.get(index); + } + + /** + * @return the tables + */ + @XmlElementRef(name="table") + public List getTables() { + return tables; + } + + /** + * @param tables the tables to set + */ + public void setTables(List tables) { + this.tables = tables; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + for(TableModel aTable : tables) { + sb.append(aTable.toString()); + sb.append('\n'); + } + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + TableList.Builder builder = TableList.newBuilder(); + for (TableModel aTable : tables) { + builder.addName(aTable.getName()); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + TableList.Builder builder = TableList.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + for (String table: builder.getNameList()) { + this.add(new TableModel(table)); + } + return this; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java new file mode 100755 index 00000000..4628263e --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableModel.java @@ -0,0 +1,84 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.Serializable; + +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Simple representation of a table name. + * + *
+ * <complexType name="Table">
+ *   <sequence>
+ *     <element name="name" type="string"></element>
+ *   </sequence>
+ * </complexType>
+ * 
+ */ +@XmlRootElement(name="table") +@InterfaceAudience.Private +public class TableModel implements Serializable { + + private static final long serialVersionUID = 1L; + + private String name; + + /** + * Default constructor + */ + public TableModel() {} + + /** + * Constructor + * @param name + */ + public TableModel(String name) { + super(); + this.name = name; + } + + /** + * @return the name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @param name the name to set + */ + public void setName(String name) { + this.name = name; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return this.name; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java new file mode 100755 index 00000000..2ed5d9d5 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -0,0 +1,196 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.Serializable; + +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Representation of a region of a table and its current location on the + * storage cluster. + * + *
+ * <complexType name="TableRegion">
+ *   <attribute name="name" type="string"></attribute>
+ *   <attribute name="id" type="int"></attribute>
+ *   <attribute name="startKey" type="base64Binary"></attribute>
+ *   <attribute name="endKey" type="base64Binary"></attribute>
+ *   <attribute name="location" type="string"></attribute>
+ *  </complexType>
+ * 
+ */ +@XmlRootElement(name="Region") +@InterfaceAudience.Private +public class TableRegionModel implements Serializable { + + private static final long serialVersionUID = 1L; + + private String table; + private long id; + private byte[] startKey; + private byte[] endKey; + private String location; + + /** + * Constructor + */ + public TableRegionModel() {} + + /** + * Constructor + * @param table the table name + * @param id the encoded id of the region + * @param startKey the start key of the region + * @param endKey the end key of the region + */ + public TableRegionModel(String table, long id, byte[] startKey, + byte[] endKey) { + this(table, id, startKey, endKey, null); + } + + /** + * Constructor + * @param table the table name + * @param id the encoded id of the region + * @param startKey the start key of the region + * @param endKey the end key of the region + * @param location the name and port of the region server hosting the region + */ + public TableRegionModel(String table, long id, byte[] startKey, + byte[] endKey, String location) { + this.table = table; + this.id = id; + this.startKey = startKey; + this.endKey = endKey; + this.location = location; + } + + /** + * @return the region name + */ + @XmlAttribute + public String getName() { + byte [] tableNameAsBytes = Bytes.toBytes(this.table); + TableName tableName = TableName.valueOf(tableNameAsBytes); + byte [] nameAsBytes = HRegionInfo.createRegionName( + tableName, this.startKey, this.id, !tableName.isSystemTable()); + return Bytes.toString(nameAsBytes); + } + + /** + * @return the encoded region id + */ + @XmlAttribute + public long getId() { + return id; + } + + /** + * @return the start key + */ + @XmlAttribute + public byte[] getStartKey() { + return startKey; + } + + /** + * @return the end key + */ + @XmlAttribute + public byte[] getEndKey() { + return endKey; + } + + /** + * @return the name and port of the region server hosting the region + */ + @XmlAttribute + public String getLocation() { + return location; + } + + /** + * @param name region printable name + */ + public void setName(String name) { + String split[] = name.split(","); + this.table = split[0]; + this.startKey = Bytes.toBytes(split[1]); + String tail = split[2]; + split = tail.split("\\."); + id = Long.parseLong(split[0]); + } + + /** + * @param id the region's encoded id + */ + public void setId(long id) { + this.id = id; + } + + /** + * @param startKey the start key + */ + public void setStartKey(byte[] startKey) { + this.startKey = startKey; + } + + /** + * @param endKey the end key + */ + public void setEndKey(byte[] endKey) { + this.endKey = endKey; + } + + /** + * @param location the name and port of the region server hosting the region + */ + public void setLocation(String location) { + this.location = location; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(getName()); + sb.append(" [\n id="); + sb.append(id); + sb.append("\n startKey='"); + sb.append(Bytes.toString(startKey)); + sb.append("'\n endKey='"); + sb.append(Bytes.toString(endKey)); + if (location != null) { + sb.append("'\n location='"); + sb.append(location); + } + sb.append("'\n]\n"); + return sb.toString(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java new file mode 100755 index 00000000..3d60490f --- /dev/null +++ 
b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -0,0 +1,362 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonAnySetter; +import com.fasterxml.jackson.annotation.JsonIgnore; + +import java.io.IOException; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import javax.xml.bind.annotation.XmlAnyAttribute; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.namespace.QName; + +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.util.Bytes; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.TableSchemaMessage.TableSchema; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema; + +/** + * A representation of HBase table descriptors. + * + *
+ * <complexType name="TableSchema">
+ *   <sequence>
+ *     <element name="column" type="tns:ColumnSchema"
+ *       maxOccurs="unbounded" minOccurs="1"></element>
+ *   </sequence>
+ *   <attribute name="name" type="string"></attribute>
+ *   <anyAttribute></anyAttribute>
+ * </complexType>
+ * 
+ */ +@XmlRootElement(name="TableSchema") +@InterfaceAudience.Private +public class TableSchemaModel implements Serializable, ProtobufMessageHandler { + private static final long serialVersionUID = 1L; + private static final QName IS_META = new QName(HTableDescriptor.IS_META); + private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT); + private static final QName READONLY = new QName(HTableDescriptor.READONLY); + private static final QName TTL = new QName(HColumnDescriptor.TTL); + private static final QName VERSIONS = new QName(HConstants.VERSIONS); + private static final QName COMPRESSION = + new QName(HColumnDescriptor.COMPRESSION); + + private String name; + private Map attrs = new LinkedHashMap<>(); + private List columns = new ArrayList<>(); + + /** + * Default constructor. + */ + public TableSchemaModel() {} + + /** + * Constructor + * @param tableDescriptor the table descriptor + */ + public TableSchemaModel(TableDescriptor tableDescriptor) { + setName(tableDescriptor.getTableName().getNameAsString()); + for (Map.Entry e : tableDescriptor.getValues().entrySet()) { + addAttribute(Bytes.toString(e.getKey().get()), + Bytes.toString(e.getValue().get())); + } + for (ColumnFamilyDescriptor hcd : tableDescriptor.getColumnFamilies()) { + ColumnSchemaModel columnModel = new ColumnSchemaModel(); + columnModel.setName(hcd.getNameAsString()); + for (Map.Entry e: + hcd.getValues().entrySet()) { + columnModel.addAttribute(Bytes.toString(e.getKey().get()), + Bytes.toString(e.getValue().get())); + } + addColumnFamily(columnModel); + } + } + + /** + * Add an attribute to the table descriptor + * @param name attribute name + * @param value attribute value + */ + @JsonAnySetter + public void addAttribute(String name, Object value) { + attrs.put(new QName(name), value); + } + + /** + * Return a table descriptor value as a string. Calls toString() on the + * object stored in the descriptor value map. + * @param name the attribute name + * @return the attribute value + */ + public String getAttribute(String name) { + Object o = attrs.get(new QName(name)); + return o != null ? 
o.toString() : null; + } + + /** + * Add a column family to the table descriptor + * @param family the column family model + */ + public void addColumnFamily(ColumnSchemaModel family) { + columns.add(family); + } + + /** + * Retrieve the column family at the given index from the table descriptor + * @param index the index + * @return the column family model + */ + public ColumnSchemaModel getColumnFamily(int index) { + return columns.get(index); + } + + /** + * @return the table name + */ + @XmlAttribute + public String getName() { + return name; + } + + /** + * @return the map for holding unspecified (user) attributes + */ + @XmlAnyAttribute + @JsonAnyGetter + public Map getAny() { + return attrs; + } + + /** + * @return the columns + */ + @XmlElement(name="ColumnSchema") + public List getColumns() { + return columns; + } + + /** + * @param name the table name + */ + public void setName(String name) { + this.name = name; + } + + /** + * @param columns the columns to set + */ + public void setColumns(List columns) { + this.columns = columns; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{ NAME=> '"); + sb.append(name); + sb.append('\''); + for (Map.Entry e : attrs.entrySet()) { + sb.append(", "); + sb.append(e.getKey().getLocalPart()); + sb.append(" => '"); + sb.append(e.getValue().toString()); + sb.append('\''); + } + sb.append(", COLUMNS => [ "); + Iterator i = columns.iterator(); + while (i.hasNext()) { + ColumnSchemaModel family = i.next(); + sb.append(family.toString()); + if (i.hasNext()) { + sb.append(','); + } + sb.append(' '); + } + sb.append("] }"); + return sb.toString(); + } + + // getters and setters for common schema attributes + + // cannot be standard bean type getters and setters, otherwise this would + // confuse JAXB + + /** + * @return true if IS_META attribute exists and is true + */ + public boolean __getIsMeta() { + Object o = attrs.get(IS_META); + return o != null && Boolean.parseBoolean(o.toString()); + } + + /** + * @return true if IS_ROOT attribute exists and is true + */ + public boolean __getIsRoot() { + Object o = attrs.get(IS_ROOT); + return o != null && Boolean.parseBoolean(o.toString()); + } + + /** + * @return true if READONLY attribute exists and is true + */ + public boolean __getReadOnly() { + Object o = attrs.get(READONLY); + return o != null ?
Boolean.parseBoolean(o.toString()) : HTableDescriptor.DEFAULT_READONLY; + } + + /** + * @param value desired value of IS_META attribute + */ + public void __setIsMeta(boolean value) { + attrs.put(IS_META, Boolean.toString(value)); + } + + /** + * @param value desired value of IS_ROOT attribute + */ + public void __setIsRoot(boolean value) { + attrs.put(IS_ROOT, Boolean.toString(value)); + } + + /** + * @param value desired value of READONLY attribute + */ + public void __setReadOnly(boolean value) { + attrs.put(READONLY, Boolean.toString(value)); + } + + @Override + public byte[] createProtobufOutput() { + TableSchema.Builder builder = TableSchema.newBuilder(); + builder.setName(name); + for (Map.Entry e : attrs.entrySet()) { + TableSchema.Attribute.Builder attrBuilder = + TableSchema.Attribute.newBuilder(); + attrBuilder.setName(e.getKey().getLocalPart()); + attrBuilder.setValue(e.getValue().toString()); + builder.addAttrs(attrBuilder); + } + for (ColumnSchemaModel family : columns) { + Map familyAttrs = family.getAny(); + ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder(); + familyBuilder.setName(family.getName()); + for (Map.Entry e : familyAttrs.entrySet()) { + ColumnSchema.Attribute.Builder attrBuilder = + ColumnSchema.Attribute.newBuilder(); + attrBuilder.setName(e.getKey().getLocalPart()); + attrBuilder.setValue(e.getValue().toString()); + familyBuilder.addAttrs(attrBuilder); + } + if (familyAttrs.containsKey(TTL)) { + familyBuilder.setTtl(Integer.parseInt(familyAttrs.get(TTL).toString())); + } + if (familyAttrs.containsKey(VERSIONS)) { + familyBuilder.setMaxVersions(Integer.parseInt(familyAttrs.get(VERSIONS).toString())); + } + if (familyAttrs.containsKey(COMPRESSION)) { + familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString()); + } + builder.addColumns(familyBuilder); + } + if (attrs.containsKey(READONLY)) { + builder.setReadOnly(Boolean.parseBoolean(attrs.get(READONLY).toString())); + } + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + TableSchema.Builder builder = TableSchema.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + this.setName(builder.getName()); + for (TableSchema.Attribute attr : builder.getAttrsList()) { + this.addAttribute(attr.getName(), attr.getValue()); + } + if (builder.hasReadOnly()) { + this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly()); + } + for (ColumnSchema family : builder.getColumnsList()) { + ColumnSchemaModel familyModel = new ColumnSchemaModel(); + familyModel.setName(family.getName()); + for (ColumnSchema.Attribute attr : family.getAttrsList()) { + familyModel.addAttribute(attr.getName(), attr.getValue()); + } + if (family.hasTtl()) { + familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl()); + } + if (family.hasMaxVersions()) { + familyModel.addAttribute(HConstants.VERSIONS, + family.getMaxVersions()); + } + if (family.hasCompression()) { + familyModel.addAttribute(HColumnDescriptor.COMPRESSION, + family.getCompression()); + } + this.addColumnFamily(familyModel); + } + return this; + } + + /** + * @return a table descriptor + */ + @JsonIgnore + public TableDescriptor getTableDescriptor() { + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(getName())); + for (Map.Entry e : getAny().entrySet()) { + tableDescriptorBuilder.setValue(e.getKey().getLocalPart(), e.getValue().toString()); + } + for (ColumnSchemaModel column : getColumns()) { 
+ ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(column.getName())); + for (Map.Entry e : column.getAny().entrySet()) { + cfdb.setValue(e.getKey().getLocalPart(), e.getValue().toString()); + } + tableDescriptorBuilder.setColumnFamily(cfdb.build()); + } + return tableDescriptorBuilder.build(); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java new file mode 100755 index 00000000..0db793ac --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/VersionModel.java @@ -0,0 +1,211 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.model; + +import java.io.IOException; +import java.io.Serializable; + +import javax.servlet.ServletContext; +import javax.xml.bind.annotation.XmlAttribute; +import javax.xml.bind.annotation.XmlRootElement; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.RESTServlet; +import org.glassfish.jersey.servlet.ServletContainer; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.rest.protobuf.generated.VersionMessage.Version; + +/** + * A representation of the collection of versions of the REST gateway software + * components. + *
+ * <ul>
+ * <li>restVersion: REST gateway revision</li>
+ * <li>jvmVersion: the JVM vendor and version information</li>
+ * <li>osVersion: the OS type, version, and hardware architecture</li>
+ * <li>serverVersion: the name and version of the servlet container</li>
+ * <li>jerseyVersion: the version of the embedded Jersey framework</li>
+ * </ul>
+ */ +@XmlRootElement(name="Version") +@InterfaceAudience.Private +public class VersionModel implements Serializable, ProtobufMessageHandler { + + private static final long serialVersionUID = 1L; + + private String restVersion; + private String jvmVersion; + private String osVersion; + private String serverVersion; + private String jerseyVersion; + + /** + * Default constructor. Do not use. + */ + public VersionModel() {} + + /** + * Constructor + * @param context the servlet context + */ + public VersionModel(ServletContext context) { + restVersion = RESTServlet.VERSION_STRING; + jvmVersion = System.getProperty("java.vm.vendor") + ' ' + + System.getProperty("java.version") + '-' + + System.getProperty("java.vm.version"); + osVersion = System.getProperty("os.name") + ' ' + + System.getProperty("os.version") + ' ' + + System.getProperty("os.arch"); + serverVersion = context.getServerInfo(); + jerseyVersion = ServletContainer.class.getPackage().getImplementationVersion(); + // Currently, this will always be null because the manifest doesn't have any useful information + if (jerseyVersion == null) jerseyVersion = ""; + } + + /** + * @return the REST gateway version + */ + @XmlAttribute(name="REST") + public String getRESTVersion() { + return restVersion; + } + + /** + * @return the JVM vendor and version + */ + @XmlAttribute(name="JVM") + public String getJVMVersion() { + return jvmVersion; + } + + /** + * @return the OS name, version, and hardware architecture + */ + @XmlAttribute(name="OS") + public String getOSVersion() { + return osVersion; + } + + /** + * @return the servlet container version + */ + @XmlAttribute(name="Server") + public String getServerVersion() { + return serverVersion; + } + + /** + * @return the version of the embedded Jersey framework + */ + @XmlAttribute(name="Jersey") + public String getJerseyVersion() { + return jerseyVersion; + } + + /** + * @param version the REST gateway version string + */ + public void setRESTVersion(String version) { + this.restVersion = version; + } + + /** + * @param version the OS version string + */ + public void setOSVersion(String version) { + this.osVersion = version; + } + + /** + * @param version the JVM version string + */ + public void setJVMVersion(String version) { + this.jvmVersion = version; + } + + /** + * @param version the servlet container version string + */ + public void setServerVersion(String version) { + this.serverVersion = version; + } + + /** + * @param version the Jersey framework version string + */ + public void setJerseyVersion(String version) { + this.jerseyVersion = version; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("rest "); + sb.append(restVersion); + sb.append(" [JVM: "); + sb.append(jvmVersion); + sb.append("] [OS: "); + sb.append(osVersion); + sb.append("] [Server: "); + sb.append(serverVersion); + sb.append("] [Jersey: "); + sb.append(jerseyVersion); + sb.append("]\n"); + return sb.toString(); + } + + @Override + public byte[] createProtobufOutput() { + Version.Builder builder = Version.newBuilder(); + builder.setRestVersion(restVersion); + builder.setJvmVersion(jvmVersion); + builder.setOsVersion(osVersion); + builder.setServerVersion(serverVersion); + builder.setJerseyVersion(jerseyVersion); + return builder.build().toByteArray(); + } + + @Override + public ProtobufMessageHandler getObjectFromMessage(byte[] message) + throws IOException { + Version.Builder builder = 
Version.newBuilder(); + ProtobufUtil.mergeFrom(builder, message); + if (builder.hasRestVersion()) { + restVersion = builder.getRestVersion(); + } + if (builder.hasJvmVersion()) { + jvmVersion = builder.getJvmVersion(); + } + if (builder.hasOsVersion()) { + osVersion = builder.getOsVersion(); + } + if (builder.hasServerVersion()) { + serverVersion = builder.getServerVersion(); + } + if (builder.hasJerseyVersion()) { + jerseyVersion = builder.getJerseyVersion(); + } + return this; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html new file mode 100755 index 00000000..1129023b --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/package.html @@ -0,0 +1,31 @@ + + + + + + + +

HBase REST

+This package provides a RESTful Web service front end for HBase. +

+The documentation that used to live in this file has moved to the HBase Reference Guide. +

+ + + diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java new file mode 100755 index 00000000..fda2b7de --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/JAXBContextResolver.java @@ -0,0 +1,89 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.provider; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import javax.ws.rs.ext.ContextResolver; +import javax.ws.rs.ext.Provider; +import javax.xml.bind.JAXBContext; + +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel; +import org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel; +import org.apache.hadoop.hbase.rest.model.NamespacesModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; +import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel; +import org.apache.hadoop.hbase.rest.model.TableInfoModel; +import org.apache.hadoop.hbase.rest.model.TableListModel; +import org.apache.hadoop.hbase.rest.model.TableModel; +import org.apache.hadoop.hbase.rest.model.TableRegionModel; +import org.apache.hadoop.hbase.rest.model.TableSchemaModel; +import org.apache.hadoop.hbase.rest.model.VersionModel; + +/** + * Plumbing for hooking up Jersey's JSON entity body encoding and decoding + * support to JAXB. Modify how the context is created (by using e.g. a + * different configuration builder) to control how JSON is processed and + * created. + */ +@Provider +@InterfaceAudience.Private +public class JAXBContextResolver implements ContextResolver { + + private final JAXBContext context; + + private final Set> types; + + private final Class[] cTypes = { + CellModel.class, + CellSetModel.class, + ColumnSchemaModel.class, + NamespacesModel.class, + NamespacesInstanceModel.class, + RowModel.class, + ScannerModel.class, + StorageClusterStatusModel.class, + StorageClusterVersionModel.class, + TableInfoModel.class, + TableListModel.class, + TableModel.class, + TableRegionModel.class, + TableSchemaModel.class, + VersionModel.class + }; + + @SuppressWarnings("unchecked") + public JAXBContextResolver() throws Exception { + this.types = new HashSet(Arrays.asList(cTypes)); + context = JAXBContext.newInstance(cTypes); + } + + @Override + public JAXBContext getContext(Class objectType) { + return (types.contains(objectType)) ? 
context : null; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java new file mode 100755 index 00000000..9990f3fe --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java @@ -0,0 +1,88 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.provider.consumer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.lang.annotation.Annotation; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Type; + +import javax.ws.rs.Consumes; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.MessageBodyReader; +import javax.ws.rs.ext.Provider; + +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.rest.Constants; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; + +/** + * Adapter for hooking up Jersey content processing dispatch to + * ProtobufMessageHandler interface capable handlers for decoding protobuf input. 
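Before the reader implementation that follows, here is a minimal round-trip sketch of the ProtobufMessageHandler contract it depends on. This is an editor's illustration, not part of the patch: TableListModel, TableModel and ProtobufMessageHandler are the classes added earlier in this diff, while the class name and table names are invented for the example.

    import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
    import org.apache.hadoop.hbase.rest.model.TableListModel;
    import org.apache.hadoop.hbase.rest.model.TableModel;

    public class ProtobufRoundTripSketch {
      public static void main(String[] args) throws Exception {
        // Serialize a model the way the protobuf producer would.
        TableListModel sent = new TableListModel();
        sent.add(new TableModel("users"));
        sent.add(new TableModel("metrics"));
        byte[] wire = sent.createProtobufOutput();

        // Rebuild it the way the consumer does after buffering the request body.
        ProtobufMessageHandler received = new TableListModel().getObjectFromMessage(wire);
        System.out.print(received);   // prints the two table names, one per line
      }
    }

The consumer's readFrom() below does essentially the second half of this: it instantiates the target model reflectively, buffers the request body, and hands the bytes to getObjectFromMessage().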
+ */ +@Provider +@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@InterfaceAudience.Private +public class ProtobufMessageBodyConsumer + implements MessageBodyReader { + private static final Logger LOG = + LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class); + + @Override + public boolean isReadable(Class type, Type genericType, + Annotation[] annotations, MediaType mediaType) { + return ProtobufMessageHandler.class.isAssignableFrom(type); + } + + @Override + public ProtobufMessageHandler readFrom(Class type, Type genericType, + Annotation[] annotations, MediaType mediaType, + MultivaluedMap httpHeaders, InputStream inputStream) + throws IOException, WebApplicationException { + ProtobufMessageHandler obj = null; + try { + obj = type.getDeclaredConstructor().newInstance(); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + byte[] buffer = new byte[4096]; + int read; + do { + read = inputStream.read(buffer, 0, buffer.length); + if (read > 0) { + baos.write(buffer, 0, read); + } + } while (read > 0); + if (LOG.isTraceEnabled()) { + LOG.trace(getClass() + ": read " + baos.size() + " bytes from " + + inputStream); + } + obj = obj.getObjectFromMessage(baos.toByteArray()); + } catch (InstantiationException | NoSuchMethodException | InvocationTargetException + | IllegalAccessException e) { + throw new WebApplicationException(e); + } + return obj; + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java new file mode 100755 index 00000000..0a606f60 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/PlainTextMessageBodyProducer.java @@ -0,0 +1,70 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.provider.producer; + +import java.io.IOException; +import java.io.OutputStream; +import java.lang.annotation.Annotation; +import java.lang.reflect.Type; + +import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.MessageBodyWriter; +import javax.ws.rs.ext.Provider; + +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.rest.Constants; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * An adapter between Jersey and Object.toString(). Hooks up plain text output + * to the Jersey content handling framework. + * Jersey will first call getSize() to learn the number of bytes that will be + * sent, then writeTo to perform the actual I/O. 
+ */ +@Provider +@Produces(Constants.MIMETYPE_TEXT) +@InterfaceAudience.Private +public class PlainTextMessageBodyProducer + implements MessageBodyWriter { + + @Override + public boolean isWriteable(Class arg0, Type arg1, Annotation[] arg2, + MediaType arg3) { + return true; + } + + @Override + public long getSize(Object object, Class type, Type genericType, + Annotation[] annotations, MediaType mediaType) { + // deprecated by JAX-RS 2.0 and ignored by Jersey runtime + return -1; + } + + @Override + public void writeTo(Object object, Class type, Type genericType, + Annotation[] annotations, MediaType mediaType, + MultivaluedMap httpHeaders, OutputStream outStream) + throws IOException, WebApplicationException { + outStream.write(Bytes.toBytes(object.toString())); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java new file mode 100755 index 00000000..800c48b3 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/producer/ProtobufMessageBodyProducer.java @@ -0,0 +1,70 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.provider.producer; + +import java.io.IOException; +import java.io.OutputStream; +import java.lang.annotation.Annotation; +import java.lang.reflect.Type; + +import javax.ws.rs.Produces; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.ext.MessageBodyWriter; +import javax.ws.rs.ext.Provider; + +import org.apache.hadoop.hbase.rest.Constants; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up + * protobuf output producing methods to the Jersey content handling framework. + * Jersey will first call getSize() to learn the number of bytes that will be + * sent, then writeTo to perform the actual I/O. 
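+ *
+ * <p>Note that under JAX-RS 2.0 the value returned by {@code getSize()} is deprecated and
+ * ignored by the Jersey runtime, so the implementation below simply returns -1 and lets
+ * {@code writeTo()} stream the serialized protobuf.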
+ */ +@Provider +@Produces({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF}) +@InterfaceAudience.Private +public class ProtobufMessageBodyProducer + implements MessageBodyWriter { + + @Override + public boolean isWriteable(Class type, Type genericType, + Annotation[] annotations, MediaType mediaType) { + return ProtobufMessageHandler.class.isAssignableFrom(type); + } + + @Override + public long getSize(ProtobufMessageHandler m, Class type, Type genericType, + Annotation[] annotations, MediaType mediaType) { + // deprecated by JAX-RS 2.0 and ignored by Jersey runtime + return -1; + } + + @Override + public void writeTo(ProtobufMessageHandler m, Class type, Type genericType, + Annotation[] annotations, MediaType mediaType, + MultivaluedMap httpHeaders, OutputStream entityStream) + throws IOException, WebApplicationException { + entityStream.write(m.createProtobufOutput()); + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/ConnectionCache.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/ConnectionCache.java new file mode 100755 index 00000000..cbd58746 --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/ConnectionCache.java @@ -0,0 +1,245 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.util; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ChoreService; +import org.apache.hadoop.hbase.ScheduledChore; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.KeyLocker; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; + +/** + * A utility to store user specific HConnections in memory. + * There is a chore to clean up connections idle for too long. + * This class is used by REST server and Thrift server to + * support authentication and impersonation. 
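+ *
+ * <p>A minimal usage sketch (interval and user values are illustrative only):
+ * <pre>{@code
+ *   ConnectionCache cache = new ConnectionCache(conf, UserProvider.instantiate(conf),
+ *     10 * 1000, 10 * 60 * 1000);          // clean every 10s, expire after 10min idle
+ *   cache.setEffectiveUser("bob");         // thread-local effective user for this request
+ *   try (Table table = cache.getTable("mytable")) {
+ *     // callers close Tables; the Admin and Connection are managed by the cache
+ *   }
+ *   cache.shutdown();
+ * }</pre>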
+ */ +@InterfaceAudience.Private +public class ConnectionCache { + private static final Logger LOG = LoggerFactory.getLogger(ConnectionCache.class); + + private final Map connections = new ConcurrentHashMap<>(); + private final KeyLocker locker = new KeyLocker<>(); + private final String realUserName; + private final UserGroupInformation realUser; + private final UserProvider userProvider; + private final Configuration conf; + private final ChoreService choreService; + + private final ThreadLocal effectiveUserNames = + new ThreadLocal() { + @Override + protected String initialValue() { + return realUserName; + } + }; + + public ConnectionCache(final Configuration conf, + final UserProvider userProvider, + final int cleanInterval, final int maxIdleTime) throws IOException { + Stoppable stoppable = new Stoppable() { + private volatile boolean isStopped = false; + @Override public void stop(String why) { isStopped = true;} + @Override public boolean isStopped() {return isStopped;} + }; + this.choreService = new ChoreService("ConnectionCache"); + ScheduledChore cleaner = new ScheduledChore("ConnectionCleaner", stoppable, cleanInterval) { + @Override + protected void chore() { + for (Map.Entry entry: connections.entrySet()) { + ConnectionInfo connInfo = entry.getValue(); + if (connInfo.timedOut(maxIdleTime)) { + if (connInfo.admin != null) { + try { + connInfo.admin.close(); + } catch (Throwable t) { + LOG.info("Got exception in closing idle admin", t); + } + } + try { + connInfo.connection.close(); + } catch (Throwable t) { + LOG.info("Got exception in closing idle connection", t); + } + } + } + } + }; + // Start the daemon cleaner chore + choreService.scheduleChore(cleaner); + this.realUser = userProvider.getCurrent().getUGI(); + this.realUserName = realUser.getShortUserName(); + this.userProvider = userProvider; + this.conf = conf; + } + + /** + * Set the current thread local effective user + */ + public void setEffectiveUser(String user) { + effectiveUserNames.set(user); + } + + /** + * Get the current thread local effective user + */ + public String getEffectiveUser() { + return effectiveUserNames.get(); + } + + /** + * Called when cache is no longer needed so that it can perform cleanup operations + */ + public void shutdown() { + if (choreService != null) choreService.shutdown(); + } + + /** + * Caller doesn't close the admin afterwards. + * We need to manage it and close it properly. + */ + public Admin getAdmin() throws IOException { + ConnectionInfo connInfo = getCurrentConnection(); + if (connInfo.admin == null) { + Lock lock = locker.acquireLock(getEffectiveUser()); + try { + if (connInfo.admin == null) { + connInfo.admin = connInfo.connection.getAdmin(); + } + } finally { + lock.unlock(); + } + } + return connInfo.admin; + } + + /** + * Caller closes the table afterwards. + */ + public Table getTable(String tableName) throws IOException { + ConnectionInfo connInfo = getCurrentConnection(); + return connInfo.connection.getTable(TableName.valueOf(tableName)); + } + + /** + * Retrieve a regionLocator for the table. The user should close the RegionLocator. + */ + public RegionLocator getRegionLocator(byte[] tableName) throws IOException { + return getCurrentConnection().connection.getRegionLocator(TableName.valueOf(tableName)); + } + + /** + * Get the cached connection for the current user. + * If none or timed out, create a new one. 
+ */ + ConnectionInfo getCurrentConnection() throws IOException { + String userName = getEffectiveUser(); + ConnectionInfo connInfo = connections.get(userName); + if (connInfo == null || !connInfo.updateAccessTime()) { + Lock lock = locker.acquireLock(userName); + try { + connInfo = connections.get(userName); + if (connInfo == null) { + UserGroupInformation ugi = realUser; + if (!userName.equals(realUserName)) { + ugi = UserGroupInformation.createProxyUser(userName, realUser); + } + User user = userProvider.create(ugi); + Connection conn = ConnectionFactory.createConnection(conf, user); + connInfo = new ConnectionInfo(conn, userName); + connections.put(userName, connInfo); + } + } finally { + lock.unlock(); + } + } + return connInfo; + } + + /** + * Updates the access time for the current connection. Used to keep Connections alive for + * long-lived scanners. + * @return whether we successfully updated the last access time + */ + public boolean updateConnectionAccessTime() { + String userName = getEffectiveUser(); + ConnectionInfo connInfo = connections.get(userName); + if (connInfo != null) { + return connInfo.updateAccessTime(); + } + return false; + } + + class ConnectionInfo { + final Connection connection; + final String userName; + + volatile Admin admin; + private long lastAccessTime; + private boolean closed; + + ConnectionInfo(Connection conn, String user) { + lastAccessTime = EnvironmentEdgeManager.currentTime(); + connection = conn; + closed = false; + userName = user; + } + + synchronized boolean updateAccessTime() { + if (closed) { + return false; + } + if (connection.isAborted() || connection.isClosed()) { + LOG.info("Unexpected: cached Connection is aborted/closed, removed from cache"); + connections.remove(userName); + return false; + } + lastAccessTime = EnvironmentEdgeManager.currentTime(); + return true; + } + + synchronized boolean timedOut(int maxIdleTime) { + long timeoutTime = lastAccessTime + maxIdleTime; + if (EnvironmentEdgeManager.currentTime() > timeoutTime) { + connections.remove(userName); + closed = true; + return true; + } + return false; + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/JSONBean.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/JSONBean.java new file mode 100755 index 00000000..d28641fd --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/JSONBean.java @@ -0,0 +1,364 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.util; + +import org.apache.hadoop.hbase.util.GsonUtil; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.management.AttributeNotFoundException; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.ReflectionException; +import javax.management.RuntimeErrorException; +import javax.management.RuntimeMBeanException; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.TabularData; +import java.io.Closeable; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.PrintWriter; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Array; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.Set; + +/** + * Utility for doing JSON and MBeans. + */ +@InterfaceAudience.Private +public class JSONBean { + private static final Logger LOG = LoggerFactory.getLogger(JSONBean.class); + private static final Gson GSON = GsonUtil.createGson().create(); + + /** + * Use dumping out mbeans as JSON. + */ + public interface Writer extends Closeable { + + void write(String key, String value) throws IOException; + + int write(MBeanServer mBeanServer, ObjectName qry, String attribute, boolean description) + throws IOException; + + void flush() throws IOException; + } + + /** + * Notice that, closing the return {@link Writer} will not close the {@code writer} passed in, you + * still need to close the {@code writer} by yourself. + *
+ * <p/>
+ * This is because that, we can only finish the json after you call {@link Writer#close()}. So if + * we just close the {@code writer}, you can write nothing after finished the json. + */ + public Writer open(final PrintWriter writer) throws IOException { + JsonWriter jsonWriter = GSON.newJsonWriter(new java.io.Writer() { + + @Override + public void write(char[] cbuf, int off, int len) throws IOException { + writer.write(cbuf, off, len); + } + + @Override + public void flush() throws IOException { + writer.flush(); + } + + @Override + public void close() throws IOException { + // do nothing + } + }); + jsonWriter.setIndent(" "); + jsonWriter.beginObject(); + return new Writer() { + @Override + public void flush() throws IOException { + jsonWriter.flush(); + } + + @Override + public void close() throws IOException { + jsonWriter.endObject(); + jsonWriter.close(); + } + + @Override + public void write(String key, String value) throws IOException { + jsonWriter.name(key).value(value); + } + + @Override + public int write(MBeanServer mBeanServer, ObjectName qry, String attribute, + boolean description) throws IOException { + return JSONBean.write(jsonWriter, mBeanServer, qry, attribute, description); + } + }; + } + + /** + * @return Return non-zero if failed to find bean. 0 + */ + private static int write(JsonWriter writer, MBeanServer mBeanServer, ObjectName qry, + String attribute, boolean description) throws IOException { + LOG.trace("Listing beans for " + qry); + Set names = null; + names = mBeanServer.queryNames(qry, null); + writer.name("beans").beginArray(); + Iterator it = names.iterator(); + while (it.hasNext()) { + ObjectName oname = it.next(); + MBeanInfo minfo; + String code = ""; + String descriptionStr = null; + Object attributeinfo = null; + try { + minfo = mBeanServer.getMBeanInfo(oname); + code = minfo.getClassName(); + if (description) { + descriptionStr = minfo.getDescription(); + } + String prs = ""; + try { + if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) { + prs = "modelerType"; + code = (String) mBeanServer.getAttribute(oname, prs); + } + if (attribute != null) { + prs = attribute; + attributeinfo = mBeanServer.getAttribute(oname, prs); + } + } catch (RuntimeMBeanException e) { + // UnsupportedOperationExceptions happen in the normal course of business, + // so no need to log them as errors all the time. + if (e.getCause() instanceof UnsupportedOperationException) { + if (LOG.isTraceEnabled()) { + LOG.trace("Getting attribute " + prs + " of " + oname + " threw " + e); + } + } else { + LOG.error("Getting attribute " + prs + " of " + oname + " threw an exception", e); + } + return 0; + } catch (AttributeNotFoundException e) { + // If the modelerType attribute was not found, the class name is used + // instead. + LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e); + } catch (MBeanException e) { + // The code inside the attribute getter threw an exception so log it, + // and fall back on the class name + LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e); + } catch (RuntimeException e) { + // For some reason even with an MBeanException available to them + // Runtime exceptionscan still find their way through, so treat them + // the same as MBeanException + LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e); + } catch (ReflectionException e) { + // This happens when the code inside the JMX bean (setter?? 
from the + // java docs) threw an exception, so log it and fall back on the + // class name + LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", e); + } + } catch (InstanceNotFoundException e) { + // Ignored for some reason the bean was not found so don't output it + continue; + } catch (IntrospectionException e) { + // This is an internal error, something odd happened with reflection so + // log it and don't output the bean. + LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, e); + continue; + } catch (ReflectionException e) { + // This happens when the code inside the JMX bean threw an exception, so + // log it and don't output the bean. + LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, e); + continue; + } + writer.beginObject(); + writer.name("name").value(oname.toString()); + if (description && descriptionStr != null && descriptionStr.length() > 0) { + writer.name("description").value(descriptionStr); + } + writer.name("modelerType").value(code); + if (attribute != null && attributeinfo == null) { + writer.name("result").value("ERROR"); + writer.name("message").value("No attribute with name " + attribute + " was found."); + writer.endObject(); + writer.endArray(); + writer.close(); + return -1; + } + + if (attribute != null) { + writeAttribute(writer, attribute, descriptionStr, attributeinfo); + } else { + MBeanAttributeInfo[] attrs = minfo.getAttributes(); + for (int i = 0; i < attrs.length; i++) { + writeAttribute(writer, mBeanServer, oname, description, attrs[i]); + } + } + writer.endObject(); + } + writer.endArray(); + return 0; + } + + private static void writeAttribute(JsonWriter writer, MBeanServer mBeanServer, ObjectName oname, + boolean description, MBeanAttributeInfo attr) throws IOException { + if (!attr.isReadable()) { + return; + } + String attName = attr.getName(); + if ("modelerType".equals(attName)) { + return; + } + if (attName.indexOf("=") >= 0 || attName.indexOf(":") >= 0 || attName.indexOf(" ") >= 0) { + return; + } + String descriptionStr = description ? attr.getDescription() : null; + Object value = null; + try { + value = mBeanServer.getAttribute(oname, attName); + } catch (RuntimeMBeanException e) { + // UnsupportedOperationExceptions happen in the normal course of business, + // so no need to log them as errors all the time. + if (e.getCause() instanceof UnsupportedOperationException) { + if (LOG.isTraceEnabled()) { + LOG.trace("Getting attribute " + attName + " of " + oname + " threw " + e); + } + } else { + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", e); + } + return; + } catch (RuntimeErrorException e) { + // RuntimeErrorException happens when an unexpected failure occurs in getAttribute + // for example https://issues.apache.org/jira/browse/DAEMON-120 + LOG.debug("getting attribute " + attName + " of " + oname + " threw an exception", e); + return; + } catch (AttributeNotFoundException e) { + // Ignored the attribute was not found, which should never happen because the bean + // just told us that it has this attribute, but if this happens just don't output + // the attribute. 
+ return; + } catch (MBeanException e) { + // The code inside the attribute getter threw an exception so log it, and + // skip outputting the attribute + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", e); + return; + } catch (RuntimeException e) { + // For some reason even with an MBeanException available to them Runtime exceptions + // can still find their way through, so treat them the same as MBeanException + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", e); + return; + } catch (ReflectionException e) { + // This happens when the code inside the JMX bean (setter?? from the java docs) + // threw an exception, so log it and skip outputting the attribute + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", e); + return; + } catch (InstanceNotFoundException e) { + // Ignored the mbean itself was not found, which should never happen because we + // just accessed it (perhaps something unregistered in-between) but if this + // happens just don't output the attribute. + return; + } + + writeAttribute(writer, attName, descriptionStr, value); + } + + private static void writeAttribute(JsonWriter writer, String attName, String descriptionStr, + Object value) throws IOException { + if (descriptionStr != null && descriptionStr.length() > 0 && !attName.equals(descriptionStr)) { + writer.name(attName); + writer.beginObject(); + writer.name("description").value(descriptionStr); + writer.name("value"); + writeObject(writer, value); + writer.endObject(); + } else { + writer.name(attName); + writeObject(writer, value); + } + } + + private static void writeObject(JsonWriter writer, Object value) throws IOException { + if (value == null) { + writer.nullValue(); + } else { + Class c = value.getClass(); + if (c.isArray()) { + writer.beginArray(); + int len = Array.getLength(value); + for (int j = 0; j < len; j++) { + Object item = Array.get(value, j); + writeObject(writer, item); + } + writer.endArray(); + } else if (value instanceof Number) { + Number n = (Number) value; + if (Double.isFinite(n.doubleValue())) { + writer.value(n); + } else { + writer.value(n.toString()); + } + } else if (value instanceof Boolean) { + Boolean b = (Boolean) value; + writer.value(b); + } else if (value instanceof CompositeData) { + CompositeData cds = (CompositeData) value; + CompositeType comp = cds.getCompositeType(); + Set keys = comp.keySet(); + writer.beginObject(); + for (String key : keys) { + writeAttribute(writer, key, null, cds.get(key)); + } + writer.endObject(); + } else if (value instanceof TabularData) { + TabularData tds = (TabularData) value; + writer.beginArray(); + for (Object entry : tds.values()) { + writeObject(writer, entry); + } + writer.endArray(); + } else { + writer.value(value.toString()); + } + } + } + + /** + * Dump out all registered mbeans as json on System.out. 
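+ *
+ * <p>The same open/write/close pattern works for a single query as well, e.g. (the query
+ * name is illustrative):
+ * <pre>{@code
+ *   try (JSONBean.Writer json = new JSONBean().open(printWriter)) {
+ *     json.write(ManagementFactory.getPlatformMBeanServer(),
+ *       new ObjectName("java.lang:type=Memory"), null, false);
+ *   }
+ * }</pre>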
+ */ + public static void dumpAllBeans() throws IOException, MalformedObjectNameException { + try (PrintWriter writer = + new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) { + JSONBean dumper = new JSONBean(); + try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) { + MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); + jsonBeanWriter.write(mbeanServer, new ObjectName("*:*"), null, false); + } + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/JvmPauseMonitor.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/JvmPauseMonitor.java new file mode 100755 index 00000000..4c95d43c --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/JvmPauseMonitor.java @@ -0,0 +1,223 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.util; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.metrics.JvmPauseMonitorSource; +import org.apache.hbase.thirdparty.com.google.common.base.Joiner; +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +/** + * Class which sets up a simple thread which runs in a loop sleeping + * for a short interval of time. If the sleep takes significantly longer + * than its target time, it implies that the JVM or host machine has + * paused processing, which may cause other problems. If such a pause is + * detected, the thread logs a message. + * The original JvmPauseMonitor is: + * ${hadoop-common-project}/hadoop-common/src/main/java/org/apache/hadoop/util/ + * JvmPauseMonitor.java + * r1503806 | cmccabe | 2013-07-17 01:48:24 +0800 (Wed, 17 Jul 2013) | 1 line + * HADOOP-9618. 
thread which detects GC pauses(Todd Lipcon) + */ +@InterfaceAudience.Private +public class JvmPauseMonitor { + private static final Logger LOG = LoggerFactory.getLogger(JvmPauseMonitor.class); + + /** The target sleep time */ + private static final long SLEEP_INTERVAL_MS = 500; + + /** log WARN if we detect a pause longer than this threshold */ + private final long warnThresholdMs; + public static final String WARN_THRESHOLD_KEY = + "jvm.pause.warn-threshold.ms"; + private static final long WARN_THRESHOLD_DEFAULT = 10000; + + /** log INFO if we detect a pause longer than this threshold */ + private final long infoThresholdMs; + public static final String INFO_THRESHOLD_KEY = + "jvm.pause.info-threshold.ms"; + private static final long INFO_THRESHOLD_DEFAULT = 1000; + + private Thread monitorThread; + private volatile boolean shouldRun = true; + private JvmPauseMonitorSource metricsSource; + + public JvmPauseMonitor(Configuration conf) { + this(conf, null); + } + + public JvmPauseMonitor(Configuration conf, JvmPauseMonitorSource metricsSource) { + this.warnThresholdMs = conf.getLong(WARN_THRESHOLD_KEY, WARN_THRESHOLD_DEFAULT); + this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT); + this.metricsSource = metricsSource; + } + + public void start() { + Preconditions.checkState(monitorThread == null, "Already started"); + monitorThread = new Thread(new Monitor(), "JvmPauseMonitor"); + monitorThread.setDaemon(true); + monitorThread.start(); + } + + public void stop() { + shouldRun = false; + monitorThread.interrupt(); + try { + monitorThread.join(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + private String formatMessage(long extraSleepTime, List gcDiffs) { + String ret = "Detected pause in JVM or host machine (eg GC): " + "pause of approximately " + + extraSleepTime + "ms\n"; + if (gcDiffs.isEmpty()) { + ret += "No GCs detected"; + } else { + ret += Joiner.on("\n").join(gcDiffs); + } + return ret; + } + + private Map getGcTimes() { + Map map = Maps.newHashMap(); + List gcBeans = ManagementFactory.getGarbageCollectorMXBeans(); + for (GarbageCollectorMXBean gcBean : gcBeans) { + map.put(gcBean.getName(), new GcTimes(gcBean)); + } + return map; + } + + private static class GcTimes { + private GcTimes(GarbageCollectorMXBean gcBean) { + gcCount = gcBean.getCollectionCount(); + gcTimeMillis = gcBean.getCollectionTime(); + } + + private GcTimes(long count, long time) { + this.gcCount = count; + this.gcTimeMillis = time; + } + + private GcTimes subtract(GcTimes other) { + return new GcTimes(this.gcCount - other.gcCount, this.gcTimeMillis - other.gcTimeMillis); + } + + @Override + public String toString() { + return "count=" + gcCount + " time=" + gcTimeMillis + "ms"; + } + + private long gcCount; + private long gcTimeMillis; + } + + private class Monitor implements Runnable { + @Override + public void run() { + Stopwatch sw = Stopwatch.createUnstarted(); + Map gcTimesBeforeSleep = getGcTimes(); + while (shouldRun) { + sw.reset().start(); + try { + Thread.sleep(SLEEP_INTERVAL_MS); + } catch (InterruptedException ie) { + return; + } + + long extraSleepTime = sw.elapsed(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS; + Map gcTimesAfterSleep = getGcTimes(); + + if (extraSleepTime > infoThresholdMs) { + Set gcBeanNames = Sets.intersection(gcTimesAfterSleep.keySet(), + gcTimesBeforeSleep.keySet()); + List gcDiffs = Lists.newArrayList(); + for (String name : gcBeanNames) { + GcTimes diff = 
gcTimesAfterSleep.get(name).subtract(gcTimesBeforeSleep.get(name)); + if (diff.gcCount != 0) { + gcDiffs.add("GC pool '" + name + "' had collection(s): " + diff.toString()); + } + } + + updateMetrics(extraSleepTime, !gcDiffs.isEmpty()); + + if (extraSleepTime > warnThresholdMs) { + LOG.warn(formatMessage(extraSleepTime, gcDiffs)); + } else { + LOG.info(formatMessage(extraSleepTime, gcDiffs)); + } + } + gcTimesBeforeSleep = gcTimesAfterSleep; + } + } + } + + public void updateMetrics(long sleepTime, boolean gcDetected) { + if (metricsSource != null) { + if (sleepTime > warnThresholdMs) { + metricsSource.incWarnThresholdExceeded(1); + } else { + metricsSource.incInfoThresholdExceeded(1); + } + if (gcDetected) { + metricsSource.updatePauseTimeWithGc(sleepTime); + } else { + metricsSource.updatePauseTimeWithoutGc(sleepTime); + } + } + } + + public JvmPauseMonitorSource getMetricsSource() { + return metricsSource; + } + + public void setMetricsSource(JvmPauseMonitorSource metricsSource) { + this.metricsSource = metricsSource; + } + + /** + * Simple 'main' to facilitate manual testing of the pause monitor. + * + * This main function just leaks memory into a list. Running this class + * with a 1GB heap will very quickly go into "GC hell" and result in + * log messages about the GC pauses. + */ + public static void main(String []args) throws Exception { + new JvmPauseMonitor(new Configuration()).start(); + List list = Lists.newArrayList(); + int i = 0; + while (true) { + list.add(String.valueOf(i++)); + } + } +} diff --git a/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/ProcessUtils.java b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/ProcessUtils.java new file mode 100755 index 00000000..f166672a --- /dev/null +++ b/rest/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/util/ProcessUtils.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.util; + +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.List; + +/** + * Process related utilities. 
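+ *
+ * <p>For example (the command shown is illustrative):
+ * <pre>{@code
+ *   Integer pid = ProcessUtils.getPid();   // may be null if the pid cannot be determined
+ *   Process p = ProcessUtils.runCmdAsync(Arrays.asList("jstack", String.valueOf(pid)));
+ * }</pre>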
+ */ +@InterfaceAudience.Private +public final class ProcessUtils { + private static Logger LOG = LoggerFactory.getLogger(ProcessUtils.class); + + private ProcessUtils() { } + + public static Integer getPid() { + // JVM_PID is exported by bin/hbase run script + String pidStr = System.getenv("JVM_PID"); + + // in case if it is not set correctly used fallback from mxbean which is implementation specific + if (pidStr == null || pidStr.trim().isEmpty()) { + String name = ManagementFactory.getRuntimeMXBean().getName(); + if (name != null) { + int idx = name.indexOf("@"); + if (idx != -1) { + pidStr = name.substring(0, name.indexOf("@")); + } + } + } + try { + if (pidStr != null) { + return Integer.valueOf(pidStr); + } + } catch (NumberFormatException nfe) { + // ignore + } + return null; + } + + public static Process runCmdAsync(List cmd) { + try { + LOG.info("Running command async: " + cmd); + return new ProcessBuilder(cmd).inheritIO().start(); + } catch (IOException ex) { + throw new IllegalStateException(ex); + } + } +} diff --git a/rest/hbase-rest/src/main/resources/hbase-webapps/rest/index.html b/rest/hbase-rest/src/main/resources/hbase-webapps/rest/index.html new file mode 100755 index 00000000..e4084b7c --- /dev/null +++ b/rest/hbase-rest/src/main/resources/hbase-webapps/rest/index.html @@ -0,0 +1,20 @@ + + diff --git a/rest/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp b/rest/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp new file mode 100755 index 00000000..ed4e9c28 --- /dev/null +++ b/rest/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp @@ -0,0 +1,116 @@ +<%-- +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +--%> +<%@ page contentType="text/html;charset=UTF-8" + import="org.apache.hadoop.conf.Configuration" + import="org.apache.hadoop.hbase.HBaseConfiguration" + import="org.apache.hadoop.hbase.util.VersionInfo" + import="java.util.Date"%> +<% +Configuration conf = (Configuration)getServletContext().getAttribute("hbase.conf"); +long startcode = conf.getLong("startcode", System.currentTimeMillis()); +String listenPort = conf.get("hbase.rest.port", "8080"); +%> + + + + + + HBase REST Server: <%= listenPort %> + + + + + + + + + +
+  <div class="container-fluid content">
+    <h2>Software Attributes</h2>
+    <table class="table table-striped">
+      <tr>
+        <th>Attribute Name</th>
+        <th>Value</th>
+        <th>Description</th>
+      </tr>
+      <tr>
+        <td>HBase Version</td>
+        <td><%= VersionInfo.getVersion() %>, revision=<%= VersionInfo.getRevision() %></td>
+        <td>HBase version and revision</td>
+      </tr>
+      <tr>
+        <td>HBase Compiled</td>
+        <td><%= VersionInfo.getDate() %>, <%= VersionInfo.getUser() %></td>
+        <td>When HBase version was compiled and by whom</td>
+      </tr>
+      <tr>
+        <td>REST Server Start Time</td>
+        <td><%= new Date(startcode) %></td>
+        <td>Date stamp of when this REST server was started</td>
+      </tr>
+    </table>
+  </div>
+ + + + + + diff --git a/rest/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd b/rest/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd new file mode 100755 index 00000000..53945f34 --- /dev/null +++ b/rest/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd @@ -0,0 +1,209 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java new file mode 100755 index 00000000..5af8ee2b --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/DummyFilter.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.IOException; + +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DummyFilter implements Filter { + private static final Logger LOG = LoggerFactory.getLogger(DummyFilter.class); + + @Override + public void destroy() { + } + + @Override + public void doFilter(ServletRequest paramServletRequest, ServletResponse paramServletResponse, + FilterChain paramFilterChain) throws IOException, ServletException { + if (paramServletRequest instanceof HttpServletRequest + && paramServletResponse instanceof HttpServletResponse) { + HttpServletRequest request = (HttpServletRequest) paramServletRequest; + HttpServletResponse response = (HttpServletResponse) paramServletResponse; + + String path = request.getRequestURI(); + LOG.info(path); + if (path.indexOf("/status/cluster") >= 0) { + LOG.info("Blocking cluster status request"); + response.sendError(HttpServletResponse.SC_NOT_FOUND, "Cluster status cannot be requested."); + } else { + paramFilterChain.doFilter(request, response); + } + } + } + + @Override + public void init(FilterConfig filterChain) throws ServletException { + } + +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java new file mode 100755 index 00000000..00b28c75 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/HBaseRESTTestingUtility.java @@ -0,0 +1,64 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.util.StringUtils; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class HBaseRESTTestingUtility { + private static final Logger LOG = LoggerFactory.getLogger(HBaseRESTTestingUtility.class); + + private RESTServer server; + + public int getServletPort() { + return server.getPort(); + } + + public void startServletContainer(Configuration conf) throws Exception { + if (server != null) { + LOG.error("RESTServer already running"); + return; + } + + conf.setInt("hbase.rest.port", 0); + conf.setInt("hbase.rest.info.port", -1); + conf.setBoolean(RESTServer.SKIP_LOGIN_KEY, true); + + server = new RESTServer(conf); + server.run(); + + LOG.info("started " + server.getClass().getName() + " on port " + + server.getPort()); + } + + public void shutdownServletContainer() { + if (server != null) { + try { + server.stop(); + server = null; + RESTServlet.stop(); + } catch (Exception e) { + LOG.warn(StringUtils.stringifyException(e)); + } + } + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java new file mode 100755 index 00000000..b895c4d8 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -0,0 +1,1522 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.PrintStream; +import java.lang.reflect.Constructor; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.TreeMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ArrayBackedTag; +import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.PageFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.filter.WhileMatchFilter; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.RemoteAdmin; +import org.apache.hadoop.hbase.util.ByteArrayHashKey; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Hash; +import org.apache.hadoop.hbase.util.MurmurHash; +import org.apache.hadoop.hbase.util.Pair; + +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.mapreduce.RecordReader; +import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; +import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer; +import org.apache.hadoop.util.LineReader; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Script used evaluating Stargate performance and scalability. 
Runs a SG + * client that steps through one of a set of hardcoded tests or 'experiments' + * (e.g. a random reads test, a random writes test, etc.). Pass on the + * command-line which test to run and how many clients are participating in + * this experiment. Run java PerformanceEvaluation --help to + * obtain usage. + * + *

This class sets up and runs the evaluation programs described in + * Section 7, Performance Evaluation, of the Bigtable + * paper, pages 8-10. + * + *

If number of clients > 1, we start up a MapReduce job. Each map task + * runs an individual client. Each client does about 1GB of data. + */ +public class PerformanceEvaluation extends Configured implements Tool { + protected static final Logger LOG = + LoggerFactory.getLogger(PerformanceEvaluation.class); + + private static final int DEFAULT_ROW_PREFIX_LENGTH = 16; + private static final int ROW_LENGTH = 1000; + private static final int TAG_LENGTH = 256; + private static final int ONE_GB = 1024 * 1024 * 1000; + private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH; + + public static final TableName TABLE_NAME = TableName.valueOf("TestTable"); + public static final byte[] FAMILY_NAME = Bytes.toBytes("info"); + public static final byte[] QUALIFIER_NAME = Bytes.toBytes("data"); + private TableName tableName = TABLE_NAME; + + protected TableDescriptorBuilder.ModifyableTableDescriptor TABLE_DESCRIPTOR; + protected Map commands = new TreeMap<>(); + protected static Cluster cluster = new Cluster(); + + volatile Configuration conf; + private boolean nomapred = false; + private int N = 1; + private int R = ROWS_PER_GB; + private Compression.Algorithm compression = Compression.Algorithm.NONE; + private DataBlockEncoding blockEncoding = DataBlockEncoding.NONE; + private boolean flushCommits = true; + private boolean writeToWAL = true; + private boolean inMemoryCF = false; + private int presplitRegions = 0; + private boolean useTags = false; + private int noOfTags = 1; + private Connection connection; + + private static final Path PERF_EVAL_DIR = new Path("performance_evaluation"); + + /** + * Regex to parse lines in input file passed to mapreduce task. + */ + public static final Pattern LINE_PATTERN = + Pattern.compile("tableName=(\\w+),\\s+" + + "startRow=(\\d+),\\s+" + + "perClientRunRows=(\\d+),\\s+" + + "totalRows=(\\d+),\\s+" + + "clients=(\\d+),\\s+" + + "flushCommits=(\\w+),\\s+" + + "writeToWAL=(\\w+),\\s+" + + "useTags=(\\w+),\\s+" + + "noOfTags=(\\d+)"); + + /** + * Enum for map metrics. Keep it out here rather than inside in the Map + * inner-class so we can find associated properties. 
+ */ + protected enum Counter { + /** elapsed time */ + ELAPSED_TIME, + /** number of rows */ + ROWS + } + + /** + * Constructor + * @param c Configuration object + */ + public PerformanceEvaluation(final Configuration c) { + this.conf = c; + + addCommandDescriptor(RandomReadTest.class, "randomRead", + "Run random read test"); + addCommandDescriptor(RandomSeekScanTest.class, "randomSeekScan", + "Run random seek and scan 100 test"); + addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", + "Run random seek scan with both start and stop row (max 10 rows)"); + addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100", + "Run random seek scan with both start and stop row (max 100 rows)"); + addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000", + "Run random seek scan with both start and stop row (max 1000 rows)"); + addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", + "Run random seek scan with both start and stop row (max 10000 rows)"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", + "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", + "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", + "Run sequential write test"); + addCommandDescriptor(ScanTest.class, "scan", + "Run scan test (read every row)"); + addCommandDescriptor(FilteredScanTest.class, "filterScan", + "Run scan test using a filter to find a specific row based " + + "on it's value (make sure to use --rows=20)"); + } + + protected void addCommandDescriptor(Class cmdClass, + String name, String description) { + CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); + commands.put(name, cmdDescriptor); + } + + /** + * Implementations can have their status set. + */ + interface Status { + /** + * Sets status + * @param msg status message + * @throws IOException if setting the status fails + */ + void setStatus(final String msg) throws IOException; + } + + /** + * This class works as the InputSplit of Performance Evaluation + * MapReduce InputFormat, and the Record Value of RecordReader. + * Each map task will only read one record from a PeInputSplit, + * the record value is the PeInputSplit itself. 
+ */ + public static class PeInputSplit extends InputSplit implements Writable { + private TableName tableName; + private int startRow; + private int rows; + private int totalRows; + private int clients; + private boolean flushCommits; + private boolean writeToWAL; + private boolean useTags; + private int noOfTags; + + public PeInputSplit(TableName tableName, int startRow, int rows, int totalRows, int clients, + boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags) { + this.tableName = tableName; + this.startRow = startRow; + this.rows = rows; + this.totalRows = totalRows; + this.clients = clients; + this.flushCommits = flushCommits; + this.writeToWAL = writeToWAL; + this.useTags = useTags; + this.noOfTags = noOfTags; + } + + @Override + public void readFields(DataInput in) throws IOException { + int tableNameLen = in.readInt(); + byte[] name = new byte[tableNameLen]; + in.readFully(name); + this.tableName = TableName.valueOf(name); + this.startRow = in.readInt(); + this.rows = in.readInt(); + this.totalRows = in.readInt(); + this.clients = in.readInt(); + this.flushCommits = in.readBoolean(); + this.writeToWAL = in.readBoolean(); + this.useTags = in.readBoolean(); + this.noOfTags = in.readInt(); + } + + @Override + public void write(DataOutput out) throws IOException { + byte[] name = this.tableName.toBytes(); + out.writeInt(name.length); + out.write(name); + out.writeInt(startRow); + out.writeInt(rows); + out.writeInt(totalRows); + out.writeInt(clients); + out.writeBoolean(flushCommits); + out.writeBoolean(writeToWAL); + out.writeBoolean(useTags); + out.writeInt(noOfTags); + } + + @Override + public long getLength() { + return 0; + } + + @Override + public String[] getLocations() { + return new String[0]; + } + + public int getStartRow() { + return startRow; + } + + public TableName getTableName() { + return tableName; + } + + public int getRows() { + return rows; + } + + public int getTotalRows() { + return totalRows; + } + + public boolean isFlushCommits() { + return flushCommits; + } + + public boolean isWriteToWAL() { + return writeToWAL; + } + + public boolean isUseTags() { + return useTags; + } + + public int getNoOfTags() { + return noOfTags; + } + } + + /** + * InputFormat of Performance Evaluation MapReduce job. + * It extends from FileInputFormat, want to use it's methods such as setInputPaths(). 
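+ *
+ * <p>Each input line must match {@code LINE_PATTERN}; for example (values are illustrative,
+ * written as a single line in the input file):
+ * <pre>
+ *   tableName=TestTable, startRow=0, perClientRunRows=1048576, totalRows=1048576, clients=4, flushCommits=true, writeToWAL=true, useTags=false, noOfTags=1
+ * </pre>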
+ */ + public static class PeInputFormat extends FileInputFormat { + @Override + public List getSplits(JobContext job) throws IOException { + // generate splits + List splitList = new ArrayList<>(); + + for (FileStatus file : listStatus(job)) { + if (file.isDirectory()) { + continue; + } + Path path = file.getPath(); + FileSystem fs = path.getFileSystem(job.getConfiguration()); + FSDataInputStream fileIn = fs.open(path); + LineReader in = new LineReader(fileIn, job.getConfiguration()); + int lineLen; + while (true) { + Text lineText = new Text(); + lineLen = in.readLine(lineText); + if (lineLen <= 0) { + break; + } + Matcher m = LINE_PATTERN.matcher(lineText.toString()); + if ((m != null) && m.matches()) { + TableName tableName = TableName.valueOf(m.group(1)); + int startRow = Integer.parseInt(m.group(2)); + int rows = Integer.parseInt(m.group(3)); + int totalRows = Integer.parseInt(m.group(4)); + int clients = Integer.parseInt(m.group(5)); + boolean flushCommits = Boolean.parseBoolean(m.group(6)); + boolean writeToWAL = Boolean.parseBoolean(m.group(7)); + boolean useTags = Boolean.parseBoolean(m.group(8)); + int noOfTags = Integer.parseInt(m.group(9)); + + LOG.debug("tableName=" + tableName + + " split["+ splitList.size() + "] " + + " startRow=" + startRow + + " rows=" + rows + + " totalRows=" + totalRows + + " clients=" + clients + + " flushCommits=" + flushCommits + + " writeToWAL=" + writeToWAL + + " useTags=" + useTags + + " noOfTags=" + noOfTags); + + PeInputSplit newSplit = + new PeInputSplit(tableName, startRow, rows, totalRows, clients, + flushCommits, writeToWAL, useTags, noOfTags); + splitList.add(newSplit); + } + } + in.close(); + } + + LOG.info("Total # of splits: " + splitList.size()); + return splitList; + } + + @Override + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) { + return new PeRecordReader(); + } + + public static class PeRecordReader extends RecordReader { + private boolean readOver = false; + private PeInputSplit split = null; + private NullWritable key = null; + private PeInputSplit value = null; + + @Override + public void initialize(InputSplit split, TaskAttemptContext context) { + this.readOver = false; + this.split = (PeInputSplit)split; + } + + @Override + public boolean nextKeyValue() { + if (readOver) { + return false; + } + + key = NullWritable.get(); + value = split; + + readOver = true; + return true; + } + + @Override + public NullWritable getCurrentKey() { + return key; + } + + @Override + public PeInputSplit getCurrentValue() { + return value; + } + + @Override + public float getProgress() { + if (readOver) { + return 1.0f; + } else { + return 0.0f; + } + } + + @Override + public void close() { + // do nothing + } + } + } + + /** + * MapReduce job that runs a performance evaluation client in each map task. + */ + public static class EvaluationMapTask + extends Mapper { + + /** configuration parameter name that contains the command */ + public final static String CMD_KEY = "EvaluationMapTask.command"; + /** configuration parameter name that contains the PE impl */ + public static final String PE_KEY = "EvaluationMapTask.performanceEvalImpl"; + + private Class cmd; + private PerformanceEvaluation pe; + + @Override + protected void setup(Context context) { + this.cmd = forName(context.getConfiguration().get(CMD_KEY), Test.class); + + // this is required so that extensions of PE are instantiated within the + // map reduce task... 
+ Class peClass = + forName(context.getConfiguration().get(PE_KEY), PerformanceEvaluation.class); + try { + this.pe = peClass.getConstructor(Configuration.class) + .newInstance(context.getConfiguration()); + } catch (Exception e) { + throw new IllegalStateException("Could not instantiate PE instance", e); + } + } + + private Class forName(String className, Class type) { + Class clazz; + try { + clazz = Class.forName(className).asSubclass(type); + } catch (ClassNotFoundException e) { + throw new IllegalStateException("Could not find class for name: " + className, e); + } + return clazz; + } + + @Override + protected void map(NullWritable key, PeInputSplit value, final Context context) + throws IOException, InterruptedException { + Status status = context::setStatus; + + // Evaluation task + pe.tableName = value.getTableName(); + long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(), + value.getRows(), value.getTotalRows(), + value.isFlushCommits(), value.isWriteToWAL(), + value.isUseTags(), value.getNoOfTags(), + ConnectionFactory.createConnection(context.getConfiguration()), status); + // Collect how much time the thing took. Report as map output and + // to the ELAPSED_TIME counter. + context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime); + context.getCounter(Counter.ROWS).increment(value.rows); + context.write(new LongWritable(value.startRow), new LongWritable(elapsedTime)); + context.progress(); + } + } + + /** + * If table does not already exist, create. + * @param admin Client to use checking. + * @return True if we created the table. + * @throws IOException if an operation on the table fails + */ + private boolean checkTable(RemoteAdmin admin) throws IOException { + TableDescriptor tableDescriptor = getDescriptor(); + if (this.presplitRegions > 0) { + // presplit requested + if (admin.isTableAvailable(tableDescriptor.getTableName().getName())) { + admin.deleteTable(tableDescriptor.getTableName().getName()); + } + + byte[][] splits = getSplits(); + for (int i=0; i < splits.length; i++) { + LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i])); + } + admin.createTable(tableDescriptor); + LOG.info("Table created with " + this.presplitRegions + " splits"); + } else { + boolean tableExists = admin.isTableAvailable(tableDescriptor.getTableName().getName()); + if (!tableExists) { + admin.createTable(tableDescriptor); + LOG.info("Table " + tableDescriptor + " created"); + } + } + + return admin.isTableAvailable(tableDescriptor.getTableName().getName()); + } + + protected TableDescriptor getDescriptor() { + if (TABLE_DESCRIPTOR == null) { + TABLE_DESCRIPTOR = new TableDescriptorBuilder.ModifyableTableDescriptor(tableName); + ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor = + new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILY_NAME); + familyDescriptor.setDataBlockEncoding(blockEncoding); + familyDescriptor.setCompressionType(compression); + if (inMemoryCF) { + familyDescriptor.setInMemory(true); + } + TABLE_DESCRIPTOR.setColumnFamily(familyDescriptor); + } + return TABLE_DESCRIPTOR; + } + + /** + * Generates splits based on total number of rows and specified split regions + * + * @return splits : array of byte [] + */ + protected byte[][] getSplits() { + if (this.presplitRegions == 0) { + return new byte[0][]; + } + + int numSplitPoints = presplitRegions - 1; + byte[][] splits = new byte[numSplitPoints][]; + int jump = this.R / this.presplitRegions; + for (int i = 0; i < numSplitPoints; i++) { + int 
rowkey = jump * (1 + i); + splits[i] = format(rowkey); + } + return splits; + } + + /** + * We're to run multiple clients concurrently. Setup a mapreduce job. Run + * one map per client. Then run a single reduce to sum the elapsed times. + * @param cmd Command to run. + */ + private void runNIsMoreThanOne(final Class cmd) + throws IOException, InterruptedException, ClassNotFoundException { + RemoteAdmin remoteAdmin = new RemoteAdmin(new Client(cluster), getConf()); + checkTable(remoteAdmin); + if (nomapred) { + doMultipleClients(cmd); + } else { + doMapReduce(cmd); + } + } + + /** + * Run all clients in this vm each to its own thread. + * @param cmd Command to run + * @throws IOException if creating a connection fails + */ + private void doMultipleClients(final Class cmd) throws IOException { + final List threads = new ArrayList<>(this.N); + final long[] timings = new long[this.N]; + final int perClientRows = R/N; + final TableName tableName = this.tableName; + final DataBlockEncoding encoding = this.blockEncoding; + final boolean flushCommits = this.flushCommits; + final Compression.Algorithm compression = this.compression; + final boolean writeToWal = this.writeToWAL; + final int preSplitRegions = this.presplitRegions; + final boolean useTags = this.useTags; + final int numTags = this.noOfTags; + final Connection connection = ConnectionFactory.createConnection(getConf()); + for (int i = 0; i < this.N; i++) { + final int index = i; + Thread t = new Thread("TestClient-" + i) { + @Override + public void run() { + super.run(); + PerformanceEvaluation pe = new PerformanceEvaluation(getConf()); + pe.tableName = tableName; + pe.blockEncoding = encoding; + pe.flushCommits = flushCommits; + pe.compression = compression; + pe.writeToWAL = writeToWal; + pe.presplitRegions = preSplitRegions; + pe.N = N; + pe.connection = connection; + pe.useTags = useTags; + pe.noOfTags = numTags; + try { + long elapsedTime = pe.runOneClient(cmd, index * perClientRows, + perClientRows, R, + flushCommits, writeToWAL, useTags, noOfTags, connection, + msg -> LOG.info("client-" + getName() + " " + msg)); + timings[index] = elapsedTime; + LOG.info("Finished " + getName() + " in " + elapsedTime + + "ms writing " + perClientRows + " rows"); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + }; + threads.add(t); + } + for (Thread t : threads) { + t.start(); + } + for (Thread t : threads) { + while (t.isAlive()) { + try { + t.join(); + } catch (InterruptedException e) { + LOG.debug("Interrupted, continuing" + e.toString()); + } + } + } + final String test = cmd.getSimpleName(); + LOG.info("[" + test + "] Summary of timings (ms): " + + Arrays.toString(timings)); + Arrays.sort(timings); + long total = 0; + for (int i = 0; i < this.N; i++) { + total += timings[i]; + } + LOG.info("[" + test + "]" + + "\tMin: " + timings[0] + "ms" + + "\tMax: " + timings[this.N - 1] + "ms" + + "\tAvg: " + (total / this.N) + "ms"); + } + + /** + * Run a mapreduce job. Run as many maps as asked-for clients. + * Before we start up the job, write out an input file with instruction + * per client regards which row they are to start on. + * @param cmd Command to run. 
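+   * The job runs EvaluationMapTask once per generated input line and a single LongSumReducer
+   * reduce that totals the per-client elapsed times.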
+ */ + private void doMapReduce(final Class cmd) + throws IOException, InterruptedException, ClassNotFoundException { + Configuration conf = getConf(); + Path inputDir = writeInputFile(conf); + conf.set(EvaluationMapTask.CMD_KEY, cmd.getName()); + conf.set(EvaluationMapTask.PE_KEY, getClass().getName()); + Job job = Job.getInstance(conf); + job.setJarByClass(PerformanceEvaluation.class); + job.setJobName("HBase Performance Evaluation"); + + job.setInputFormatClass(PeInputFormat.class); + PeInputFormat.setInputPaths(job, inputDir); + + job.setOutputKeyClass(LongWritable.class); + job.setOutputValueClass(LongWritable.class); + + job.setMapperClass(EvaluationMapTask.class); + job.setReducerClass(LongSumReducer.class); + job.setNumReduceTasks(1); + + job.setOutputFormatClass(TextOutputFormat.class); + TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs")); + TableMapReduceUtil.addDependencyJars(job); + TableMapReduceUtil.initCredentials(job); + job.waitForCompletion(true); + } + + /** + * Write input file of offsets-per-client for the mapreduce job. + * @param c Configuration + * @return Directory that contains file written. + * @throws IOException if creating the directory or the file fails + */ + private Path writeInputFile(final Configuration c) throws IOException { + SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss"); + Path jobdir = new Path(PERF_EVAL_DIR, formatter.format(new Date())); + Path inputDir = new Path(jobdir, "inputs"); + + FileSystem fs = FileSystem.get(c); + fs.mkdirs(inputDir); + Path inputFile = new Path(inputDir, "input.txt"); + // Make input random. + try (PrintStream out = new PrintStream(fs.create(inputFile))) { + Map m = new TreeMap<>(); + Hash h = MurmurHash.getInstance(); + int perClientRows = (this.R / this.N); + for (int i = 0; i < 10; i++) { + for (int j = 0; j < N; j++) { + StringBuilder s = new StringBuilder(); + s.append("tableName=").append(tableName); + s.append(", startRow=").append((j * perClientRows) + (i * (perClientRows / 10))); + s.append(", perClientRunRows=").append(perClientRows / 10); + s.append(", totalRows=").append(R); + s.append(", clients=").append(N); + s.append(", flushCommits=").append(flushCommits); + s.append(", writeToWAL=").append(writeToWAL); + s.append(", useTags=").append(useTags); + s.append(", noOfTags=").append(noOfTags); + + byte[] b = Bytes.toBytes(s.toString()); + int hash = h.hash(new ByteArrayHashKey(b, 0, b.length), -1); + m.put(hash, s.toString()); + } + } + for (Map.Entry e : m.entrySet()) { + out.println(e.getValue()); + } + } + return inputDir; + } + + /** + * Describes a command. + */ + static class CmdDescriptor { + private Class cmdClass; + private String name; + private String description; + + CmdDescriptor(Class cmdClass, String name, String description) { + this.cmdClass = cmdClass; + this.name = name; + this.description = description; + } + + public Class getCmdClass() { + return cmdClass; + } + + public String getName() { + return name; + } + + public String getDescription() { + return description; + } + } + + /** + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation} tests + * This makes the reflection logic a little easier to understand... 
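+   * Each Test subclass exposes a (Configuration, TestOptions, Status) constructor, and
+   * runOneClient() instantiates it reflectively, e.g. new RandomReadTest(conf, options, status).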
+   */
+  static class TestOptions {
+    private int startRow;
+    private int perClientRunRows;
+    private int totalRows;
+    private TableName tableName;
+    private boolean flushCommits;
+    private boolean writeToWAL;
+    private boolean useTags;
+    private int noOfTags;
+    private Connection connection;
+
+    TestOptions(int startRow, int perClientRunRows, int totalRows, TableName tableName,
+        boolean flushCommits, boolean writeToWAL, boolean useTags,
+        int noOfTags, Connection connection) {
+      this.startRow = startRow;
+      this.perClientRunRows = perClientRunRows;
+      this.totalRows = totalRows;
+      this.tableName = tableName;
+      this.flushCommits = flushCommits;
+      this.writeToWAL = writeToWAL;
+      this.useTags = useTags;
+      this.noOfTags = noOfTags;
+      this.connection = connection;
+    }
+
+    public int getStartRow() {
+      return startRow;
+    }
+
+    public int getPerClientRunRows() {
+      return perClientRunRows;
+    }
+
+    public int getTotalRows() {
+      return totalRows;
+    }
+
+    public TableName getTableName() {
+      return tableName;
+    }
+
+    public boolean isFlushCommits() {
+      return flushCommits;
+    }
+
+    public boolean isWriteToWAL() {
+      return writeToWAL;
+    }
+
+    public Connection getConnection() {
+      return connection;
+    }
+
+    public boolean isUseTags() {
+      return this.useTags;
+    }
+
+    public int getNumTags() {
+      return this.noOfTags;
+    }
+  }
+
+  /*
+   * A test.
+   * Subclass to particularize what happens per row.
+   */
+  static abstract class Test {
+    // The statics below make it so that, when Tests all run in the same JVM, each one
+    // gets a differently seeded Random.
+    private static final Random randomSeed =
+      new Random(System.currentTimeMillis());
+    private static long nextRandomSeed() {
+      return randomSeed.nextLong();
+    }
+    protected final Random rand = new Random(nextRandomSeed());
+
+    protected final int startRow;
+    protected final int perClientRunRows;
+    protected final int totalRows;
+    private final Status status;
+    protected TableName tableName;
+    protected volatile Configuration conf;
+    protected boolean writeToWAL;
+    protected boolean useTags;
+    protected int noOfTags;
+    protected Connection connection;
+
+    /**
+     * Note that all subclasses of this class must provide a public constructor
+     * that has the exact same list of arguments.
+     */
+    Test(final Configuration conf, final TestOptions options, final Status status) {
+      super();
+      this.startRow = options.getStartRow();
+      this.perClientRunRows = options.getPerClientRunRows();
+      this.totalRows = options.getTotalRows();
+      this.status = status;
+      this.tableName = options.getTableName();
+      this.conf = conf;
+      this.writeToWAL = options.isWriteToWAL();
+      this.useTags = options.isUseTags();
+      this.noOfTags = options.getNumTags();
+      this.connection = options.getConnection();
+    }
+
+    protected String generateStatus(final int sr, final int i, final int lr) {
+      return sr + "/" + i + "/" + lr;
+    }
+
+    protected int getReportingPeriod() {
+      int period = this.perClientRunRows / 10;
+      return period == 0? this.perClientRunRows: period;
+    }
+
+    abstract void testTakedown() throws IOException;
+
+    /**
+     * Runs the test.
+     * @return Elapsed time, in milliseconds.
+ * @throws IOException if something in the test fails + */ + long test() throws IOException { + testSetup(); + LOG.info("Timed test starting in thread " + Thread.currentThread().getName()); + final long startTime = System.nanoTime(); + try { + testTimed(); + } finally { + testTakedown(); + } + return (System.nanoTime() - startTime) / 1000000; + } + + abstract void testSetup() throws IOException; + + /** + * Provides an extension point for tests that don't want a per row invocation. + */ + void testTimed() throws IOException { + int lastRow = this.startRow + this.perClientRunRows; + // Report on completion of 1/10th of total. + for (int i = this.startRow; i < lastRow; i++) { + testRow(i); + if (status != null && i > 0 && (i % getReportingPeriod()) == 0) { + status.setStatus(generateStatus(this.startRow, i, lastRow)); + } + } + } + + /** + * Test for individual row. + * @param i Row index. + */ + abstract void testRow(final int i) throws IOException; + } + + static abstract class TableTest extends Test { + protected Table table; + + public TableTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testSetup() throws IOException { + this.table = connection.getTable(tableName); + } + + @Override + void testTakedown() throws IOException { + table.close(); + } + } + + static abstract class BufferedMutatorTest extends Test { + protected BufferedMutator mutator; + protected boolean flushCommits; + + public BufferedMutatorTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + this.flushCommits = options.isFlushCommits(); + } + + @Override + void testSetup() throws IOException { + this.mutator = connection.getBufferedMutator(tableName); + } + + @Override + void testTakedown() throws IOException { + if (flushCommits) { + this.mutator.flush(); + } + mutator.close(); + } + } + + static class RandomSeekScanTest extends TableTest { + RandomSeekScanTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testRow(final int i) throws IOException { + Scan scan = new Scan().withStartRow(getRandomRow(this.rand, this.totalRows)); + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + scan.setFilter(new WhileMatchFilter(new PageFilter(120))); + ResultScanner s = this.table.getScanner(scan); + s.close(); + } + + @Override + protected int getReportingPeriod() { + int period = this.perClientRunRows / 100; + return period == 0? 
this.perClientRunRows: period; + } + } + + @SuppressWarnings("unused") + static abstract class RandomScanWithRangeTest extends TableTest { + RandomScanWithRangeTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testRow(final int i) throws IOException { + Pair startAndStopRow = getStartAndStopRow(); + Scan scan = new Scan().withStartRow(startAndStopRow.getFirst()) + .withStopRow(startAndStopRow.getSecond()); + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + ResultScanner s = this.table.getScanner(scan); + int count = 0; + for (Result rr = null; (rr = s.next()) != null;) { + count++; + } + + if (i % 100 == 0) { + LOG.info(String.format("Scan for key range %s - %s returned %s rows", + Bytes.toString(startAndStopRow.getFirst()), + Bytes.toString(startAndStopRow.getSecond()), count)); + } + + s.close(); + } + + protected abstract Pair getStartAndStopRow(); + + protected Pair generateStartAndStopRows(int maxRange) { + int start = this.rand.nextInt(Integer.MAX_VALUE) % totalRows; + int stop = start + maxRange; + return new Pair<>(format(start), format(stop)); + } + + @Override + protected int getReportingPeriod() { + int period = this.perClientRunRows / 100; + return period == 0? this.perClientRunRows: period; + } + } + + static class RandomScanWithRange10Test extends RandomScanWithRangeTest { + RandomScanWithRange10Test(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + protected Pair getStartAndStopRow() { + return generateStartAndStopRows(10); + } + } + + static class RandomScanWithRange100Test extends RandomScanWithRangeTest { + RandomScanWithRange100Test(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + protected Pair getStartAndStopRow() { + return generateStartAndStopRows(100); + } + } + + static class RandomScanWithRange1000Test extends RandomScanWithRangeTest { + RandomScanWithRange1000Test(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + protected Pair getStartAndStopRow() { + return generateStartAndStopRows(1000); + } + } + + static class RandomScanWithRange10000Test extends RandomScanWithRangeTest { + RandomScanWithRange10000Test(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + protected Pair getStartAndStopRow() { + return generateStartAndStopRows(10000); + } + } + + static class RandomReadTest extends TableTest { + RandomReadTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testRow(final int i) throws IOException { + Get get = new Get(getRandomRow(this.rand, this.totalRows)); + get.addColumn(FAMILY_NAME, QUALIFIER_NAME); + this.table.get(get); + } + + @Override + protected int getReportingPeriod() { + int period = this.perClientRunRows / 100; + return period == 0? 
this.perClientRunRows: period; + } + } + + static class RandomWriteTest extends BufferedMutatorTest { + RandomWriteTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testRow(final int i) throws IOException { + byte[] row = getRandomRow(this.rand, this.totalRows); + Put put = new Put(row); + byte[] value = generateData(this.rand, ROW_LENGTH); + if (useTags) { + byte[] tag = generateData(this.rand, TAG_LENGTH); + Tag[] tags = new Tag[noOfTags]; + for (int n = 0; n < noOfTags; n++) { + Tag t = new ArrayBackedTag((byte) n, tag); + tags[n] = t; + } + KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, + value, tags); + put.add(kv); + } else { + put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value); + } + put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL); + mutator.mutate(put); + } + } + + static class ScanTest extends TableTest { + private ResultScanner testScanner; + + ScanTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testTakedown() throws IOException { + if (this.testScanner != null) { + this.testScanner.close(); + } + super.testTakedown(); + } + + @Override + void testRow(final int i) throws IOException { + if (this.testScanner == null) { + Scan scan = new Scan().withStartRow(format(this.startRow)); + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + this.testScanner = table.getScanner(scan); + } + testScanner.next(); + } + } + + static class SequentialReadTest extends TableTest { + SequentialReadTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testRow(final int i) throws IOException { + Get get = new Get(format(i)); + get.addColumn(FAMILY_NAME, QUALIFIER_NAME); + table.get(get); + } + } + + static class SequentialWriteTest extends BufferedMutatorTest { + SequentialWriteTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testRow(final int i) throws IOException { + byte[] row = format(i); + Put put = new Put(row); + byte[] value = generateData(this.rand, ROW_LENGTH); + if (useTags) { + byte[] tag = generateData(this.rand, TAG_LENGTH); + Tag[] tags = new Tag[noOfTags]; + for (int n = 0; n < noOfTags; n++) { + Tag t = new ArrayBackedTag((byte) n, tag); + tags[n] = t; + } + KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, + value, tags); + put.add(kv); + } else { + put.addColumn(FAMILY_NAME, QUALIFIER_NAME, value); + } + put.setDurability(writeToWAL ? 
Durability.SYNC_WAL : Durability.SKIP_WAL); + mutator.mutate(put); + } + } + + static class FilteredScanTest extends TableTest { + protected static final Logger LOG = LoggerFactory.getLogger(FilteredScanTest.class.getName()); + + FilteredScanTest(Configuration conf, TestOptions options, Status status) { + super(conf, options, status); + } + + @Override + void testRow(int i) throws IOException { + byte[] value = generateValue(this.rand); + Scan scan = constructScan(value); + try (ResultScanner scanner = this.table.getScanner(scan)) { + while (scanner.next() != null) { + } + } + } + + protected Scan constructScan(byte[] valuePrefix) { + Filter filter = new SingleColumnValueFilter( + FAMILY_NAME, QUALIFIER_NAME, CompareOperator.EQUAL, + new BinaryComparator(valuePrefix) + ); + Scan scan = new Scan(); + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + scan.setFilter(filter); + return scan; + } + } + + /** + * Format passed integer. + * @param number the integer to format + * @return Returns zero-prefixed 10-byte wide decimal version of passed number (Does absolute in + * case number is negative). + */ + public static byte [] format(final int number) { + byte[] b = new byte[DEFAULT_ROW_PREFIX_LENGTH + 10]; + int d = Math.abs(number); + for (int i = b.length - 1; i >= 0; i--) { + b[i] = (byte)((d % 10) + '0'); + d /= 10; + } + return b; + } + + public static byte[] generateData(final Random r, int length) { + byte[] b = new byte [length]; + int i; + + for (i = 0; i < (length-8); i += 8) { + b[i] = (byte) (65 + r.nextInt(26)); + b[i+1] = b[i]; + b[i+2] = b[i]; + b[i+3] = b[i]; + b[i+4] = b[i]; + b[i+5] = b[i]; + b[i+6] = b[i]; + b[i+7] = b[i]; + } + + byte a = (byte) (65 + r.nextInt(26)); + for (; i < length; i++) { + b[i] = a; + } + return b; + } + + public static byte[] generateValue(final Random r) { + byte [] b = new byte [ROW_LENGTH]; + r.nextBytes(b); + return b; + } + + static byte[] getRandomRow(final Random random, final int totalRows) { + return format(random.nextInt(Integer.MAX_VALUE) % totalRows); + } + + long runOneClient(final Class cmd, final int startRow, + final int perClientRunRows, final int totalRows, + boolean flushCommits, boolean writeToWAL, boolean useTags, int noOfTags, + Connection connection, final Status status) throws IOException { + status.setStatus("Start " + cmd + " at offset " + startRow + " for " + + perClientRunRows + " rows"); + long totalElapsedTime; + + TestOptions options = new TestOptions(startRow, perClientRunRows, + totalRows, tableName, flushCommits, writeToWAL, useTags, noOfTags, connection); + final Test t; + try { + Constructor constructor = cmd.getDeclaredConstructor( + Configuration.class, TestOptions.class, Status.class); + t = constructor.newInstance(this.conf, options, status); + } catch (NoSuchMethodException e) { + throw new IllegalArgumentException("Invalid command class: " + + cmd.getName() + ". It does not provide a constructor as described by" + + "the javadoc comment. 
Available constructors are: " + + Arrays.toString(cmd.getConstructors())); + } catch (Exception e) { + throw new IllegalStateException("Failed to construct command class", e); + } + totalElapsedTime = t.test(); + + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + + "ms at offset " + startRow + " for " + perClientRunRows + " rows"); + return totalElapsedTime; + } + + private void runNIsOne(final Class cmd) { + Status status = LOG::info; + + RemoteAdmin admin; + try { + Client client = new Client(cluster); + admin = new RemoteAdmin(client, getConf()); + checkTable(admin); + runOneClient(cmd, 0, this.R, this.R, this.flushCommits, this.writeToWAL, + this.useTags, this.noOfTags, this.connection, status); + } catch (Exception e) { + LOG.error("Failed", e); + } + } + + private void runTest(final Class cmd) + throws IOException, InterruptedException, ClassNotFoundException { + if (N == 1) { + // If there is only one client and one HRegionServer, we assume nothing + // has been set up at all. + runNIsOne(cmd); + } else { + // Else, run + runNIsMoreThanOne(cmd); + } + } + + protected void printUsage() { + printUsage(null); + } + + protected void printUsage(final String message) { + if (message != null && message.length() > 0) { + System.err.println(message); + } + System.err.println("Usage: java " + this.getClass().getName() + " \\"); + System.err.println(" [--nomapred] [--rows=ROWS] [--table=NAME] \\"); + System.err.println(" [--compress=TYPE] [--blockEncoding=TYPE] " + + "[-D]* "); + System.err.println(); + System.err.println("General Options:"); + System.err.println(" nomapred Run multiple clients using threads " + + "(rather than use mapreduce)"); + System.err.println(" rows Rows each client runs. Default: One million"); + System.err.println(); + System.err.println("Table Creation / Write Tests:"); + System.err.println(" table Alternate table name. Default: 'TestTable'"); + System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); + System.err.println(" flushCommits Used to determine if the test should flush the table. " + + "Default: false"); + System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); + System.err.println(" presplit Create presplit table. Recommended for accurate perf " + + "analysis (see guide). Default: disabled"); + System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + + "Default : false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true."); + System.err.println(); + System.err.println("Read Tests:"); + System.err.println(" inmemory Tries to keep the HFiles of the CF inmemory as far as " + + "possible. Not guaranteed that reads are always served from inmemory. Default: false"); + System.err.println(); + System.err.println(" Note: -D properties will be applied to the conf used. "); + System.err.println(" For example: "); + System.err.println(" -Dmapreduce.output.fileoutputformat.compress=true"); + System.err.println(" -Dmapreduce.task.timeout=60000"); + System.err.println(); + System.err.println("Command:"); + for (CmdDescriptor command : commands.values()) { + System.err.println(String.format(" %-15s %s", command.getName(), command.getDescription())); + } + System.err.println(); + System.err.println("Args:"); + System.err.println(" nclients Integer. Required. 
Total number of " + + "clients (and HRegionServers)"); + System.err.println(" running: 1 <= value <= 500"); + System.err.println("Examples:"); + System.err.println(" To run a single evaluation client:"); + System.err.println(" $ hbase " + this.getClass().getName() + + " sequentialWrite 1"); + } + + private void getArgs(final int start, final String[] args) { + if (start + 1 > args.length) { + throw new IllegalArgumentException("must supply the number of clients"); + } + N = Integer.parseInt(args[start]); + if (N < 1) { + throw new IllegalArgumentException("Number of clients must be > 1"); + } + // Set total number of rows to write. + R = R * N; + } + + @Override + public int run(String[] args) throws Exception { + // Process command-line args. TODO: Better cmd-line processing + // (but hopefully something not as painful as cli options). + int errCode = -1; + if (args.length < 1) { + printUsage(); + return errCode; + } + + try { + for (int i = 0; i < args.length; i++) { + String cmd = args[i]; + if (cmd.equals("-h") || cmd.startsWith("--h")) { + printUsage(); + errCode = 0; + break; + } + + final String nmr = "--nomapred"; + if (cmd.startsWith(nmr)) { + nomapred = true; + continue; + } + + final String rows = "--rows="; + if (cmd.startsWith(rows)) { + R = Integer.parseInt(cmd.substring(rows.length())); + continue; + } + + final String table = "--table="; + if (cmd.startsWith(table)) { + this.tableName = TableName.valueOf(cmd.substring(table.length())); + continue; + } + + final String compress = "--compress="; + if (cmd.startsWith(compress)) { + this.compression = Compression.Algorithm.valueOf(cmd.substring(compress.length())); + continue; + } + + final String blockEncoding = "--blockEncoding="; + if (cmd.startsWith(blockEncoding)) { + this.blockEncoding = DataBlockEncoding.valueOf(cmd.substring(blockEncoding.length())); + continue; + } + + final String flushCommits = "--flushCommits="; + if (cmd.startsWith(flushCommits)) { + this.flushCommits = Boolean.parseBoolean(cmd.substring(flushCommits.length())); + continue; + } + + final String writeToWAL = "--writeToWAL="; + if (cmd.startsWith(writeToWAL)) { + this.writeToWAL = Boolean.parseBoolean(cmd.substring(writeToWAL.length())); + continue; + } + + final String presplit = "--presplit="; + if (cmd.startsWith(presplit)) { + this.presplitRegions = Integer.parseInt(cmd.substring(presplit.length())); + continue; + } + + final String inMemory = "--inmemory="; + if (cmd.startsWith(inMemory)) { + this.inMemoryCF = Boolean.parseBoolean(cmd.substring(inMemory.length())); + continue; + } + + this.connection = ConnectionFactory.createConnection(getConf()); + + final String useTags = "--usetags="; + if (cmd.startsWith(useTags)) { + this.useTags = Boolean.parseBoolean(cmd.substring(useTags.length())); + continue; + } + + final String noOfTags = "--nooftags="; + if (cmd.startsWith(noOfTags)) { + this.noOfTags = Integer.parseInt(cmd.substring(noOfTags.length())); + continue; + } + + final String host = "--host="; + if (cmd.startsWith(host)) { + cluster.add(cmd.substring(host.length())); + continue; + } + + Class cmdClass = determineCommandClass(cmd); + if (cmdClass != null) { + getArgs(i + 1, args); + if (cluster.isEmpty()) { + String s = conf.get("stargate.hostname", "localhost"); + if (s.contains(":")) { + cluster.add(s); + } else { + cluster.add(s, conf.getInt("stargate.port", 8080)); + } + } + runTest(cmdClass); + errCode = 0; + break; + } + + printUsage(); + break; + } + } catch (Exception e) { + LOG.error("Failed", e); + } + + return errCode; + } + + 
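+  /**
+   * Resolves a command name given on the command line (e.g. "sequentialWrite") to the Test
+   * class registered in the commands map; returns null when the name is not a known command.
+   */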
private Class determineCommandClass(String cmd) { + CmdDescriptor descriptor = commands.get(cmd); + return descriptor != null ? descriptor.getCmdClass() : null; + } + + public static void main(final String[] args) throws Exception { + int res = ToolRunner.run(new PerformanceEvaluation(HBaseConfiguration.create()), args); + System.exit(res); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java new file mode 100755 index 00000000..86289ec5 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java @@ -0,0 +1,669 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.StringWriter; + +import java.util.HashMap; +import java.util.Map; +import javax.ws.rs.core.MediaType; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; +import javax.xml.bind.Unmarshaller; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +public class RowResourceBase { + protected static final String TABLE = "TestRowResource"; + + protected static final TableName TABLE_NAME = TableName.valueOf(TABLE); + + protected static final String CFA = "a"; + protected static final String CFB = "b"; + protected static final String COLUMN_1 = CFA + ":1"; + protected static final String COLUMN_2 = CFB + ":2"; + protected static final String COLUMN_3 = CFA + ":"; + protected static final String ROW_1 = "testrow1"; + protected static final String VALUE_1 = "testvalue1"; + protected static final String ROW_2 = "testrow2"; + protected static final String VALUE_2 = "testvalue2"; + 
protected static final String ROW_3 = "testrow3"; + protected static final String VALUE_3 = "testvalue3"; + protected static final String ROW_4 = "testrow4"; + protected static final String VALUE_4 = "testvalue4"; + protected static final String VALUE_5 = "5"; + protected static final String VALUE_6 = "6"; + + protected static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + protected static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + protected static Client client; + protected static JAXBContext context; + protected static Marshaller xmlMarshaller; + protected static Unmarshaller xmlUnmarshaller; + protected static Configuration conf; + protected static ObjectMapper jsonMapper; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniCluster(3); + REST_TEST_UTIL.startServletContainer(conf); + context = JAXBContext.newInstance( + CellModel.class, + CellSetModel.class, + RowModel.class); + xmlMarshaller = context.createMarshaller(); + xmlUnmarshaller = context.createUnmarshaller(); + jsonMapper = new JacksonJaxbJsonProvider() + .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeMethod() throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + if (admin.tableExists(TABLE_NAME)) { + TEST_UTIL.deleteTable(TABLE_NAME); + } + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE)); + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder + .newBuilder(Bytes.toBytes(CFB)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + admin.createTable(tableDescriptorBuilder.build()); + } + + @After + public void afterMethod() throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + if (admin.tableExists(TABLE_NAME)) { + TEST_UTIL.deleteTable(TABLE_NAME); + } + } + + static Response putValuePB(String table, String row, String column, + String value) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append('/'); + path.append(column); + return putValuePB(path.toString(), table, row, column, value); + } + + static Response putValuePB(String url, String table, String row, + String column, String value) throws IOException { + RowModel rowModel = new RowModel(row); + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(value))); + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, + cellSetModel.createProtobufOutput()); + Thread.yield(); + return response; + } + + protected static void checkValueXML(String url, String table, String row, + String column, String value) throws IOException, JAXBException { + Response response = getValueXML(url); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + CellSetModel cellSet = (CellSetModel) + 
xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + RowModel rowModel = cellSet.getRows().get(0); + CellModel cell = rowModel.getCells().get(0); + assertEquals(Bytes.toString(cell.getColumn()), column); + assertEquals(Bytes.toString(cell.getValue()), value); + } + + protected static void checkValueXML(String table, String row, String column, + String value) throws IOException, JAXBException { + Response response = getValueXML(table, row, column); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + CellSetModel cellSet = (CellSetModel) + xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + RowModel rowModel = cellSet.getRows().get(0); + CellModel cell = rowModel.getCells().get(0); + assertEquals(Bytes.toString(cell.getColumn()), column); + assertEquals(Bytes.toString(cell.getValue()), value); + } + + protected static void checkIncrementValueXML(String table, String row, String column, long value) + throws IOException, JAXBException { + Response response1 = getValueXML(table, row, column); + assertEquals(200, response1.getCode()); + assertEquals(Constants.MIMETYPE_XML, response1.getHeader("content-type")); + CellSetModel cellSet = (CellSetModel) + xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response1.getBody())); + RowModel rowModel = cellSet.getRows().get(0); + CellModel cell = rowModel.getCells().get(0); + assertEquals(Bytes.toString(cell.getColumn()), column); + assertEquals(Bytes.toLong(cell.getValue()), value); + } + + protected static Response getValuePB(String url) throws IOException { + Response response = client.get(url, Constants.MIMETYPE_PROTOBUF); + return response; + } + + protected static Response putValueXML(String table, String row, String column, + String value) throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append('/'); + path.append(column); + return putValueXML(path.toString(), table, row, column, value); + } + + protected static Response putValueXML(String url, String table, String row, + String column, String value) throws IOException, JAXBException { + RowModel rowModel = new RowModel(row); + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(value))); + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + StringWriter writer = new StringWriter(); + xmlMarshaller.marshal(cellSetModel, writer); + Response response = client.put(url, Constants.MIMETYPE_XML, + Bytes.toBytes(writer.toString())); + Thread.yield(); + return response; + } + + protected static Response getValuePB(String table, String row, String column) + throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append('/'); + path.append(column); + return getValuePB(path.toString()); + } + + protected static void checkValuePB(String table, String row, String column, + String value) throws IOException { + Response response = getValuePB(table, row, column); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + CellSetModel cellSet = new CellSetModel(); + cellSet.getObjectFromMessage(response.getBody()); + RowModel rowModel = cellSet.getRows().get(0); + CellModel cell = rowModel.getCells().get(0); + assertEquals(Bytes.toString(cell.getColumn()), column); + 
assertEquals(Bytes.toString(cell.getValue()), value); + } + + protected static void checkIncrementValuePB(String table, String row, String column, + long value) throws IOException { + Response response = getValuePB(table, row, column); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + CellSetModel cellSet = new CellSetModel(); + cellSet.getObjectFromMessage(response.getBody()); + RowModel rowModel = cellSet.getRows().get(0); + CellModel cell = rowModel.getCells().get(0); + assertEquals(Bytes.toString(cell.getColumn()), column); + assertEquals(Bytes.toLong(cell.getValue()), value); + } + + protected static Response checkAndPutValuePB(String url, String table, String row, String column, + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException { + RowModel rowModel = new RowModel(row); + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(valueToPut))); + + if (otherCells != null) { + for (Map.Entry entry : otherCells.entrySet()) { + rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), + Bytes.toBytes(entry.getValue()))); + } + } + + // This Cell need to be added as last cell. + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(valueToCheck))); + + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, + cellSetModel.createProtobufOutput()); + Thread.yield(); + return response; + } + + protected static Response checkAndPutValuePB(String table, String row, + String column, String valueToCheck, String valueToPut) throws IOException { + return checkAndPutValuePB(table,row,column,valueToCheck,valueToPut,null); + } + + protected static Response checkAndPutValuePB(String table, String row, String column, + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=put"); + return checkAndPutValuePB(path.toString(), table, row, column, + valueToCheck, valueToPut, otherCells); + } + + protected static Response checkAndPutValueXML(String url, String table, String row, String column, + String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException, JAXBException { + RowModel rowModel = new RowModel(row); + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(valueToPut))); + + if (otherCells != null) { + for (Map.Entry entry : otherCells.entrySet()) { + rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), + Bytes.toBytes(entry.getValue()))); + } + } + + // This Cell need to be added as last cell. 
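+    // (HBase REST's check-and-put compares against the last cell of the submitted row,
+    // which is why it is added after the other cells)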
+ rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(valueToCheck))); + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + StringWriter writer = new StringWriter(); + xmlMarshaller.marshal(cellSetModel, writer); + Response response = client.put(url, Constants.MIMETYPE_XML, + Bytes.toBytes(writer.toString())); + Thread.yield(); + return response; + } + + protected static Response checkAndPutValueXML(String table, String row, String column, + String valueToCheck, String valueToPut) throws IOException, JAXBException { + return checkAndPutValueXML(table,row,column,valueToCheck,valueToPut, null); + } + + protected static Response checkAndPutValueXML(String table, String row, + String column, String valueToCheck, String valueToPut, HashMap otherCells) + throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=put"); + return checkAndPutValueXML(path.toString(), table, row, column, + valueToCheck, valueToPut, otherCells); + } + + protected static Response checkAndDeleteXML(String url, String table, + String row, String column, String valueToCheck, HashMap cellsToDelete) + throws IOException, JAXBException { + RowModel rowModel = new RowModel(row); + + if (cellsToDelete != null) { + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), + Bytes.toBytes(entry.getValue()))); + } + } + // Add this at the end + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(valueToCheck))); + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + StringWriter writer = new StringWriter(); + xmlMarshaller.marshal(cellSetModel, writer); + Response response = client.put(url, Constants.MIMETYPE_XML, + Bytes.toBytes(writer.toString())); + Thread.yield(); + return response; + } + + protected static Response checkAndDeleteXML(String table, String row, + String column, String valueToCheck) throws IOException, JAXBException { + return checkAndDeleteXML(table, row, column, valueToCheck, null); + } + + protected static Response checkAndDeleteXML(String table, String row, + String column, String valueToCheck, HashMap cellsToDelete) + throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=delete"); + return checkAndDeleteXML(path.toString(), table, row, column, valueToCheck, cellsToDelete); + } + + protected static Response checkAndDeleteJson(String table, String row, + String column, String valueToCheck) throws IOException { + return checkAndDeleteJson(table, row, column, valueToCheck, null); + } + + protected static Response checkAndDeleteJson(String table, String row, + String column, String valueToCheck, HashMap cellsToDelete) + throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=delete"); + return checkAndDeleteJson(path.toString(), table, row, column, valueToCheck, cellsToDelete); + } + + protected static Response checkAndDeleteJson(String url, String table, + String row, String column, String valueToCheck, HashMap cellsToDelete) + throws IOException { + RowModel rowModel = new RowModel(row); + + if (cellsToDelete != null) { + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel.addCell(new 
CellModel(Bytes.toBytes(entry.getKey()), + Bytes.toBytes(entry.getValue()))); + } + } + // Add this at the end + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(valueToCheck))); + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + String jsonString = jsonMapper.writeValueAsString(cellSetModel); + Response response = client.put(url, Constants.MIMETYPE_JSON, + Bytes.toBytes(jsonString)); + Thread.yield(); + return response; + } + + protected static Response checkAndDeletePB(String table, String row, String column, String value) + throws IOException { + return checkAndDeletePB(table, row, column, value, null); + } + + protected static Response checkAndDeletePB(String table, String row, + String column, String value, HashMap cellsToDelete) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=delete"); + return checkAndDeleteValuePB(path.toString(), table, row, column, value, cellsToDelete); + } + protected static Response checkAndDeleteValuePB(String url, String table, + String row, String column, String valueToCheck, HashMap cellsToDelete) + throws IOException { + RowModel rowModel = new RowModel(row); + + if (cellsToDelete != null) { + for (Map.Entry entry : cellsToDelete.entrySet()) { + rowModel.addCell(new CellModel(Bytes.toBytes(entry.getKey()), + Bytes.toBytes(entry.getValue()))); + } + } + // Add this at the end + rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes + .toBytes(valueToCheck))); + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + Response response = client.put(url, Constants.MIMETYPE_PROTOBUF, + cellSetModel.createProtobufOutput()); + Thread.yield(); + return response; + } + + protected static Response getValueXML(String table, String startRow, + String endRow, String column) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(startRow); + path.append(","); + path.append(endRow); + path.append('/'); + path.append(column); + return getValueXML(path.toString()); + } + + protected static Response getValueXML(String url) throws IOException { + Response response = client.get(url, Constants.MIMETYPE_XML); + return response; + } + + protected static Response getValueJson(String url) throws IOException { + Response response = client.get(url, Constants.MIMETYPE_JSON); + return response; + } + + protected static Response deleteValue(String table, String row, String column) + throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append('/'); + path.append(column); + Response response = client.delete(path.toString()); + Thread.yield(); + return response; + } + + protected static Response getValueXML(String table, String row, String column) + throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append('/'); + path.append(column); + return getValueXML(path.toString()); + } + + protected static Response deleteRow(String table, String row) + throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + Response response = client.delete(path.toString()); + Thread.yield(); + return response; + } + + protected 
static Response getValueJson(String table, String row, + String column) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append('/'); + path.append(column); + return getValueJson(path.toString()); + } + + protected static void checkValueJSON(String table, String row, String column, + String value) throws IOException { + Response response = getValueJson(table, row, column); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); + RowModel rowModel = cellSet.getRows().get(0); + CellModel cell = rowModel.getCells().get(0); + assertEquals(Bytes.toString(cell.getColumn()), column); + assertEquals(Bytes.toString(cell.getValue()), value); + } + + protected static void checkIncrementValueJSON(String table, String row, String column, + long value) throws IOException { + Response response = getValueJson(table, row, column); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + ObjectMapper mapper = new JacksonJaxbJsonProvider() + .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); + RowModel rowModel = cellSet.getRows().get(0); + CellModel cell = rowModel.getCells().get(0); + assertEquals(Bytes.toString(cell.getColumn()), column); + assertEquals(Bytes.toLong(cell.getValue()), value); + } + + protected static Response putValueJson(String table, String row, String column, + String value) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append('/'); + path.append(column); + return putValueJson(path.toString(), table, row, column, value); + } + + protected static Response putValueJson(String url, String table, String row, String column, + String value) throws IOException { + RowModel rowModel = new RowModel(row); + rowModel.addCell(new CellModel(Bytes.toBytes(column), + Bytes.toBytes(value))); + CellSetModel cellSetModel = new CellSetModel(); + cellSetModel.addRow(rowModel); + String jsonString = jsonMapper.writeValueAsString(cellSetModel); + Response response = client.put(url, Constants.MIMETYPE_JSON, + Bytes.toBytes(jsonString)); + Thread.yield(); + return response; + } + + protected static Response appendValueXML(String table, String row, String column, + String value) throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=append"); + return putValueXML(path.toString(), table, row, column, value); + } + + protected static Response appendValuePB(String table, String row, String column, + String value) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=append"); + return putValuePB(path.toString(), table, row, column, value); + } + + protected static Response appendValueJson(String table, String row, String column, + String value) throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); 
+ path.append(table); + path.append('/'); + path.append(row); + path.append("?check=append"); + return putValueJson(path.toString(), table, row, column, value); + } + + protected static Response incrementValueXML(String table, String row, String column, + String value) throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=increment"); + return putValueXML(path.toString(), table, row, column, value); + } + + protected static Response incrementValuePB(String table, String row, String column, + String value) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=increment"); + return putValuePB(path.toString(), table, row, column, value); + } + + protected static Response incrementValueJson(String table, String row, String column, + String value) throws IOException, JAXBException { + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(table); + path.append('/'); + path.append(row); + path.append("?check=increment"); + return putValueJson(path.toString(), table, row, column, value); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java new file mode 100755 index 00000000..f4f9c757 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestDeleteRow.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestDeleteRow extends RowResourceBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestDeleteRow.class); + + @Test + public void testDeleteNonExistentColumn() throws Exception { + Response response = putValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + + response = checkAndDeleteJson(TABLE, ROW_1, COLUMN_1, VALUE_2); + assertEquals(304, response.getCode()); + assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode()); + + response = checkAndDeleteJson(TABLE, ROW_2, COLUMN_1, VALUE_2); + assertEquals(304, response.getCode()); + assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode()); + + response = checkAndDeleteJson(TABLE, ROW_1, "dummy", VALUE_1); + assertEquals(400, response.getCode()); + assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode()); + + response = checkAndDeleteJson(TABLE, ROW_1, "dummy:test", VALUE_1); + assertEquals(404, response.getCode()); + assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode()); + + response = checkAndDeleteJson(TABLE, ROW_1, "a:test", VALUE_1); + assertEquals(304, response.getCode()); + assertEquals(200, getValueJson(TABLE, ROW_1, COLUMN_1).getCode()); + } + + @Test + public void testDeleteXML() throws IOException, JAXBException { + Response response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = putValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); + assertEquals(200, response.getCode()); + checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); + + response = deleteValue(TABLE, ROW_1, COLUMN_1); + assertEquals(200, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); + + response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + response = getValueXML(TABLE, ROW_1, COLUMN_2); + assertEquals(404, response.getCode()); + + //Delete a row in non existent table + response = deleteValue("dummy", ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + + //Delete non existent column + response = deleteValue(TABLE, ROW_1, "dummy"); + assertEquals(404, response.getCode()); + } + +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java new file mode 100755 index 00000000..42e38fc9 --- /dev/null +++ 
b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGZIPResponseWrapper.java @@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rest;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.rest.filter.GZIPResponseStream;
+import org.apache.hadoop.hbase.rest.filter.GZIPResponseWrapper;
+import org.apache.hadoop.hbase.testclassification.RestTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({RestTests.class, SmallTests.class})
+public class TestGZIPResponseWrapper {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestGZIPResponseWrapper.class);
+
+  private final HttpServletResponse response = mock(HttpServletResponse.class);
+  private final GZIPResponseWrapper wrapper = new GZIPResponseWrapper(response);
+
+  /**
+   * The wrapper should forward all headers to the wrapped response except "content-length".
+   */
+  @Test
+  public void testHeader() throws IOException {
+    wrapper.setStatus(200);
+    verify(response).setStatus(200);
+    wrapper.addHeader("header", "header value");
+    verify(response).addHeader("header", "header value");
+    wrapper.addHeader("content-length", "header value2");
+    verify(response, never()).addHeader("content-length", "header value2");
+
+    wrapper.setIntHeader("header", 5);
+    verify(response).setIntHeader("header", 5);
+    wrapper.setIntHeader("content-length", 4);
+    verify(response, never()).setIntHeader("content-length", 4);
+
+    wrapper.setHeader("set-header", "new value");
+    verify(response).setHeader("set-header", "new value");
+    wrapper.setHeader("content-length", "content length value");
+    verify(response, never()).setHeader("content-length", "content length value");
+
+    wrapper.sendRedirect("location");
+    verify(response).sendRedirect("location");
+
+    wrapper.flushBuffer();
+    verify(response).flushBuffer();
+  }
+
+  @Test
+  public void testResetBuffer() throws IOException {
+    when(response.isCommitted()).thenReturn(false);
+    ServletOutputStream out = mock(ServletOutputStream.class);
+    when(response.getOutputStream()).thenReturn(out);
+
+    ServletOutputStream servletOutput = wrapper.getOutputStream();
+    assertEquals(GZIPResponseStream.class, servletOutput.getClass());
+    wrapper.resetBuffer();
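+    // resetBuffer() should discard the buffered gzip stream and clear the Content-Encoding header on the wrapped response +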
verify(response).setHeader("Content-Encoding", null); + + when(response.isCommitted()).thenReturn(true); + servletOutput = wrapper.getOutputStream(); + assertEquals(out.getClass(), servletOutput.getClass()); + assertNotNull(wrapper.getWriter()); + } + + @Test + public void testReset() throws IOException { + when(response.isCommitted()).thenReturn(false); + ServletOutputStream out = mock(ServletOutputStream.class); + when(response.getOutputStream()).thenReturn(out); + + ServletOutputStream servletOutput = wrapper.getOutputStream(); + verify(response).addHeader("Content-Encoding", "gzip"); + assertEquals(GZIPResponseStream.class, servletOutput.getClass()); + wrapper.reset(); + verify(response).setHeader("Content-Encoding", null); + + when(response.isCommitted()).thenReturn(true); + servletOutput = wrapper.getOutputStream(); + assertEquals(out.getClass(), servletOutput.getClass()); + } + + @Test + public void testSendError() throws IOException { + wrapper.sendError(404); + verify(response).sendError(404); + + wrapper.sendError(404, "error message"); + verify(response).sendError(404, "error message"); + } + +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java new file mode 100755 index 00000000..e1dec900 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGetAndPutResource.java @@ -0,0 +1,808 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.StringWriter; +import java.net.URLEncoder; +import java.util.HashMap; +import java.util.List; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.test.MetricsAssertHelper; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.http.Header; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestGetAndPutResource extends RowResourceBase { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestGetAndPutResource.class); + + private static final MetricsAssertHelper METRICS_ASSERT = + CompatibilityFactory.getInstance(MetricsAssertHelper.class); + + @Test + public void testForbidden() throws IOException, JAXBException { + conf.set("hbase.rest.readonly", "true"); + + Response response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(403, response.getCode()); + response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(403, response.getCode()); + response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2); + assertEquals(403, response.getCode()); + response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2); + assertEquals(403, response.getCode()); + response = deleteValue(TABLE, ROW_1, COLUMN_1); + assertEquals(403, response.getCode()); + response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(403, response.getCode()); + response = deleteRow(TABLE, ROW_1); + assertEquals(403, response.getCode()); + + conf.set("hbase.rest.readonly", "false"); + + response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_2); + assertEquals(200, response.getCode()); + response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3); + assertEquals(200, response.getCode()); + response = deleteValue(TABLE, ROW_1, COLUMN_1); + assertEquals(200, response.getCode()); + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + } + + @Test + public void testSingleCellGetPutXML() throws IOException, JAXBException { + Response response = getValueXML(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + + response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + assertEquals(200, response.getCode()); + checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2); + assertEquals(200, response.getCode()); + checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2); + response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2, 
VALUE_3);
+    assertEquals(200, response.getCode());
+    checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
+    response = checkAndDeleteXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
+    assertEquals(200, response.getCode());
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testSingleCellGetPutPB() throws IOException, JAXBException {
+    Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404, response.getCode());
+
+    response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2);
+
+    response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2, VALUE_3);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_3);
+    response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3, VALUE_4);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_4);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testMultipleCellCheckPutPB() throws IOException {
+    Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404, response.getCode());
+
+    // Add 2 columns to set up the test
+    response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+
+    response = putValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+    HashMap<String, String> otherCells = new HashMap<>();
+    otherCells.put(COLUMN_2, VALUE_3);
+
+    // On success, update both cells
+    response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_3);
+    checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_3);
+
+    // On failure, don't update any cells
+    response = checkAndPutValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_4, otherCells);
+    assertEquals(304, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_3);
+    checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_3);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testMultipleCellCheckPutXML() throws IOException, JAXBException {
+    Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404, response.getCode());
+
+    // Add 2 columns to set up the test
+    response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    assertEquals(200, response.getCode());
+    checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+
+    response = putValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+    HashMap<String, String> otherCells = new HashMap<>();
+    otherCells.put(COLUMN_2, VALUE_3);
+
+    // On success, update both cells
+    response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_3, otherCells);
+    assertEquals(200, response.getCode());
+    checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
+    checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_3);
+
+    // On failure, don't update any cells
+    response = checkAndPutValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1, VALUE_4, otherCells);
+    assertEquals(304, response.getCode());
+    checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_3);
+    checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_3);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testMultipleCellCheckDeletePB() throws IOException {
+    Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404, response.getCode());
+
+    // Add 3 columns to set up the test
+    response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+
+    response = putValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+    response = putValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3);
+
+    // Deletes the following columns based on the COLUMN_1 check
+    HashMap<String, String> cellsToDelete = new HashMap<>();
+    cellsToDelete.put(COLUMN_2, VALUE_2); // Value does not matter
+    cellsToDelete.put(COLUMN_3, VALUE_3); // Value does not matter
+
+    // On success, delete the specified cells
+    response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_1, cellsToDelete);
+    assertEquals(200, response.getCode());
+
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+
+    response = getValuePB(TABLE, ROW_1, COLUMN_2);
+    assertEquals(404, response.getCode());
+
+    response = getValuePB(TABLE, ROW_1, COLUMN_3);
+    assertEquals(404, response.getCode());
+
+    response = putValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+    response = putValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3);
+
+    // On failure, don't delete any cells
+    response = checkAndDeletePB(TABLE, ROW_1, COLUMN_1, VALUE_3, cellsToDelete);
+    assertEquals(304, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+    checkValuePB(TABLE, ROW_1, COLUMN_3, VALUE_3);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testSingleCellGetPutBinary() throws IOException {
+    final String path = "/" + TABLE + "/" + ROW_3 + "/" + COLUMN_1;
+    final byte[] body = Bytes.toBytes(VALUE_3);
+    Response response = client.put(path, Constants.MIMETYPE_BINARY, body);
+    assertEquals(200, response.getCode());
+    Thread.yield();
+
+    response = client.get(path, Constants.MIMETYPE_BINARY);
+    assertEquals(200, response.getCode());
+    assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type"));
+    assertTrue(Bytes.equals(response.getBody(), body));
+    boolean foundTimestampHeader = false;
+    for (Header header: response.getHeaders()) {
+      if (header.getName().equals("X-Timestamp")) {
+        foundTimestampHeader = true;
+        break;
+      }
+    }
+    assertTrue(foundTimestampHeader);
+
+    response = deleteRow(TABLE, ROW_3);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testSingleCellGetJSON() throws IOException {
+    final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
+    Response response = client.put(path, Constants.MIMETYPE_BINARY,
+        Bytes.toBytes(VALUE_4));
+    assertEquals(200, response.getCode());
+    Thread.yield();
+    response = client.get(path, Constants.MIMETYPE_JSON);
+    assertEquals(200, response.getCode());
+    assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type"));
+    response = deleteRow(TABLE, ROW_4);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testLatestCellGetJSON() throws IOException {
+    final String
path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; + CellSetModel cellSetModel = new CellSetModel(); + RowModel rowModel = new RowModel(ROW_4); + CellModel cellOne = new CellModel(Bytes.toBytes(COLUMN_1), 1L, + Bytes.toBytes(VALUE_1)); + CellModel cellTwo = new CellModel(Bytes.toBytes(COLUMN_1), 2L, + Bytes.toBytes(VALUE_2)); + rowModel.addCell(cellOne); + rowModel.addCell(cellTwo); + cellSetModel.addRow(rowModel); + String jsonString = jsonMapper.writeValueAsString(cellSetModel); + Response response = client.put(path, Constants.MIMETYPE_JSON, + Bytes.toBytes(jsonString)); + assertEquals(200, response.getCode()); + Thread.yield(); + response = client.get(path, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + CellSetModel cellSet = jsonMapper.readValue(response.getBody(), CellSetModel.class); + assertTrue(cellSet.getRows().size() == 1); + assertTrue(cellSet.getRows().get(0).getCells().size() == 1); + CellModel cell = cellSet.getRows().get(0).getCells().get(0); + assertEquals(VALUE_2 , Bytes.toString(cell.getValue())); + assertEquals(2L , cell.getTimestamp()); + response = deleteRow(TABLE, ROW_4); + assertEquals(200, response.getCode()); + } + + @Test + public void testURLEncodedKey() throws IOException, JAXBException { + String urlKey = "http://example.com/foo"; + StringBuilder path = new StringBuilder(); + path.append('/'); + path.append(TABLE); + path.append('/'); + path.append(URLEncoder.encode(urlKey, HConstants.UTF8_ENCODING)); + path.append('/'); + path.append(COLUMN_1); + Response response; + response = putValueXML(path.toString(), TABLE, urlKey, COLUMN_1, + VALUE_1); + assertEquals(200, response.getCode()); + checkValueXML(path.toString(), TABLE, urlKey, COLUMN_1, VALUE_1); + } + + @Test + public void testNoSuchCF() throws IOException { + final String goodPath = "/" + TABLE + "/" + ROW_1 + "/" + CFA+":"; + final String badPath = "/" + TABLE + "/" + ROW_1 + "/" + "BAD"; + Response response = client.post(goodPath, Constants.MIMETYPE_BINARY, + Bytes.toBytes(VALUE_1)); + assertEquals(200, response.getCode()); + assertEquals(200, client.get(goodPath, Constants.MIMETYPE_BINARY).getCode()); + assertEquals(404, client.get(badPath, Constants.MIMETYPE_BINARY).getCode()); + assertEquals(200, client.get(goodPath, Constants.MIMETYPE_BINARY).getCode()); + } + + @Test + public void testMultiCellGetPutXML() throws IOException, JAXBException { + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + + CellSetModel cellSetModel = new CellSetModel(); + RowModel rowModel = new RowModel(ROW_1); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), + Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), + Bytes.toBytes(VALUE_2))); + cellSetModel.addRow(rowModel); + rowModel = new RowModel(ROW_2); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), + Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), + Bytes.toBytes(VALUE_4))); + cellSetModel.addRow(rowModel); + StringWriter writer = new StringWriter(); + xmlMarshaller.marshal(cellSetModel, writer); + Response response = client.put(path, Constants.MIMETYPE_XML, + Bytes.toBytes(writer.toString())); + Thread.yield(); + + // make sure the fake row was not actually created + response = client.get(path, Constants.MIMETYPE_XML); + assertEquals(404, response.getCode()); + + // check that all of the values were created + checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1); + 
checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2); + checkValueXML(TABLE, ROW_2, COLUMN_1, VALUE_3); + checkValueXML(TABLE, ROW_2, COLUMN_2, VALUE_4); + + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + response = deleteRow(TABLE, ROW_2); + assertEquals(200, response.getCode()); + } + + @Test + public void testMultiCellGetPutPB() throws IOException { + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + + CellSetModel cellSetModel = new CellSetModel(); + RowModel rowModel = new RowModel(ROW_1); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), + Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), + Bytes.toBytes(VALUE_2))); + cellSetModel.addRow(rowModel); + rowModel = new RowModel(ROW_2); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), + Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), + Bytes.toBytes(VALUE_4))); + cellSetModel.addRow(rowModel); + Response response = client.put(path, Constants.MIMETYPE_PROTOBUF, + cellSetModel.createProtobufOutput()); + Thread.yield(); + + // make sure the fake row was not actually created + response = client.get(path, Constants.MIMETYPE_PROTOBUF); + assertEquals(404, response.getCode()); + + // check that all of the values were created + checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1); + checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2); + checkValuePB(TABLE, ROW_2, COLUMN_1, VALUE_3); + checkValuePB(TABLE, ROW_2, COLUMN_2, VALUE_4); + + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + response = deleteRow(TABLE, ROW_2); + assertEquals(200, response.getCode()); + } + + @Test + public void testStartEndRowGetPutXML() throws IOException, JAXBException { + String[] rows = { ROW_1, ROW_2, ROW_3 }; + String[] values = { VALUE_1, VALUE_2, VALUE_3 }; + Response response = null; + for (int i = 0; i < rows.length; i++) { + response = putValueXML(TABLE, rows[i], COLUMN_1, values[i]); + assertEquals(200, response.getCode()); + checkValueXML(TABLE, rows[i], COLUMN_1, values[i]); + } + response = getValueXML(TABLE, rows[0], rows[2], COLUMN_1); + assertEquals(200, response.getCode()); + CellSetModel cellSet = (CellSetModel) + xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + assertEquals(2, cellSet.getRows().size()); + for (int i = 0; i < cellSet.getRows().size()-1; i++) { + RowModel rowModel = cellSet.getRows().get(i); + for (CellModel cell: rowModel.getCells()) { + assertEquals(COLUMN_1, Bytes.toString(cell.getColumn())); + assertEquals(values[i], Bytes.toString(cell.getValue())); + } + } + for (String row : rows) { + response = deleteRow(TABLE, row); + assertEquals(200, response.getCode()); + } + } + + @Test + public void testInvalidCheckParam() throws IOException, JAXBException { + CellSetModel cellSetModel = new CellSetModel(); + RowModel rowModel = new RowModel(ROW_1); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), + Bytes.toBytes(VALUE_1))); + cellSetModel.addRow(rowModel); + StringWriter writer = new StringWriter(); + xmlMarshaller.marshal(cellSetModel, writer); + + final String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "?check=blah"; + + Response response = client.put(path, Constants.MIMETYPE_XML, + Bytes.toBytes(writer.toString())); + assertEquals(400, response.getCode()); + } + + @Test + public void testInvalidColumnPut() throws IOException, JAXBException { + String dummyColumn = "doesnot:exist"; + CellSetModel cellSetModel = new CellSetModel(); + RowModel 
rowModel = new RowModel(ROW_1); + rowModel.addCell(new CellModel(Bytes.toBytes(dummyColumn), + Bytes.toBytes(VALUE_1))); + cellSetModel.addRow(rowModel); + StringWriter writer = new StringWriter(); + xmlMarshaller.marshal(cellSetModel, writer); + + final String path = "/" + TABLE + "/" + ROW_1 + "/" + dummyColumn; + + Response response = client.put(path, Constants.MIMETYPE_XML, + Bytes.toBytes(writer.toString())); + assertEquals(404, response.getCode()); + } + + @Test + public void testMultiCellGetJson() throws IOException, JAXBException { + String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row + + CellSetModel cellSetModel = new CellSetModel(); + RowModel rowModel = new RowModel(ROW_1); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), + Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), + Bytes.toBytes(VALUE_2))); + cellSetModel.addRow(rowModel); + rowModel = new RowModel(ROW_2); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), + Bytes.toBytes(VALUE_3))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), + Bytes.toBytes(VALUE_4))); + cellSetModel.addRow(rowModel); + String jsonString = jsonMapper.writeValueAsString(cellSetModel); + + Response response = client.put(path, Constants.MIMETYPE_JSON, + Bytes.toBytes(jsonString)); + Thread.yield(); + + // make sure the fake row was not actually created + response = client.get(path, Constants.MIMETYPE_JSON); + assertEquals(404, response.getCode()); + + // check that all of the values were created + checkValueJSON(TABLE, ROW_1, COLUMN_1, VALUE_1); + checkValueJSON(TABLE, ROW_1, COLUMN_2, VALUE_2); + checkValueJSON(TABLE, ROW_2, COLUMN_1, VALUE_3); + checkValueJSON(TABLE, ROW_2, COLUMN_2, VALUE_4); + + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + response = deleteRow(TABLE, ROW_2); + assertEquals(200, response.getCode()); + } + + @Test + public void testMetrics() throws IOException { + final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1; + Response response = client.put(path, Constants.MIMETYPE_BINARY, + Bytes.toBytes(VALUE_4)); + assertEquals(200, response.getCode()); + Thread.yield(); + response = client.get(path, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + response = deleteRow(TABLE, ROW_4); + assertEquals(200, response.getCode()); + + UserProvider userProvider = UserProvider.instantiate(conf); + METRICS_ASSERT.assertCounterGt("requests", 2L, + RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); + + METRICS_ASSERT.assertCounterGt("successfulGet", 0L, + RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); + + METRICS_ASSERT.assertCounterGt("successfulPut", 0L, + RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); + + METRICS_ASSERT.assertCounterGt("successfulDelete", 0L, + RESTServlet.getInstance(conf, userProvider).getMetrics().getSource()); + } + + @Test + public void testMultiColumnGetXML() throws Exception { + String path = "/" + TABLE + "/fakerow"; + CellSetModel cellSetModel = new CellSetModel(); + RowModel rowModel = new RowModel(ROW_1); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2))); + rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_3), Bytes.toBytes(VALUE_2))); + cellSetModel.addRow(rowModel); + StringWriter writer = new StringWriter(); + 
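// The multi-cell PUT below goes to the bogus "fakerow" path; the row key comes from the request body, so only ROW_1 is actually stored.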
+    xmlMarshaller.marshal(cellSetModel, writer);
+
+    Response response = client.put(path, Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString()));
+    Thread.yield();
+
+    // make sure the fake row was not actually created
+    response = client.get(path, Constants.MIMETYPE_XML);
+    assertEquals(404, response.getCode());
+
+    // Try getting all the column values at once.
+    path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1 + "," + COLUMN_2 + "," + COLUMN_3;
+    response = client.get(path, Constants.MIMETYPE_XML);
+    assertEquals(200, response.getCode());
+    CellSetModel cellSet =
+        (CellSetModel) xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+    assertTrue(cellSet.getRows().size() == 1);
+    assertTrue(cellSet.getRows().get(0).getCells().size() == 3);
+    List<CellModel> cells = cellSet.getRows().get(0).getCells();
+
+    assertTrue(containsCellModel(cells, COLUMN_1, VALUE_1));
+    assertTrue(containsCellModel(cells, COLUMN_2, VALUE_2));
+    assertTrue(containsCellModel(cells, COLUMN_3, VALUE_2));
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  private boolean containsCellModel(List<CellModel> cells, String column, String value) {
+    boolean contains = false;
+    for (CellModel cell : cells) {
+      if (Bytes.toString(cell.getColumn()).equals(column)
+          && Bytes.toString(cell.getValue()).equals(value)) {
+        contains = true;
+        return contains;
+      }
+    }
+    return contains;
+  }
+
+  @Test
+  public void testSuffixGlobbingXMLWithNewScanner() throws IOException, JAXBException {
+    String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+    CellSetModel cellSetModel = new CellSetModel();
+    RowModel rowModel = new RowModel(ROW_1);
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+        Bytes.toBytes(VALUE_1)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+        Bytes.toBytes(VALUE_2)));
+    cellSetModel.addRow(rowModel);
+    rowModel = new RowModel(ROW_2);
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+        Bytes.toBytes(VALUE_3)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+        Bytes.toBytes(VALUE_4)));
+    cellSetModel.addRow(rowModel);
+    StringWriter writer = new StringWriter();
+    xmlMarshaller.marshal(cellSetModel, writer);
+    Response response = client.put(path, Constants.MIMETYPE_XML,
+        Bytes.toBytes(writer.toString()));
+    Thread.yield();
+
+    // make sure the fake row was not actually created
+    response = client.get(path, Constants.MIMETYPE_XML);
+    assertEquals(404, response.getCode());
+
+    // check that all of the values were created
+    StringBuilder query = new StringBuilder();
+    query.append('/');
+    query.append(TABLE);
+    query.append('/');
+    query.append("testrow*");
+    response = client.get(query.toString(), Constants.MIMETYPE_XML);
+    assertEquals(200, response.getCode());
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+    CellSetModel cellSet = (CellSetModel)
+        xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+    assertTrue(cellSet.getRows().size() == 2);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+    response = deleteRow(TABLE, ROW_2);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testSuffixGlobbingXML() throws IOException, JAXBException {
+    String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+    CellSetModel cellSetModel = new CellSetModel();
+    RowModel rowModel = new RowModel(ROW_1);
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+        Bytes.toBytes(VALUE_1)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+        Bytes.toBytes(VALUE_2)));
+    cellSetModel.addRow(rowModel);
+    rowModel = new RowModel(ROW_2);
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1),
+        Bytes.toBytes(VALUE_3)));
+    rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2),
+        Bytes.toBytes(VALUE_4)));
+    cellSetModel.addRow(rowModel);
+    StringWriter writer = new StringWriter();
+    xmlMarshaller.marshal(cellSetModel, writer);
+    Response response = client.put(path, Constants.MIMETYPE_XML,
+        Bytes.toBytes(writer.toString()));
+    Thread.yield();
+
+    // make sure the fake row was not actually created
+    response = client.get(path, Constants.MIMETYPE_XML);
+    assertEquals(404, response.getCode());
+
+    // check that all of the values were created
+    StringBuilder query = new StringBuilder();
+    query.append('/');
+    query.append(TABLE);
+    query.append('/');
+    query.append("testrow*");
+    query.append('/');
+    query.append(COLUMN_1);
+    response = client.get(query.toString(), Constants.MIMETYPE_XML);
+    assertEquals(200, response.getCode());
+    assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type"));
+    CellSetModel cellSet = (CellSetModel)
+        xmlUnmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+    List<RowModel> rows = cellSet.getRows();
+    assertTrue(rows.size() == 2);
+    for (RowModel row : rows) {
+      assertTrue(row.getCells().size() == 1);
+      assertEquals(COLUMN_1, Bytes.toString(row.getCells().get(0).getColumn()));
+    }
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+    response = deleteRow(TABLE, ROW_2);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testAppendXML() throws IOException, JAXBException {
+    Response response = getValueXML(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404, response.getCode());
+
+    // append cell
+    response = appendValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    assertEquals(200, response.getCode());
+    checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    response = appendValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1 + VALUE_2);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testAppendPB() throws IOException, JAXBException {
+    Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404, response.getCode());
+
+    // append cell
+    response = appendValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    response = appendValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1 + VALUE_2);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testAppendJSON() throws IOException, JAXBException {
+    Response response = getValueJson(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404, response.getCode());
+
+    // append cell
+    response = appendValueJson(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    assertEquals(200, response.getCode());
+    checkValueJSON(TABLE, ROW_1, COLUMN_1, VALUE_1);
+    response = appendValueJson(TABLE, ROW_1, COLUMN_1, VALUE_2);
+    assertEquals(200, response.getCode());
+    checkValueJSON(TABLE, ROW_1, COLUMN_1, VALUE_1 + VALUE_2);
+
+    response = deleteRow(TABLE, ROW_1);
+    assertEquals(200, response.getCode());
+  }
+
+  @Test
+  public void testIncrementXML() throws IOException, JAXBException {
+    Response response = getValueXML(TABLE, ROW_1, COLUMN_1);
+    assertEquals(404,
response.getCode()); + + //append single cell + response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_5); + assertEquals(200, response.getCode()); + checkIncrementValueXML(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); + response = incrementValueXML(TABLE, ROW_1, COLUMN_1, VALUE_6); + assertEquals(200, response.getCode()); + checkIncrementValueXML(TABLE, ROW_1, COLUMN_1, + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + } + + @Test + public void testIncrementPB() throws IOException, JAXBException { + Response response = getValuePB(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + + //append cell + response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_5); + assertEquals(200, response.getCode()); + checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); + response = incrementValuePB(TABLE, ROW_1, COLUMN_1, VALUE_6); + assertEquals(200, response.getCode()); + checkIncrementValuePB(TABLE, ROW_1, COLUMN_1, + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + } + + @Test + public void testIncrementJSON() throws IOException, JAXBException { + Response response = getValueJson(TABLE, ROW_1, COLUMN_1); + assertEquals(404, response.getCode()); + + //append cell + response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_5); + assertEquals(200, response.getCode()); + checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, Long.parseLong(VALUE_5)); + response = incrementValueJson(TABLE, ROW_1, COLUMN_1, VALUE_6); + assertEquals(200, response.getCode()); + checkIncrementValueJSON(TABLE, ROW_1, COLUMN_1, + Long.parseLong(VALUE_5) + Long.parseLong(VALUE_6)); + + response = deleteRow(TABLE, ROW_1); + assertEquals(200, response.getCode()); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java new file mode 100755 index 00000000..e6c9c00e --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestGzipFilter { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestGzipFilter.class); + + private static final TableName TABLE = TableName.valueOf("TestGzipFilter"); + private static final String CFA = "a"; + private static final String COLUMN_1 = CFA + ":1"; + private static final String COLUMN_2 = CFA + ":2"; + private static final String ROW_1 = "testrow1"; + private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1"); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + Admin admin = TEST_UTIL.getAdmin(); + if (admin.tableExists(TABLE)) { + return; + } + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TABLE); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + admin.createTable(tableDescriptorBuilder.build()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testGzipFilter() throws Exception { + String path = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; + + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + GZIPOutputStream os = new GZIPOutputStream(bos); + os.write(VALUE_1); + os.close(); + byte[] value_1_gzip = bos.toByteArray(); + + // input side filter + + Header[] headers = new Header[2]; + headers[0] = new BasicHeader("Content-Type", Constants.MIMETYPE_BINARY); + headers[1] = new 
BasicHeader("Content-Encoding", "gzip"); + Response response = client.put(path, headers, value_1_gzip); + assertEquals(200, response.getCode()); + + Table table = TEST_UTIL.getConnection().getTable(TABLE); + Get get = new Get(Bytes.toBytes(ROW_1)); + get.addColumn(Bytes.toBytes(CFA), Bytes.toBytes("1")); + Result result = table.get(get); + byte[] value = result.getValue(Bytes.toBytes(CFA), Bytes.toBytes("1")); + assertNotNull(value); + assertTrue(Bytes.equals(value, VALUE_1)); + + // output side filter + + headers[0] = new BasicHeader("Accept", Constants.MIMETYPE_BINARY); + headers[1] = new BasicHeader("Accept-Encoding", "gzip"); + response = client.get(path, headers); + assertEquals(200, response.getCode()); + ByteArrayInputStream bis = new ByteArrayInputStream(response.getBody()); + GZIPInputStream is = new GZIPInputStream(bis); + value = new byte[VALUE_1.length]; + is.read(value, 0, VALUE_1.length); + assertTrue(Bytes.equals(value, VALUE_1)); + is.close(); + table.close(); + + testScannerResultCodes(); + } + + void testScannerResultCodes() throws Exception { + Header[] headers = new Header[3]; + headers[0] = new BasicHeader("Content-Type", Constants.MIMETYPE_XML); + headers[1] = new BasicHeader("Accept", Constants.MIMETYPE_JSON); + headers[2] = new BasicHeader("Accept-Encoding", "gzip"); + Response response = client.post("/" + TABLE + "/scanner", headers, Bytes.toBytes("")); + assertEquals(201, response.getCode()); + String scannerUrl = response.getLocation(); + assertNotNull(scannerUrl); + response = client.get(scannerUrl); + assertEquals(200, response.getCode()); + response = client.get(scannerUrl); + assertEquals(204, response.getCode()); + } + +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java new file mode 100755 index 00000000..0c5fcc41 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; + +import java.io.IOException; +import java.util.Collection; +import javax.ws.rs.core.MediaType; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.Marshaller; +import javax.xml.bind.Unmarshaller; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; + +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@Category({RestTests.class, MediumTests.class}) +@RunWith(Parameterized.class) +public class TestMultiRowResource { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestMultiRowResource.class); + + private static final TableName TABLE = TableName.valueOf("TestRowResource"); + private static final String CFA = "a"; + private static final String CFB = "b"; + private static final String COLUMN_1 = CFA + ":1"; + private static final String COLUMN_2 = CFB + ":2"; + private static final String ROW_1 = "testrow5"; + private static final String VALUE_1 = "testvalue5"; + private static final String ROW_2 = "testrow6"; + private static final String VALUE_2 = "testvalue6"; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); + + private static Client client; + private static JAXBContext context; + private static Marshaller marshaller; + private static Unmarshaller unmarshaller; + private static Configuration conf; + + private static Header extraHdr = null; + private static boolean csrfEnabled = true; + + @Parameterized.Parameters + public static Collection data() { + return HBaseCommonTestingUtility.BOOLEAN_PARAMETERIZED; + } + + public TestMultiRowResource(Boolean csrf) { + csrfEnabled = csrf; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + conf.setBoolean(RESTServer.REST_CSRF_ENABLED_KEY, csrfEnabled); + if (csrfEnabled) { + conf.set(RESTServer.REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY, ".*"); + } + extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(conf); + context 
= JAXBContext.newInstance( + CellModel.class, + CellSetModel.class, + RowModel.class); + marshaller = context.createMarshaller(); + unmarshaller = context.createUnmarshaller(); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + Admin admin = TEST_UTIL.getAdmin(); + if (admin.tableExists(TABLE)) { + return; + } + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TABLE); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + admin.createTable(tableDescriptorBuilder.build()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testMultiCellGetJSON() throws IOException { + String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; + String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2; + + StringBuilder path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget/?row="); + path.append(ROW_1); + path.append("&row="); + path.append(ROW_2); + + if (csrfEnabled) { + Response response = client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1)); + assertEquals(400, response.getCode()); + } + + client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1), extraHdr); + client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2), extraHdr); + + Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + + client.delete(row_5_url, extraHdr); + client.delete(row_6_url, extraHdr); + } + + @Test + public void testMultiCellGetXML() throws IOException { + String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; + String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2; + + StringBuilder path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget/?row="); + path.append(ROW_1); + path.append("&row="); + path.append(ROW_2); + + client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1), extraHdr); + client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2), extraHdr); + + Response response = client.get(path.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + + client.delete(row_5_url, extraHdr); + client.delete(row_6_url, extraHdr); + } + + @Test + public void testMultiCellGetWithColsJSON() throws IOException { + String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; + String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2; + + StringBuilder path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget"); + path.append("/" + COLUMN_1 + "," + CFB); + path.append("?row="); + path.append(ROW_1); + path.append("&row="); + path.append(ROW_2); + + client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1), extraHdr); + client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2), extraHdr); + + Response response = client.get(path.toString(), 
Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); + assertEquals(2, cellSet.getRows().size()); + assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey())); + assertEquals(VALUE_1, Bytes.toString(cellSet.getRows().get(0).getCells().get(0).getValue())); + assertEquals(ROW_2, Bytes.toString(cellSet.getRows().get(1).getKey())); + assertEquals(VALUE_2, Bytes.toString(cellSet.getRows().get(1).getCells().get(0).getValue())); + + client.delete(row_5_url, extraHdr); + client.delete(row_6_url, extraHdr); + } + + @Test + public void testMultiCellGetJSONNotFound() throws IOException { + String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; + + StringBuilder path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget/?row="); + path.append(ROW_1); + path.append("&row="); + path.append(ROW_2); + + client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1), extraHdr); + Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + CellSetModel cellSet = (CellSetModel) mapper.readValue(response.getBody(), CellSetModel.class); + assertEquals(1, cellSet.getRows().size()); + assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey())); + assertEquals(VALUE_1, Bytes.toString(cellSet.getRows().get(0).getCells().get(0).getValue())); + client.delete(row_5_url, extraHdr); + } + + @Test + public void testMultiCellGetWithColsInQueryPathJSON() throws IOException { + String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1; + String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2; + + StringBuilder path = new StringBuilder(); + path.append("/"); + path.append(TABLE); + path.append("/multiget/?row="); + path.append(ROW_1); + path.append("/"); + path.append(COLUMN_1); + path.append("&row="); + path.append(ROW_2); + path.append("/"); + path.append(COLUMN_1); + + client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1), extraHdr); + client.post(row_6_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_2), extraHdr); + + Response response = client.get(path.toString(), Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper( + CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + CellSetModel cellSet = mapper.readValue(response.getBody(), CellSetModel.class); + assertEquals(1, cellSet.getRows().size()); + assertEquals(ROW_1, Bytes.toString(cellSet.getRows().get(0).getKey())); + assertEquals(VALUE_1, Bytes.toString(cellSet.getRows().get(0).getCells().get(0).getValue())); + + client.delete(row_5_url, extraHdr); + client.delete(row_6_url, extraHdr); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java new file mode 100755 index 00000000..414168e2 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesInstanceResource.java @@ -0,0 +1,466 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.ws.rs.core.MediaType; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.NamespacesInstanceModel; +import org.apache.hadoop.hbase.rest.model.TableListModel; +import org.apache.hadoop.hbase.rest.model.TableModel; +import org.apache.hadoop.hbase.rest.model.TestNamespacesInstanceModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.http.Header; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestNamespacesInstanceResource { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestNamespacesInstanceResource.class); + + private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; + private static Map<String, String> NAMESPACE1_PROPS = new HashMap<>(); + private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; + private static Map<String, String> NAMESPACE2_PROPS = new HashMap<>(); + private static String NAMESPACE3 = "TestNamespacesInstanceResource3"; + private static Map<String, String> NAMESPACE3_PROPS = new HashMap<>(); + private static String NAMESPACE4 = "TestNamespacesInstanceResource4"; + private static Map<String, String> NAMESPACE4_PROPS = new HashMap<>(); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final
HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + private static Configuration conf; + private static TestNamespacesInstanceModel testNamespacesInstanceModel; + protected static ObjectMapper jsonMapper; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(conf); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + testNamespacesInstanceModel = new TestNamespacesInstanceModel(); + context = JAXBContext.newInstance(NamespacesInstanceModel.class, TableListModel.class); + jsonMapper = new JacksonJaxbJsonProvider() + .locateMapper(NamespacesInstanceModel.class, MediaType.APPLICATION_JSON_TYPE); + NAMESPACE1_PROPS.put("key1", "value1"); + NAMESPACE2_PROPS.put("key2a", "value2a"); + NAMESPACE2_PROPS.put("key2b", "value2b"); + NAMESPACE3_PROPS.put("key3", "value3"); + NAMESPACE4_PROPS.put("key4a", "value4a"); + NAMESPACE4_PROPS.put("key4b", "value4b"); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + private static byte[] toXML(NamespacesInstanceModel model) throws JAXBException { + StringWriter writer = new StringWriter(); + context.createMarshaller().marshal(model, writer); + return Bytes.toBytes(writer.toString()); + } + + @SuppressWarnings("unchecked") + private static <T> T fromXML(byte[] content) + throws JAXBException { + return (T) context.createUnmarshaller().unmarshal(new ByteArrayInputStream(content)); + } + + private NamespaceDescriptor findNamespace(Admin admin, String namespaceName) throws IOException{ + NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); + for (NamespaceDescriptor namespaceDescriptor : nd) { + if (namespaceDescriptor.getName().equals(namespaceName)) { + return namespaceDescriptor; + } + } + return null; + } + + private void checkNamespaceProperties(NamespaceDescriptor nd, Map<String, String> testProps){ + checkNamespaceProperties(nd.getConfiguration(), testProps); + } + + private void checkNamespaceProperties(Map<String, String> namespaceProps, + Map<String, String> testProps){ + assertTrue(namespaceProps.size() == testProps.size()); + for (String key: testProps.keySet()) { + assertEquals(testProps.get(key), namespaceProps.get(key)); + } + } + + private void checkNamespaceTables(List<TableModel> namespaceTables, List<String> testTables){ + assertEquals(namespaceTables.size(), testTables.size()); + for (TableModel namespaceTable : namespaceTables) { + String tableName = namespaceTable.getName(); + assertTrue(testTables.contains(tableName)); + } + } + + @Test + public void testCannotDeleteDefaultAndHbaseNamespaces() throws IOException { + String defaultPath = "/namespaces/default"; + String hbasePath = "/namespaces/hbase"; + Response response; + + // Check that the default and hbase namespaces exist via a non-REST call. + Admin admin = TEST_UTIL.getAdmin(); + assertNotNull(findNamespace(admin, "default")); + assertNotNull(findNamespace(admin, "hbase")); + + // Try (but fail) to delete namespaces via REST.
+ response = client.delete(defaultPath); + assertEquals(503, response.getCode()); + response = client.delete(hbasePath); + assertEquals(503, response.getCode()); + + assertNotNull(findNamespace(admin, "default")); + assertNotNull(findNamespace(admin, "hbase")); + } + + @Test + public void testGetNamespaceTablesAndCannotDeleteNamespace() throws IOException, JAXBException { + Admin admin = TEST_UTIL.getAdmin(); + String nsName = "TestNamespacesInstanceResource5"; + Response response; + + // Create namespace via admin. + NamespaceDescriptor.Builder nsBuilder = NamespaceDescriptor.create(nsName); + NamespaceDescriptor nsd = nsBuilder.build(); + nsd.setConfiguration("key1", "value1"); + admin.createNamespace(nsd); + + // Create two tables via admin. + TableName tn1 = TableName.valueOf(nsName + ":table1"); + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(tn1); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf1")).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + admin.createTable(tableDescriptorBuilder.build()); + TableName tn2 = TableName.valueOf(nsName + ":table2"); + tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(tn2); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + admin.createTable(tableDescriptorBuilder.build()); + + Map<String, String> nsProperties = new HashMap<>(); + nsProperties.put("key1", "value1"); + List<String> nsTables = Arrays.asList("table1", "table2"); + + // Check get namespace properties as XML, JSON and Protobuf. + String namespacePath = "/namespaces/" + nsName; + response = client.get(namespacePath); + assertEquals(200, response.getCode()); + + response = client.get(namespacePath, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + NamespacesInstanceModel model = fromXML(response.getBody()); + checkNamespaceProperties(model.getProperties(), nsProperties); + + response = client.get(namespacePath, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + model = jsonMapper.readValue(response.getBody(), NamespacesInstanceModel.class); + checkNamespaceProperties(model.getProperties(), nsProperties); + + response = client.get(namespacePath, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + model.getObjectFromMessage(response.getBody()); + checkNamespaceProperties(model.getProperties(), nsProperties); + + // Check get namespace tables as XML, JSON and Protobuf. + namespacePath = "/namespaces/" + nsName + "/tables"; + response = client.get(namespacePath); + assertEquals(200, response.getCode()); + + response = client.get(namespacePath, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + TableListModel tablemodel = fromXML(response.getBody()); + checkNamespaceTables(tablemodel.getTables(), nsTables); + + response = client.get(namespacePath, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + tablemodel = jsonMapper.readValue(response.getBody(), TableListModel.class); + checkNamespaceTables(tablemodel.getTables(), nsTables); + + response = client.get(namespacePath, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + tablemodel.setTables(new ArrayList<>()); + tablemodel.getObjectFromMessage(response.getBody()); + checkNamespaceTables(tablemodel.getTables(), nsTables); + + // Check cannot delete namespace via REST because it contains tables.
+ response = client.delete(namespacePath); + namespacePath = "/namespaces/" + nsName; + assertEquals(503, response.getCode()); + } + + @Ignore("HBASE-19210") + @Test + public void testInvalidNamespacePostsAndPuts() throws IOException, JAXBException { + String namespacePath1 = "/namespaces/" + NAMESPACE1; + String namespacePath2 = "/namespaces/" + NAMESPACE2; + String namespacePath3 = "/namespaces/" + NAMESPACE3; + NamespacesInstanceModel model1; + NamespacesInstanceModel model2; + NamespacesInstanceModel model3; + Response response; + + // Check that namespaces don't exist via non-REST call. + Admin admin = TEST_UTIL.getAdmin(); + assertNull(findNamespace(admin, NAMESPACE1)); + assertNull(findNamespace(admin, NAMESPACE2)); + assertNull(findNamespace(admin, NAMESPACE3)); + + model1 = testNamespacesInstanceModel.buildTestModel(NAMESPACE1, NAMESPACE1_PROPS); + testNamespacesInstanceModel.checkModel(model1, NAMESPACE1, NAMESPACE1_PROPS); + model2 = testNamespacesInstanceModel.buildTestModel(NAMESPACE2, NAMESPACE2_PROPS); + testNamespacesInstanceModel.checkModel(model2, NAMESPACE2, NAMESPACE2_PROPS); + model3 = testNamespacesInstanceModel.buildTestModel(NAMESPACE3, NAMESPACE3_PROPS); + testNamespacesInstanceModel.checkModel(model3, NAMESPACE3, NAMESPACE3_PROPS); + + // Try REST post and puts with invalid content. + response = client.post(namespacePath1, Constants.MIMETYPE_JSON, toXML(model1)); + assertEquals(500, response.getCode()); + String jsonString = jsonMapper.writeValueAsString(model2); + response = client.put(namespacePath2, Constants.MIMETYPE_XML, Bytes.toBytes(jsonString)); + assertEquals(400, response.getCode()); + response = client.post(namespacePath3, Constants.MIMETYPE_PROTOBUF, toXML(model3)); + assertEquals(500, response.getCode()); + + NamespaceDescriptor nd1 = findNamespace(admin, NAMESPACE1); + NamespaceDescriptor nd2 = findNamespace(admin, NAMESPACE2); + NamespaceDescriptor nd3 = findNamespace(admin, NAMESPACE3); + assertNull(nd1); + assertNull(nd2); + assertNull(nd3); + } + + @Test + public void testNamespaceCreateAndDeleteXMLAndJSON() throws IOException, JAXBException { + String namespacePath1 = "/namespaces/" + NAMESPACE1; + String namespacePath2 = "/namespaces/" + NAMESPACE2; + NamespacesInstanceModel model1; + NamespacesInstanceModel model2; + Response response; + + // Check that namespaces don't exist via non-REST call. + Admin admin = TEST_UTIL.getAdmin(); + assertNull(findNamespace(admin, NAMESPACE1)); + assertNull(findNamespace(admin, NAMESPACE2)); + + model1 = testNamespacesInstanceModel.buildTestModel(NAMESPACE1, NAMESPACE1_PROPS); + testNamespacesInstanceModel.checkModel(model1, NAMESPACE1, NAMESPACE1_PROPS); + model2 = testNamespacesInstanceModel.buildTestModel(NAMESPACE2, NAMESPACE2_PROPS); + testNamespacesInstanceModel.checkModel(model2, NAMESPACE2, NAMESPACE2_PROPS); + + // Test cannot PUT (alter) non-existent namespace. + response = client.put(namespacePath1, Constants.MIMETYPE_XML, toXML(model1)); + assertEquals(403, response.getCode()); + String jsonString = jsonMapper.writeValueAsString(model2); + response = client.put(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); + assertEquals(403, response.getCode()); + + // Test cannot create tables when in read only mode. 
+ conf.set("hbase.rest.readonly", "true"); + response = client.post(namespacePath1, Constants.MIMETYPE_XML, toXML(model1)); + assertEquals(403, response.getCode()); + jsonString = jsonMapper.writeValueAsString(model2); + response = client.post(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); + assertEquals(403, response.getCode()); + NamespaceDescriptor nd1 = findNamespace(admin, NAMESPACE1); + NamespaceDescriptor nd2 = findNamespace(admin, NAMESPACE2); + assertNull(nd1); + assertNull(nd2); + conf.set("hbase.rest.readonly", "false"); + + // Create namespace via XML and JSON. + response = client.post(namespacePath1, Constants.MIMETYPE_XML, toXML(model1)); + assertEquals(201, response.getCode()); + jsonString = jsonMapper.writeValueAsString(model2); + response = client.post(namespacePath2, Constants.MIMETYPE_JSON, Bytes.toBytes(jsonString)); + assertEquals(201, response.getCode()); + // check passing null content-type with a payload returns 415 + Header[] nullHeaders = null; + response = client.post(namespacePath1, nullHeaders, toXML(model1)); + assertEquals(415, response.getCode()); + response = client.post(namespacePath1, nullHeaders, Bytes.toBytes(jsonString)); + assertEquals(415, response.getCode()); + + // Check that the namespaces were created correctly. + nd1 = findNamespace(admin, NAMESPACE1); + nd2 = findNamespace(admin, NAMESPACE2); + assertNotNull(nd1); + assertNotNull(nd2); + checkNamespaceProperties(nd1, NAMESPACE1_PROPS); + checkNamespaceProperties(nd2, NAMESPACE2_PROPS); + + // Test cannot delete namespaces when in read-only mode. + conf.set("hbase.rest.readonly", "true"); + response = client.delete(namespacePath1); + assertEquals(403, response.getCode()); + response = client.delete(namespacePath2); + assertEquals(403, response.getCode()); + nd1 = findNamespace(admin, NAMESPACE1); + nd2 = findNamespace(admin, NAMESPACE2); + assertNotNull(nd1); + assertNotNull(nd2); + conf.set("hbase.rest.readonly", "false"); + + // Delete namespaces via XML and JSON. + response = client.delete(namespacePath1); + assertEquals(200, response.getCode()); + response = client.delete(namespacePath2); + assertEquals(200, response.getCode()); + nd1 = findNamespace(admin, NAMESPACE1); + nd2 = findNamespace(admin, NAMESPACE2); + assertNull(nd1); + assertNull(nd2); + } + + @Test + public void testNamespaceCreateAndDeletePBAndNoBody() throws IOException { + String namespacePath3 = "/namespaces/" + NAMESPACE3; + String namespacePath4 = "/namespaces/" + NAMESPACE4; + NamespacesInstanceModel model3; + NamespacesInstanceModel model4; + Response response; + + // Check that namespaces don't exist via non-REST call. + Admin admin = TEST_UTIL.getAdmin(); + assertNull(findNamespace(admin, NAMESPACE3)); + assertNull(findNamespace(admin, NAMESPACE4)); + + model3 = testNamespacesInstanceModel.buildTestModel(NAMESPACE3, NAMESPACE3_PROPS); + testNamespacesInstanceModel.checkModel(model3, NAMESPACE3, NAMESPACE3_PROPS); + model4 = testNamespacesInstanceModel.buildTestModel(NAMESPACE4, NAMESPACE4_PROPS); + testNamespacesInstanceModel.checkModel(model4, NAMESPACE4, NAMESPACE4_PROPS); + + // Define null headers for use in tests where no body content is provided, so that we set + // no content-type in the request + Header[] nullHeaders = null; + + // Test cannot PUT (alter) non-existent namespace.
+ response = client.put(namespacePath3, nullHeaders, new byte[]{}); + assertEquals(403, response.getCode()); + response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, + model4.createProtobufOutput()); + assertEquals(403, response.getCode()); + + // Test cannot create namespaces when in read-only mode. + conf.set("hbase.rest.readonly", "true"); + response = client.post(namespacePath3, nullHeaders, new byte[]{}); + assertEquals(403, response.getCode()); + response = client.put(namespacePath4, Constants.MIMETYPE_PROTOBUF, + model4.createProtobufOutput()); + assertEquals(403, response.getCode()); + NamespaceDescriptor nd3 = findNamespace(admin, NAMESPACE3); + NamespaceDescriptor nd4 = findNamespace(admin, NAMESPACE4); + assertNull(nd3); + assertNull(nd4); + conf.set("hbase.rest.readonly", "false"); + + // Create namespace with no body and no content type. + response = client.post(namespacePath3, nullHeaders, new byte[]{}); + assertEquals(201, response.getCode()); + // Create namespace with protobuf content-type. + response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, + model4.createProtobufOutput()); + assertEquals(201, response.getCode()); + // check setting unsupported content-type returns 415 + response = client.post(namespacePath3, Constants.MIMETYPE_BINARY, new byte[]{}); + assertEquals(415, response.getCode()); + + // Check that the namespaces were created correctly. + nd3 = findNamespace(admin, NAMESPACE3); + nd4 = findNamespace(admin, NAMESPACE4); + assertNotNull(nd3); + assertNotNull(nd4); + checkNamespaceProperties(nd3, new HashMap<>()); + checkNamespaceProperties(nd4, NAMESPACE4_PROPS); + + // Check cannot post namespaces that already exist. + response = client.post(namespacePath3, nullHeaders, new byte[]{}); + assertEquals(403, response.getCode()); + response = client.post(namespacePath4, Constants.MIMETYPE_PROTOBUF, + model4.createProtobufOutput()); + assertEquals(403, response.getCode()); + + // Check cannot delete namespaces when in read-only mode. + conf.set("hbase.rest.readonly", "true"); + response = client.delete(namespacePath3); + assertEquals(403, response.getCode()); + response = client.delete(namespacePath4); + assertEquals(403, response.getCode()); + nd3 = findNamespace(admin, NAMESPACE3); + nd4 = findNamespace(admin, NAMESPACE4); + assertNotNull(nd3); + assertNotNull(nd4); + conf.set("hbase.rest.readonly", "false"); + + // Delete the namespaces. + response = client.delete(namespacePath3); + assertEquals(200, response.getCode()); + response = client.delete(namespacePath4); + assertEquals(200, response.getCode()); + nd3 = findNamespace(admin, NAMESPACE3); + nd4 = findNamespace(admin, NAMESPACE4); + assertNull(nd3); + assertNull(nd4); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java new file mode 100755 index 00000000..3d0bfc32 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestNamespacesResource.java @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.NamespacesModel; +import org.apache.hadoop.hbase.rest.model.TestNamespacesModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestNamespacesResource { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestNamespacesResource.class); + + private static String NAMESPACE1 = "TestNamespacesInstanceResource1"; + private static String NAMESPACE2 = "TestNamespacesInstanceResource2"; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + private static Configuration conf; + private static TestNamespacesModel testNamespacesModel; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(conf); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + testNamespacesModel = new TestNamespacesModel(); + context = JAXBContext.newInstance(NamespacesModel.class); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + private static NamespacesModel fromXML(byte[] content) throws JAXBException { + return (NamespacesModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(content)); + } + + private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException { + NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); + for (NamespaceDescriptor namespaceDescriptor : nd) { + if (namespaceDescriptor.getName().equals(namespaceName)) { + return true; + } + } + return false; + } + + private void createNamespaceViaAdmin(Admin admin, String name) throws IOException { + NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(name); + NamespaceDescriptor nsd = builder.build(); + 
admin.createNamespace(nsd); + } + + @Test + public void testNamespaceListXMLandJSON() throws IOException, JAXBException { + String namespacePath = "/namespaces/"; + NamespacesModel model; + Response response; + + // Check that namespace does not yet exist via non-REST call. + Admin admin = TEST_UTIL.getAdmin(); + assertFalse(doesNamespaceExist(admin, NAMESPACE1)); + model = testNamespacesModel.buildTestModel(); + testNamespacesModel.checkModel(model); + + // Check that REST GET finds only default namespaces via XML and JSON responses. + response = client.get(namespacePath, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + model = fromXML(response.getBody()); + testNamespacesModel.checkModel(model, "hbase", "default"); + response = client.get(namespacePath, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + model = testNamespacesModel.fromJSON(Bytes.toString(response.getBody())); + testNamespacesModel.checkModel(model, "hbase", "default"); + + // Create namespace and check that REST GET finds one additional namespace. + createNamespaceViaAdmin(admin, NAMESPACE1); + response = client.get(namespacePath, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + model = fromXML(response.getBody()); + testNamespacesModel.checkModel(model, NAMESPACE1, "hbase", "default"); + response = client.get(namespacePath, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + model = testNamespacesModel.fromJSON(Bytes.toString(response.getBody())); + testNamespacesModel.checkModel(model, NAMESPACE1, "hbase", "default"); + + // Create another namespace and check that REST GET finds one additional namespace. + createNamespaceViaAdmin(admin, NAMESPACE2); + response = client.get(namespacePath, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + model = fromXML(response.getBody()); + testNamespacesModel.checkModel(model, NAMESPACE1, NAMESPACE2, "hbase", "default"); + response = client.get(namespacePath, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + model = testNamespacesModel.fromJSON(Bytes.toString(response.getBody())); + testNamespacesModel.checkModel(model, NAMESPACE1, NAMESPACE2, "hbase", "default"); + + // Delete namespace and check that REST still finds correct namespaces. + admin.deleteNamespace(NAMESPACE1); + response = client.get(namespacePath, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + model = fromXML(response.getBody()); + testNamespacesModel.checkModel(model, NAMESPACE2, "hbase", "default"); + response = client.get(namespacePath, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + model = testNamespacesModel.fromJSON(Bytes.toString(response.getBody())); + testNamespacesModel.checkModel(model, NAMESPACE2, "hbase", "default"); + + admin.deleteNamespace(NAMESPACE2); + } + + @Test + public void testNamespaceListPBandDefault() throws IOException { + String schemaPath = "/namespaces/"; + NamespacesModel model; + Response response; + + // Check that namespace does not yet exist via non-REST call. + Admin admin = TEST_UTIL.getAdmin(); + assertFalse(doesNamespaceExist(admin, NAMESPACE1)); + model = testNamespacesModel.buildTestModel(); + testNamespacesModel.checkModel(model); + + // Check that REST GET finds only default namespaces via PB and default Accept header. 
+ response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + model.getObjectFromMessage(response.getBody()); + testNamespacesModel.checkModel(model, "hbase", "default"); + response = client.get(schemaPath); + assertEquals(200, response.getCode()); + + // Create namespace and check that REST GET finds one additional namespace. + createNamespaceViaAdmin(admin, NAMESPACE1); + response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + model.getObjectFromMessage(response.getBody()); + testNamespacesModel.checkModel(model, NAMESPACE1, "hbase", "default"); + response = client.get(schemaPath); + assertEquals(200, response.getCode()); + + // Create another namespace and check that REST GET finds one additional namespace. + createNamespaceViaAdmin(admin, NAMESPACE2); + response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + model.getObjectFromMessage(response.getBody()); + testNamespacesModel.checkModel(model, NAMESPACE1, NAMESPACE2, "hbase", "default"); + response = client.get(schemaPath); + assertEquals(200, response.getCode()); + + // Delete namespace and check that REST GET still finds correct namespaces. + admin.deleteNamespace(NAMESPACE1); + response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + model.getObjectFromMessage(response.getBody()); + testNamespacesModel.checkModel(model, NAMESPACE2, "hbase", "default"); + response = client.get(schemaPath); + assertEquals(200, response.getCode()); + + admin.deleteNamespace(NAMESPACE2); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java new file mode 100755 index 00000000..ba0390d4 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestResourceFilter.java @@ -0,0 +1,68 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestResourceFilter { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestResourceFilter.class); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().set(Constants.FILTER_CLASSES, DummyFilter.class.getName()); + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testFilter() throws Exception { + String path = "/status/cluster"; + Response response = client.get(path); + assertEquals(404, response.getCode()); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java new file mode 100755 index 00000000..bad39f49 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java @@ -0,0 +1,408 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Random; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; +import javax.xml.bind.Unmarshaller; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.http.Header; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({RestTests.class, MediumTests.class}) +public class TestScannerResource { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestScannerResource.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestScannerResource.class); + private static final TableName TABLE = TableName.valueOf("TestScannerResource"); + private static final TableName TABLE_TO_BE_DISABLED = TableName.valueOf("ScannerResourceDisable"); + private static final String NONEXISTENT_TABLE = "ThisTableDoesNotExist"; + private static final String CFA = "a"; + private static final String CFB = "b"; + private static final String COLUMN_1 = CFA + ":1"; + private static final String COLUMN_2 = CFB + ":2"; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + private static Marshaller marshaller; + private static Unmarshaller unmarshaller; + private static int expectedRows1; + private static int expectedRows2; + private static Configuration conf; + + static int insertData(Configuration conf, TableName tableName, String column, double prob) + throws IOException { + Random rng = new Random(); + byte[] k = new byte[3]; + byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); + 
List<Put> puts = new ArrayList<>(); + for (byte b1 = 'a'; b1 < 'z'; b1++) { + for (byte b2 = 'a'; b2 < 'z'; b2++) { + for (byte b3 = 'a'; b3 < 'z'; b3++) { + if (rng.nextDouble() < prob) { + k[0] = b1; + k[1] = b2; + k[2] = b3; + Put put = new Put(k); + put.setDurability(Durability.SKIP_WAL); + put.addColumn(famAndQf[0], famAndQf[1], k); + puts.add(put); + } + } + } + } + try (Connection conn = ConnectionFactory.createConnection(conf); + Table table = conn.getTable(tableName)) { + table.put(puts); + } + return puts.size(); + } + + static int countCellSet(CellSetModel model) { + int count = 0; + Iterator<RowModel> rows = model.getRows().iterator(); + while (rows.hasNext()) { + RowModel row = rows.next(); + Iterator<CellModel> cells = row.getCells().iterator(); + while (cells.hasNext()) { + cells.next(); + count++; + } + } + return count; + } + + private static int fullTableScan(ScannerModel model) throws IOException { + model.setBatch(100); + Response response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + assertEquals(201, response.getCode()); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + int count = 0; + while (true) { + response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF); + assertTrue(response.getCode() == 200 || response.getCode() == 204); + if (response.getCode() == 200) { + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + CellSetModel cellSet = new CellSetModel(); + cellSet.getObjectFromMessage(response.getBody()); + Iterator<RowModel> rows = cellSet.getRows().iterator(); + while (rows.hasNext()) { + RowModel row = rows.next(); + Iterator<CellModel> cells = row.getCells().iterator(); + while (cells.hasNext()) { + cells.next(); + count++; + } + } + } else { + break; + } + } + // delete the scanner + response = client.delete(scannerURI); + assertEquals(200, response.getCode()); + return count; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(conf); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance( + CellModel.class, + CellSetModel.class, + RowModel.class, + ScannerModel.class); + marshaller = context.createMarshaller(); + unmarshaller = context.createUnmarshaller(); + Admin admin = TEST_UTIL.getAdmin(); + if (admin.tableExists(TABLE)) { + return; + } + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TABLE); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + + admin.createTable(tableDescriptorBuilder.build()); + expectedRows1 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_1, 1.0); + expectedRows2 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_2, 0.5); + + tableDescriptorBuilder = TableDescriptorBuilder.newBuilder(TABLE_TO_BE_DISABLED); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); +
tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + + admin.createTable(tableDescriptorBuilder.build()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testSimpleScannerXML() throws IOException, JAXBException { + final int BATCH_SIZE = 5; + // new scanner + ScannerModel model = new ScannerModel(); + model.setBatch(BATCH_SIZE); + model.addColumn(Bytes.toBytes(COLUMN_1)); + StringWriter writer = new StringWriter(); + marshaller.marshal(model, writer); + byte[] body = Bytes.toBytes(writer.toString()); + + // test put operation is forbidden in read-only mode + conf.set("hbase.rest.readonly", "true"); + Response response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_XML, body); + assertEquals(403, response.getCode()); + String scannerURI = response.getLocation(); + assertNull(scannerURI); + + // recall previous put operation with read-only off + conf.set("hbase.rest.readonly", "false"); + response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, + body); + assertEquals(201, response.getCode()); + scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell set + response = client.get(scannerURI, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + CellSetModel cellSet = (CellSetModel) + unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + // confirm batch size conformance + assertEquals(BATCH_SIZE, countCellSet(cellSet)); + + // test delete scanner operation is forbidden in read-only mode + conf.set("hbase.rest.readonly", "true"); + response = client.delete(scannerURI); + assertEquals(403, response.getCode()); + + // recall previous delete scanner operation with read-only off + conf.set("hbase.rest.readonly", "false"); + response = client.delete(scannerURI); + assertEquals(200, response.getCode()); + } + + @Test + public void testSimpleScannerPB() throws IOException { + final int BATCH_SIZE = 10; + // new scanner + ScannerModel model = new ScannerModel(); + model.setBatch(BATCH_SIZE); + model.addColumn(Bytes.toBytes(COLUMN_1)); + + // test put operation is forbidden in read-only mode + conf.set("hbase.rest.readonly", "true"); + Response response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + assertEquals(403, response.getCode()); + String scannerURI = response.getLocation(); + assertNull(scannerURI); + + // recall previous put operation with read-only off + conf.set("hbase.rest.readonly", "false"); + response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + assertEquals(201, response.getCode()); + scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell set + response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + CellSetModel cellSet = new CellSetModel(); + cellSet.getObjectFromMessage(response.getBody()); + // confirm batch size conformance + assertEquals(BATCH_SIZE, countCellSet(cellSet)); + + // test delete scanner operation is forbidden in read-only mode + conf.set("hbase.rest.readonly", "true"); + response = client.delete(scannerURI); + assertEquals(403, response.getCode()); + + // recall previous delete scanner 
operation with read-only off + conf.set("hbase.rest.readonly", "false"); + response = client.delete(scannerURI); + assertEquals(200, response.getCode()); + } + + @Test + public void testSimpleScannerBinary() throws IOException { + // new scanner + ScannerModel model = new ScannerModel(); + model.setBatch(1); + model.addColumn(Bytes.toBytes(COLUMN_1)); + + // test put operation is forbidden in read-only mode + conf.set("hbase.rest.readonly", "true"); + Response response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + assertEquals(403, response.getCode()); + String scannerURI = response.getLocation(); + assertNull(scannerURI); + + // recall previous put operation with read-only off + conf.set("hbase.rest.readonly", "false"); + response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + assertEquals(201, response.getCode()); + scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell + response = client.get(scannerURI, Constants.MIMETYPE_BINARY); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_BINARY, response.getHeader("content-type")); + // verify that data was returned + assertTrue(response.getBody().length > 0); + // verify that the expected X-headers are present + boolean foundRowHeader = false, foundColumnHeader = false, + foundTimestampHeader = false; + for (Header header: response.getHeaders()) { + if (header.getName().equals("X-Row")) { + foundRowHeader = true; + } else if (header.getName().equals("X-Column")) { + foundColumnHeader = true; + } else if (header.getName().equals("X-Timestamp")) { + foundTimestampHeader = true; + } + } + assertTrue(foundRowHeader); + assertTrue(foundColumnHeader); + assertTrue(foundTimestampHeader); + + // test delete scanner operation is forbidden in read-only mode + conf.set("hbase.rest.readonly", "true"); + response = client.delete(scannerURI); + assertEquals(403, response.getCode()); + + // recall previous delete scanner operation with read-only off + conf.set("hbase.rest.readonly", "false"); + response = client.delete(scannerURI); + assertEquals(200, response.getCode()); + } + + @Test + public void testFullTableScan() throws IOException { + ScannerModel model = new ScannerModel(); + model.addColumn(Bytes.toBytes(COLUMN_1)); + assertEquals(expectedRows1, fullTableScan(model)); + + model = new ScannerModel(); + model.addColumn(Bytes.toBytes(COLUMN_2)); + assertEquals(expectedRows2, fullTableScan(model)); + } + + @Test + public void testTableDoesNotExist() throws IOException, JAXBException { + ScannerModel model = new ScannerModel(); + StringWriter writer = new StringWriter(); + marshaller.marshal(model, writer); + byte[] body = Bytes.toBytes(writer.toString()); + Response response = client.put("/" + NONEXISTENT_TABLE + + "/scanner", Constants.MIMETYPE_XML, body); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + response = client.get(scannerURI, Constants.MIMETYPE_XML); + assertEquals(404, response.getCode()); + } + + @Test + public void testTableScanWithTableDisable() throws IOException { + TEST_UTIL.getAdmin().disableTable(TABLE_TO_BE_DISABLED); + ScannerModel model = new ScannerModel(); + model.addColumn(Bytes.toBytes(COLUMN_1)); + model.setCaching(1); + Response response = client.put("/" + TABLE_TO_BE_DISABLED + "/scanner", + Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + // we will see the exception when we actually want to get the result. 
+ assertEquals(201, response.getCode()); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + response = client.get(scannerURI, Constants.MIMETYPE_PROTOBUF); + assertEquals(410, response.getCode()); + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java new file mode 100755 index 00000000..643f2c55 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -0,0 +1,1018 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.FilterList.Operator; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.InclusiveStopFilter; +import org.apache.hadoop.hbase.filter.PageFilter; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.filter.QualifierFilter; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.filter.SkipFilter; +import org.apache.hadoop.hbase.filter.SubstringComparator; +import org.apache.hadoop.hbase.filter.ValueFilter; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import 
org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.xml.bind.JAXBContext; +import javax.xml.bind.Marshaller; +import javax.xml.bind.Unmarshaller; +import java.io.ByteArrayInputStream; +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +@Category({RestTests.class, MediumTests.class}) +public class TestScannersWithFilters { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestScannersWithFilters.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestScannersWithFilters.class); + + private static final TableName TABLE = TableName.valueOf("TestScannersWithFilters"); + + private static final byte[][] ROWS_ONE = { + Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"), + Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3") + }; + + private static final byte[][] ROWS_TWO = { + Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"), + Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3") + }; + + private static final byte[][] FAMILIES = { + Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo") + }; + + private static final byte[][] QUALIFIERS_ONE = { + Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"), + Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3") + }; + + private static final byte[][] QUALIFIERS_TWO = { + Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"), + Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3") + }; + + private static final byte[][] VALUES = { + Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo") + }; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + private static Marshaller marshaller; + private static Unmarshaller unmarshaller; + private static long numRows = (long) ROWS_ONE.length + ROWS_TWO.length; + private static long colsPerRow = (long) FAMILIES.length * QUALIFIERS_ONE.length; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(3); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + context = JAXBContext.newInstance( + CellModel.class, + CellSetModel.class, + RowModel.class, + ScannerModel.class); + marshaller = context.createMarshaller(); + unmarshaller = context.createUnmarshaller(); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + Admin admin = TEST_UTIL.getAdmin(); + if (!admin.tableExists(TABLE)) { + TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor = + new TableDescriptorBuilder.ModifyableTableDescriptor(TABLE); + tableDescriptor.setColumnFamily( + new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILIES[0])); + tableDescriptor.setColumnFamily( + new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(FAMILIES[1])); + admin.createTable(tableDescriptor); + Table table = TEST_UTIL.getConnection().getTable(TABLE); + // Insert first half + 
for (byte[] ROW : ROWS_ONE) { + Put p = new Put(ROW); + p.setDurability(Durability.SKIP_WAL); + for (byte[] QUALIFIER : QUALIFIERS_ONE) { + p.addColumn(FAMILIES[0], QUALIFIER, VALUES[0]); + } + table.put(p); + } + for (byte[] ROW : ROWS_TWO) { + Put p = new Put(ROW); + p.setDurability(Durability.SKIP_WAL); + for (byte[] QUALIFIER : QUALIFIERS_TWO) { + p.addColumn(FAMILIES[1], QUALIFIER, VALUES[1]); + } + table.put(p); + } + + // Insert second half (reverse families) + for (byte[] ROW : ROWS_ONE) { + Put p = new Put(ROW); + p.setDurability(Durability.SKIP_WAL); + for (byte[] QUALIFIER : QUALIFIERS_ONE) { + p.addColumn(FAMILIES[1], QUALIFIER, VALUES[0]); + } + table.put(p); + } + for (byte[] ROW : ROWS_TWO) { + Put p = new Put(ROW); + p.setDurability(Durability.SKIP_WAL); + for (byte[] QUALIFIER : QUALIFIERS_TWO) { + p.addColumn(FAMILIES[0], QUALIFIER, VALUES[1]); + } + table.put(p); + } + + // Delete the second qualifier from all rows and families + for (byte[] ROW : ROWS_ONE) { + Delete d = new Delete(ROW); + d.addColumns(FAMILIES[0], QUALIFIERS_ONE[1]); + d.addColumns(FAMILIES[1], QUALIFIERS_ONE[1]); + table.delete(d); + } + for (byte[] ROW : ROWS_TWO) { + Delete d = new Delete(ROW); + d.addColumns(FAMILIES[0], QUALIFIERS_TWO[1]); + d.addColumns(FAMILIES[1], QUALIFIERS_TWO[1]); + table.delete(d); + } + colsPerRow -= 2; + + // Delete the second rows from both groups, one column at a time + for (byte[] QUALIFIER : QUALIFIERS_ONE) { + Delete d = new Delete(ROWS_ONE[1]); + d.addColumns(FAMILIES[0], QUALIFIER); + d.addColumns(FAMILIES[1], QUALIFIER); + table.delete(d); + } + for (byte[] QUALIFIER : QUALIFIERS_TWO) { + Delete d = new Delete(ROWS_TWO[1]); + d.addColumns(FAMILIES[0], QUALIFIER); + d.addColumns(FAMILIES[1], QUALIFIER); + table.delete(d); + } + numRows -= 2; + table.close(); + } + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + private static void verifyScan(Scan s, long expectedRows, long expectedKeys) + throws Exception { + ScannerModel model = ScannerModel.fromScan(s); + model.setBatch(Integer.MAX_VALUE); // fetch it all at once + StringWriter writer = new StringWriter(); + marshaller.marshal(model, writer); + LOG.debug(writer.toString()); + byte[] body = Bytes.toBytes(writer.toString()); + Response response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_XML, body); + assertEquals(201, response.getCode()); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell set + response = client.get(scannerURI, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + CellSetModel cells = (CellSetModel) + unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + + int rows = cells.getRows().size(); + assertEquals("Scanned too many rows! 
Only expected " + expectedRows + + " total but scanned " + rows, expectedRows, rows); + for (RowModel row : cells.getRows()) { + int count = row.getCells().size(); + assertEquals("Expected " + expectedKeys + " keys per row but " + + "returned " + count, expectedKeys, count); + } + + // delete the scanner + response = client.delete(scannerURI); + assertEquals(200, response.getCode()); + } + + private static void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception { + ScannerModel model = ScannerModel.fromScan(s); + model.setBatch(Integer.MAX_VALUE); // fetch it all at once + StringWriter writer = new StringWriter(); + marshaller.marshal(model, writer); + LOG.debug(writer.toString()); + byte[] body = Bytes.toBytes(writer.toString()); + Response response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_XML, body); + assertEquals(201, response.getCode()); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell set + response = client.get(scannerURI, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + CellSetModel cellSet = (CellSetModel) + unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + + // delete the scanner + response = client.delete(scannerURI); + assertEquals(200, response.getCode()); + + int row = 0; + int idx = 0; + Iterator i = cellSet.getRows().iterator(); + for (boolean done = true; done; row++) { + done = i.hasNext(); + if (!done) { + break; + } + + RowModel rowModel = i.next(); + List cells = rowModel.getCells(); + if (cells.isEmpty()) { + break; + } + + assertTrue("Scanned too many keys! Only expected " + kvs.length + + " total but already scanned " + (cells.size() + idx), + kvs.length >= idx + cells.size()); + for (CellModel cell: cells) { + assertTrue("Row mismatch", + Bytes.equals(rowModel.getKey(), CellUtil.cloneRow(kvs[idx]))); + byte[][] split = CellUtil.parseColumn(cell.getColumn()); + assertTrue("Family mismatch", + Bytes.equals(split[0], CellUtil.cloneFamily(kvs[idx]))); + assertTrue("Qualifier mismatch", + Bytes.equals(split[1], CellUtil.cloneQualifier(kvs[idx]))); + assertTrue("Value mismatch", + Bytes.equals(cell.getValue(), CellUtil.cloneValue(kvs[idx]))); + idx++; + } + } + assertEquals("Expected " + kvs.length + " total keys but scanned " + idx, + kvs.length, idx); + } + + private static void verifyScanNoEarlyOut(Scan s, long expectedRows, + long expectedKeys) throws Exception { + ScannerModel model = ScannerModel.fromScan(s); + model.setBatch(Integer.MAX_VALUE); // fetch it all at once + StringWriter writer = new StringWriter(); + marshaller.marshal(model, writer); + LOG.debug(writer.toString()); + byte[] body = Bytes.toBytes(writer.toString()); + Response response = client.put("/" + TABLE + "/scanner", + Constants.MIMETYPE_XML, body); + assertEquals(201, response.getCode()); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell set + response = client.get(scannerURI, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + CellSetModel cellSet = (CellSetModel) + unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody())); + + // delete the scanner + response = client.delete(scannerURI); + assertEquals(200, response.getCode()); + + Iterator i = cellSet.getRows().iterator(); + int j = 0; + for (boolean done = true; done; j++) { + done = i.hasNext(); + if (!done) { + 
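+ // no more rows in the returned cell set, so stop counting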
break; + } + + RowModel rowModel = i.next(); + List cells = rowModel.getCells(); + if (cells.isEmpty()) { + break; + } + + assertTrue("Scanned too many rows! Only expected " + expectedRows + + " total but already scanned " + (j+1), expectedRows > j); + assertEquals("Expected " + expectedKeys + " keys per row but " + + "returned " + cells.size(), expectedKeys, cells.size()); + } + assertEquals("Expected " + expectedRows + " rows but scanned " + j + + " rows", expectedRows, j); + } + + @Test + public void testNoFilter() throws Exception { + // No filter + long expectedRows = numRows; + long expectedKeys = colsPerRow; + + // Both families + Scan s = new Scan(); + verifyScan(s, expectedRows, expectedKeys); + + // One family + s = new Scan(); + s.addFamily(FAMILIES[0]); + verifyScan(s, expectedRows, expectedKeys/2); + } + + @Test + public void testPrefixFilter() throws Exception { + // Grab rows from group one (half of total) + long expectedRows = numRows / 2; + long expectedKeys = colsPerRow; + Scan s = new Scan(); + s.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne"))); + verifyScan(s, expectedRows, expectedKeys); + } + + @Test + public void testPageFilter() throws Exception { + // KVs in first 6 rows + KeyValue [] expectedKVs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], 
FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) + }; + + // Grab all 6 rows + long expectedRows = 6; + long expectedKeys = colsPerRow; + Scan s = new Scan(); + s.setFilter(new PageFilter(expectedRows)); + verifyScan(s, expectedRows, expectedKeys); + s.setFilter(new PageFilter(expectedRows)); + verifyScanFull(s, expectedKVs); + + // Grab first 4 rows (6 cols per row) + expectedRows = 4; + expectedKeys = colsPerRow; + s = new Scan(); + s.setFilter(new PageFilter(expectedRows)); + verifyScan(s, expectedRows, expectedKeys); + s.setFilter(new PageFilter(expectedRows)); + verifyScanFull(s, Arrays.copyOf(expectedKVs, 24)); + + // Grab first 2 rows + expectedRows = 2; + expectedKeys = colsPerRow; + s = new Scan(); + s.setFilter(new PageFilter(expectedRows)); + verifyScan(s, expectedRows, expectedKeys); + s.setFilter(new PageFilter(expectedRows)); + verifyScanFull(s, Arrays.copyOf(expectedKVs, 12)); + + // Grab first row + expectedRows = 1; + expectedKeys = colsPerRow; + s = new Scan(); + s.setFilter(new PageFilter(expectedRows)); + verifyScan(s, expectedRows, expectedKeys); + s.setFilter(new PageFilter(expectedRows)); + verifyScanFull(s, Arrays.copyOf(expectedKVs, 6)); + } + + @Test + public void testInclusiveStopFilter() throws Exception { + // Grab rows from group one + + // If we just use start/stop row, we get total/2 - 1 rows + long expectedRows = (numRows / 2) - 1; + long expectedKeys = colsPerRow; + Scan s = new Scan().withStartRow(Bytes.toBytes("testRowOne-0")) + .withStopRow(Bytes.toBytes("testRowOne-3")); + verifyScan(s, expectedRows, expectedKeys); + + // Now use start row with inclusive stop filter + expectedRows = numRows / 2; + s = new Scan().withStartRow(Bytes.toBytes("testRowOne-0")); + s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowOne-3"))); + verifyScan(s, expectedRows, expectedKeys); + + // Grab rows from group two + + // If we just use start/stop row, we get total/2 - 1 rows + expectedRows = (numRows / 2) - 1; + expectedKeys = colsPerRow; + s = new Scan().withStartRow(Bytes.toBytes("testRowTwo-0")) + .withStopRow(Bytes.toBytes("testRowTwo-3")); + verifyScan(s, expectedRows, expectedKeys); + + // Now use start row with inclusive stop filter + expectedRows = numRows / 2; + s = new Scan().withStartRow(Bytes.toBytes("testRowTwo-0")); + s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowTwo-3"))); + verifyScan(s, expectedRows, expectedKeys); + } + + @Test + public void testQualifierFilter() throws Exception { + // Match two keys (one from each family) in half the rows + long expectedRows = numRows / 2; + long expectedKeys = 2; + Filter f = new QualifierFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + Scan s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match keys less than same qualifier + // Expect only two keys (one from each family) in half the rows + expectedRows = numRows / 2; + expectedKeys = 2; + f = new QualifierFilter(CompareOperator.LESS, + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match keys less 
than or equal. Expect four keys (two from each family) in half the rows + expectedRows = numRows / 2; + expectedKeys = 4; + f = new QualifierFilter(CompareOperator.LESS_OR_EQUAL, + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match keys not equal. Expect four keys (two from each family) + // Only look in first group of rows + expectedRows = numRows / 2; + expectedKeys = 4; + f = new QualifierFilter(CompareOperator.NOT_EQUAL, + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) + .withStopRow(Bytes.toBytes("testRowTwo")); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match keys greater or equal. Expect four keys (two from each family) + // Only look in first group of rows + expectedRows = numRows / 2; + expectedKeys = 4; + f = new QualifierFilter(CompareOperator.GREATER_OR_EQUAL, + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) + .withStopRow(Bytes.toBytes("testRowTwo")); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match keys greater. Expect two keys (one from each family) + // Only look in first group of rows + expectedRows = numRows / 2; + expectedKeys = 2; + f = new QualifierFilter(CompareOperator.GREATER, + new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))); + s = new Scan().withStartRow(HConstants.EMPTY_START_ROW) + .withStopRow(Bytes.toBytes("testRowTwo")); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match keys not equal to. Look across rows and fully validate the keys and ordering + // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two + f = new QualifierFilter(CompareOperator.NOT_EQUAL, + new BinaryComparator(QUALIFIERS_ONE[2])); + s = new Scan(); + s.setFilter(f); + + KeyValue [] kvs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new 
KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + }; + verifyScanFull(s, kvs); + + // Test across rows and groups with a regex. Filter out "test*-2" + // Expect 4 keys per row across both groups + f = new QualifierFilter(CompareOperator.NOT_EQUAL, + new RegexStringComparator("test.+-2")); + s = new Scan(); + s.setFilter(f); + + kvs = new KeyValue [] { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + }; + verifyScanFull(s, kvs); + } + + @Test + public void testRowFilter() throws Exception { + // Match a single row, all keys + long expectedRows = 1; + long expectedKeys = colsPerRow; + Filter f = new RowFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + Scan s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match a two rows, one from each group, using regex + expectedRows = 2; + expectedKeys = colsPerRow; + f = new RowFilter(CompareOperator.EQUAL, + new RegexStringComparator("testRow.+-2")); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + 
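+ // The remaining cases walk the other CompareOperator values against the fixed row key
+ // "testRowOne-2"; the expected counts assume the 6-row fixture built in setUpBeforeClass().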
// Match rows less than + // Expect all keys in one row + expectedRows = 1; + expectedKeys = colsPerRow; + f = new RowFilter(CompareOperator.LESS, + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match rows less than or equal + // Expect all keys in two rows + expectedRows = 2; + expectedKeys = colsPerRow; + f = new RowFilter(CompareOperator.LESS_OR_EQUAL, + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match rows not equal + // Expect all keys in all but one row + expectedRows = numRows - 1; + expectedKeys = colsPerRow; + f = new RowFilter(CompareOperator.NOT_EQUAL, + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match rows greater or equal + // Expect all keys in all but one row + expectedRows = numRows - 1; + expectedKeys = colsPerRow; + f = new RowFilter(CompareOperator.GREATER_OR_EQUAL, + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match rows greater + // Expect all keys in all but two rows + expectedRows = numRows - 2; + expectedKeys = colsPerRow; + f = new RowFilter(CompareOperator.GREATER, + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match rows not equal to testRowOne-2 + // Look across rows and fully validate the keys and ordering + // Should see all keys in all rows but testRowOne-2 + f = new RowFilter(CompareOperator.NOT_EQUAL, + new BinaryComparator(Bytes.toBytes("testRowOne-2"))); + s = new Scan(); + s.setFilter(f); + + KeyValue [] kvs = { + // testRowOne-0 + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowOne-3 + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0],
VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + }; + verifyScanFull(s, kvs); + + // Test across rows and groups with a regex + // Filter out everything that doesn't match "*-2" + // Expect all keys in two rows + f = new RowFilter(CompareOperator.EQUAL, + new RegexStringComparator(".+-2")); + s = new Scan(); + s.setFilter(f); + + kvs = new KeyValue [] { + // testRowOne-2 + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]) + }; + verifyScanFull(s, kvs); + } + + @Test + public void testValueFilter() throws Exception { + // Match group one rows + long expectedRows = numRows / 2; + long expectedKeys = colsPerRow; + Filter f = new ValueFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("testValueOne"))); + Scan s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match group two rows + expectedRows = numRows / 2; + expectedKeys = colsPerRow; + f = new ValueFilter(CompareOperator.EQUAL, + new BinaryComparator(Bytes.toBytes("testValueTwo"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match all values using regex + expectedRows = numRows; + expectedKeys = colsPerRow; + f = new ValueFilter(CompareOperator.EQUAL, + new RegexStringComparator("testValue((One)|(Two))")); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match values less than + // Expect group one rows + expectedRows = numRows / 2; + expectedKeys = colsPerRow; + f = new ValueFilter(CompareOperator.LESS, + new BinaryComparator(Bytes.toBytes("testValueTwo"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match values less than or equal + // Expect all rows + expectedRows = numRows; + expectedKeys = colsPerRow; + f = new ValueFilter(CompareOperator.LESS_OR_EQUAL, + new BinaryComparator(Bytes.toBytes("testValueTwo"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match values less than or equal + // Expect group one rows + expectedRows = numRows / 2; + expectedKeys = colsPerRow; + f = new 
ValueFilter(CompareOperator.LESS_OR_EQUAL, + new BinaryComparator(Bytes.toBytes("testValueOne"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match values not equal + // Expect half the rows + expectedRows = numRows / 2; + expectedKeys = colsPerRow; + f = new ValueFilter(CompareOperator.NOT_EQUAL, + new BinaryComparator(Bytes.toBytes("testValueOne"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match values greater or equal + // Expect all rows + expectedRows = numRows; + expectedKeys = colsPerRow; + f = new ValueFilter(CompareOperator.GREATER_OR_EQUAL, + new BinaryComparator(Bytes.toBytes("testValueOne"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match values greater + // Expect half rows + expectedRows = numRows / 2; + expectedKeys = colsPerRow; + f = new ValueFilter(CompareOperator.GREATER, + new BinaryComparator(Bytes.toBytes("testValueOne"))); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, expectedRows, expectedKeys); + + // Match values not equal to testValueOne + // Look across rows and fully validate the keys and ordering + // Should see all keys in all group two rows + f = new ValueFilter(CompareOperator.NOT_EQUAL, + new BinaryComparator(Bytes.toBytes("testValueOne"))); + s = new Scan(); + s.setFilter(f); + + KeyValue [] kvs = { + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + }; + verifyScanFull(s, kvs); + } + + @Test + public void testSkipFilter() throws Exception { + // Test for qualifier regex: "testQualifierOne-2" + // Should only get rows from second group, and all keys + Filter f = new SkipFilter(new QualifierFilter(CompareOperator.NOT_EQUAL, + new BinaryComparator(Bytes.toBytes("testQualifierOne-2")))); + Scan s = new Scan(); + s.setFilter(f); + + KeyValue [] kvs = { + // testRowTwo-0 + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], 
QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-2 + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + // testRowTwo-3 + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]), + }; + verifyScanFull(s, kvs); + } + + @Test + public void testFilterList() throws Exception { + // Test getting a single row, single key using Row, Qualifier, and Value + // regular expression and substring filters + // Use must pass all + List filters = new ArrayList<>(3); + filters.add(new RowFilter(CompareOperator.EQUAL, + new RegexStringComparator(".+-2"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, + new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, + new SubstringComparator("One"))); + Filter f = new FilterList(Operator.MUST_PASS_ALL, filters); + Scan s = new Scan(); + s.addFamily(FAMILIES[0]); + s.setFilter(f); + KeyValue [] kvs = { + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]) + }; + verifyScanFull(s, kvs); + + // Test getting everything with a MUST_PASS_ONE filter including row, qf, + // val, regular expression and substring filters + filters.clear(); + filters.add(new RowFilter(CompareOperator.EQUAL, + new RegexStringComparator(".+Two.+"))); + filters.add(new QualifierFilter(CompareOperator.EQUAL, + new RegexStringComparator(".+-2"))); + filters.add(new ValueFilter(CompareOperator.EQUAL, + new SubstringComparator("One"))); + f = new FilterList(Operator.MUST_PASS_ONE, filters); + s = new Scan(); + s.setFilter(f); + verifyScanNoEarlyOut(s, numRows, colsPerRow); + } + + @Test + public void testFirstKeyOnlyFilter() throws Exception { + Scan s = new Scan(); + s.setFilter(new FirstKeyOnlyFilter()); + // Expected KVs, the first KV from each of the remaining 6 rows + KeyValue [] kvs = { + new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]), + new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]), + new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]) + }; + verifyScanFull(s, kvs); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java new file mode 100755 index 00000000..42e4d1b9 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java @@ -0,0 +1,250 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.StringWriter; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; +import javax.xml.bind.Unmarshaller; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.visibility.CellVisibility; +import org.apache.hadoop.hbase.security.visibility.ScanLabelGenerator; +import org.apache.hadoop.hbase.security.visibility.SimpleScanLabelGenerator; +import org.apache.hadoop.hbase.security.visibility.VisibilityClient; +import org.apache.hadoop.hbase.security.visibility.VisibilityConstants; +import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; +import org.apache.hadoop.hbase.security.visibility.VisibilityUtils; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestScannersWithLabels { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestScannersWithLabels.class); + + private static final TableName TABLE = 
TableName.valueOf("TestScannersWithLabels"); + private static final String CFA = "a"; + private static final String CFB = "b"; + private static final String COLUMN_1 = CFA + ":1"; + private static final String COLUMN_2 = CFB + ":2"; + private final static String TOPSECRET = "topsecret"; + private final static String PUBLIC = "public"; + private final static String PRIVATE = "private"; + private final static String CONFIDENTIAL = "confidential"; + private final static String SECRET = "secret"; + private static User SUPERUSER; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + private static Marshaller marshaller; + private static Unmarshaller unmarshaller; + private static Configuration conf; + + private static int insertData(TableName tableName, String column, double prob) + throws IOException { + byte[] k = new byte[3]; + byte[][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(column)); + + List puts = new ArrayList<>(9); + for (int i = 0; i < 9; i++) { + Put put = new Put(Bytes.toBytes("row" + i)); + put.setDurability(Durability.SKIP_WAL); + put.addColumn(famAndQf[0], famAndQf[1], k); + put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + + TOPSECRET)); + puts.add(put); + } + try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + table.put(puts); + } + return puts.size(); + } + + private static int countCellSet(CellSetModel model) { + int count = 0; + Iterator rows = model.getRows().iterator(); + while (rows.hasNext()) { + RowModel row = rows.next(); + Iterator cells = row.getCells().iterator(); + while (cells.hasNext()) { + cells.next(); + count++; + } + } + return count; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + SUPERUSER = User.createUserForTesting(conf, "admin", + new String[] { "supergroup" }); + conf = TEST_UTIL.getConfiguration(); + conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, + SimpleScanLabelGenerator.class, ScanLabelGenerator.class); + conf.set("hbase.superuser", SUPERUSER.getShortName()); + VisibilityTestUtil.enableVisiblityLabels(conf); + TEST_UTIL.startMiniCluster(1); + // Wait for the labels table to become available + TEST_UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000); + createLabels(); + setAuths(); + REST_TEST_UTIL.startServletContainer(conf); + client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance(CellModel.class, CellSetModel.class, RowModel.class, + ScannerModel.class); + marshaller = context.createMarshaller(); + unmarshaller = context.createUnmarshaller(); + Admin admin = TEST_UTIL.getAdmin(); + if (admin.tableExists(TABLE)) { + return; + } + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TABLE); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + admin.createTable(tableDescriptorBuilder.build()); + insertData(TABLE, COLUMN_1, 1.0); + insertData(TABLE, COLUMN_2, 0.5); + } + + @AfterClass + public static void 
tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + private static void createLabels() throws IOException, InterruptedException { + PrivilegedExceptionAction<VisibilityLabelsProtos.VisibilityLabelsResponse> action = () -> { + String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + } catch (Throwable t) { + throw new IOException(t); + } + return null; + }; + SUPERUSER.runAs(action); + } + + private static void setAuths() throws Exception { + String[] labels = { SECRET, CONFIDENTIAL, PRIVATE, PUBLIC, TOPSECRET }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.setAuths(conn, labels, User.getCurrent().getShortName()); + } catch (Throwable t) { + throw new IOException(t); + } + } + + @Test + public void testSimpleScannerXMLWithLabelsThatReceivesNoData() throws IOException, JAXBException { + final int BATCH_SIZE = 5; + // new scanner + ScannerModel model = new ScannerModel(); + model.setBatch(BATCH_SIZE); + model.addColumn(Bytes.toBytes(COLUMN_1)); + model.addLabel(PUBLIC); + StringWriter writer = new StringWriter(); + marshaller.marshal(model, writer); + byte[] body = Bytes.toBytes(writer.toString()); + // make sure the scanner PUT is allowed: turn read-only off + conf.set("hbase.rest.readonly", "false"); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); + assertEquals(201, response.getCode()); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell set + response = client.get(scannerURI, Constants.MIMETYPE_XML); + // Respond with 204 as there are no cells to be retrieved + assertEquals(204, response.getCode()); + // With no content in the payload, the 'Content-Type' header is not echoed back + } + + @Test + public void testSimpleScannerXMLWithLabelsThatReceivesData() throws IOException, JAXBException { + // new scanner + ScannerModel model = new ScannerModel(); + model.setBatch(5); + model.addColumn(Bytes.toBytes(COLUMN_1)); + model.addLabel(SECRET); + StringWriter writer = new StringWriter(); + marshaller.marshal(model, writer); + byte[] body = Bytes.toBytes(writer.toString()); + + // make sure the scanner PUT is allowed: turn read-only off + conf.set("hbase.rest.readonly", "false"); + Response response = client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, body); + assertEquals(201, response.getCode()); + String scannerURI = response.getLocation(); + assertNotNull(scannerURI); + + // get a cell set + response = client.get(scannerURI, Constants.MIMETYPE_XML); + // Respond with 200 since cells labelled SECRET are visible to this scanner + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + CellSetModel cellSet = (CellSetModel) unmarshaller.unmarshal(new ByteArrayInputStream(response + .getBody())); + assertEquals(5, countCellSet(cellSet)); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java new file mode 100755 index 00000000..609ee011 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java @@ -0,0 +1,262 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.StringWriter; +import java.util.Collection; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel; +import org.apache.hadoop.hbase.rest.model.TableSchemaModel; +import org.apache.hadoop.hbase.rest.model.TestTableSchemaModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@Category({RestTests.class, MediumTests.class}) +@RunWith(Parameterized.class) +public class TestSchemaResource { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSchemaResource.class); + + private static String TABLE1 = "TestSchemaResource1"; + private static String TABLE2 = "TestSchemaResource2"; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + private static Configuration conf; + private static TestTableSchemaModel testTableSchemaModel; + private static Header extraHdr = null; + + private static boolean csrfEnabled = true; + + @Parameterized.Parameters + public static Collection parameters() { + return HBaseCommonTestingUtility.BOOLEAN_PARAMETERIZED; + } + + public TestSchemaResource(Boolean csrf) { + csrfEnabled = csrf; + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + conf.setBoolean(RESTServer.REST_CSRF_ENABLED_KEY, csrfEnabled); + if (csrfEnabled) { + conf.set(RESTServer.REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY, ".*"); + } + extraHdr = new 
BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, ""); + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(conf); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + testTableSchemaModel = new TestTableSchemaModel(); + context = JAXBContext.newInstance( + ColumnSchemaModel.class, + TableSchemaModel.class); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @After + public void tearDown() throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + + for (String table : new String[] {TABLE1, TABLE2}) { + TableName t = TableName.valueOf(table); + if (admin.tableExists(t)) { + admin.disableTable(t); + admin.deleteTable(t); + } + } + + conf.set("hbase.rest.readonly", "false"); + } + + private static byte[] toXML(TableSchemaModel model) throws JAXBException { + StringWriter writer = new StringWriter(); + context.createMarshaller().marshal(model, writer); + return Bytes.toBytes(writer.toString()); + } + + private static TableSchemaModel fromXML(byte[] content) + throws JAXBException { + return (TableSchemaModel) context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(content)); + } + + @Test + public void testTableCreateAndDeleteXML() throws IOException, JAXBException { + String schemaPath = "/" + TABLE1 + "/schema"; + TableSchemaModel model; + Response response; + + Admin admin = TEST_UTIL.getAdmin(); + assertFalse("Table " + TABLE1 + " should not exist", + admin.tableExists(TableName.valueOf(TABLE1))); + + // create the table + model = testTableSchemaModel.buildTestModel(TABLE1); + testTableSchemaModel.checkModel(model, TABLE1); + if (csrfEnabled) { + // test put operation is forbidden without custom header + response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model)); + assertEquals(400, response.getCode()); + } + + response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model), extraHdr); + assertEquals("put failed with csrf " + (csrfEnabled ? 
"enabled" : "disabled"), + 201, response.getCode()); + + // recall the same put operation but in read-only mode + conf.set("hbase.rest.readonly", "true"); + response = client.put(schemaPath, Constants.MIMETYPE_XML, toXML(model), extraHdr); + assertEquals(403, response.getCode()); + + // retrieve the schema and validate it + response = client.get(schemaPath, Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + model = fromXML(response.getBody()); + testTableSchemaModel.checkModel(model, TABLE1); + + // with json retrieve the schema and validate it + response = client.get(schemaPath, Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + model = testTableSchemaModel.fromJSON(Bytes.toString(response.getBody())); + testTableSchemaModel.checkModel(model, TABLE1); + + if (csrfEnabled) { + // test delete schema operation is forbidden without custom header + response = client.delete(schemaPath); + assertEquals(400, response.getCode()); + } + + // test delete schema operation is forbidden in read-only mode + response = client.delete(schemaPath, extraHdr); + assertEquals(403, response.getCode()); + + // return read-only setting back to default + conf.set("hbase.rest.readonly", "false"); + + // delete the table and make sure HBase concurs + response = client.delete(schemaPath, extraHdr); + assertEquals(200, response.getCode()); + assertFalse(admin.tableExists(TableName.valueOf(TABLE1))); + } + + @Test + public void testTableCreateAndDeletePB() throws IOException { + String schemaPath = "/" + TABLE2 + "/schema"; + TableSchemaModel model; + Response response; + + Admin admin = TEST_UTIL.getAdmin(); + assertFalse(admin.tableExists(TableName.valueOf(TABLE2))); + + // create the table + model = testTableSchemaModel.buildTestModel(TABLE2); + testTableSchemaModel.checkModel(model, TABLE2); + + if (csrfEnabled) { + // test put operation is forbidden without custom header + response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + assertEquals(400, response.getCode()); + } + response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput(), extraHdr); + assertEquals("put failed with csrf " + (csrfEnabled ? 
"enabled" : "disabled"), + 201, response.getCode()); + + // recall the same put operation but in read-only mode + conf.set("hbase.rest.readonly", "true"); + response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput(), extraHdr); + assertNotNull(extraHdr); + assertEquals(403, response.getCode()); + + // retrieve the schema and validate it + response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + model = new TableSchemaModel(); + model.getObjectFromMessage(response.getBody()); + testTableSchemaModel.checkModel(model, TABLE2); + + // retrieve the schema and validate it with alternate pbuf type + response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF_IETF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type")); + model = new TableSchemaModel(); + model.getObjectFromMessage(response.getBody()); + testTableSchemaModel.checkModel(model, TABLE2); + + if (csrfEnabled) { + // test delete schema operation is forbidden without custom header + response = client.delete(schemaPath); + assertEquals(400, response.getCode()); + } + + // test delete schema operation is forbidden in read-only mode + response = client.delete(schemaPath, extraHdr); + assertEquals(403, response.getCode()); + + // return read-only setting back to default + conf.set("hbase.rest.readonly", "false"); + + // delete the table and make sure HBase concurs + response = client.delete(schemaPath, extraHdr); + assertEquals(200, response.getCode()); + assertFalse(admin.tableExists(TableName.valueOf(TABLE2))); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java new file mode 100755 index 00000000..1abb64d2 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecureRESTServer.java @@ -0,0 +1,428 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; + +import java.io.File; +import java.net.HttpURLConnection; +import java.net.URL; +import java.security.Principal; +import java.security.PrivilegedExceptionAction; + +import javax.ws.rs.core.MediaType; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.StartMiniClusterOption; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.rest.http.KeyStoreTestUtil; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.security.HBaseKerberosUtils; +import org.apache.hadoop.hbase.security.access.AccessControlClient; +import org.apache.hadoop.hbase.security.access.AccessControlConstants; +import org.apache.hadoop.hbase.security.access.AccessController; +import org.apache.hadoop.hbase.security.access.Permission.Action; +import org.apache.hadoop.hbase.security.token.TokenProvider; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.util.KerberosName; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.auth.AuthSchemeProvider; +import org.apache.http.auth.AuthScope; +import org.apache.http.auth.Credentials; +import org.apache.http.client.AuthCache; +import org.apache.http.client.CredentialsProvider; +import org.apache.http.client.config.AuthSchemes; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.config.Registry; +import org.apache.http.config.RegistryBuilder; +import org.apache.http.conn.HttpClientConnectionManager; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.auth.SPNegoSchemeFactory; +import org.apache.http.impl.client.BasicAuthCache; +import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.http.util.EntityUtils; +import 
org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache + * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. + */ +@Category({MiscTests.class, MediumTests.class}) +public class TestSecureRESTServer { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSecureRESTServer.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestSecureRESTServer.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST = new HBaseRESTTestingUtility(); + private static MiniHBaseCluster CLUSTER; + + private static final String HOSTNAME = "localhost"; + private static final String CLIENT_PRINCIPAL = "client"; + // The principal for accepting SPNEGO authn'ed requests (*must* be HTTP/fqdn) + private static final String SPNEGO_SERVICE_PRINCIPAL = "HTTP/" + HOSTNAME; + // The principal we use to connect to HBase + private static final String REST_SERVER_PRINCIPAL = "rest"; + private static final String SERVICE_PRINCIPAL = "hbase/" + HOSTNAME; + + private static URL baseUrl; + private static MiniKdc KDC; + private static RESTServer server; + private static File restServerKeytab; + private static File clientKeytab; + private static File serviceKeytab; + + @BeforeClass + public static void setupServer() throws Exception { + final File target = new File(System.getProperty("user.dir"), "target"); + assertTrue(target.exists()); + + /* + * Keytabs + */ + File keytabDir = new File(target, TestSecureRESTServer.class.getSimpleName() + + "_keytabs"); + if (keytabDir.exists()) { + FileUtils.deleteDirectory(keytabDir); + } + keytabDir.mkdirs(); + // Keytab for HBase services (RS, Master) + serviceKeytab = new File(keytabDir, "hbase.service.keytab"); + // The keytab for the REST server + restServerKeytab = new File(keytabDir, "spnego.keytab"); + // Keytab for the client + clientKeytab = new File(keytabDir, CLIENT_PRINCIPAL + ".keytab"); + + /* + * Update UGI + */ + Configuration conf = TEST_UTIL.getConfiguration(); + + /* + * Start KDC + */ + KDC = TEST_UTIL.setupMiniKdc(serviceKeytab); + KDC.createPrincipal(clientKeytab, CLIENT_PRINCIPAL); + KDC.createPrincipal(serviceKeytab, SERVICE_PRINCIPAL); + // REST server's keytab contains keys for both principals REST uses + KDC.createPrincipal(restServerKeytab, SPNEGO_SERVICE_PRINCIPAL, REST_SERVER_PRINCIPAL); + + // Set configuration for HBase + HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + HBaseKerberosUtils.setKeytabFileForTesting(serviceKeytab.getAbsolutePath()); + // Why doesn't `setKeytabFileForTesting` do this? 
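+    // Explicitly point the master at the service keytab and pin the master and
+    // region server hostnames to localhost before building the secured configuration.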
+ conf.set("hbase.master.keytab.file", serviceKeytab.getAbsolutePath()); + conf.set("hbase.regionserver.hostname", "localhost"); + conf.set("hbase.master.hostname", "localhost"); + HBaseKerberosUtils.setSecuredConfiguration(conf, + SERVICE_PRINCIPAL+ "@" + KDC.getRealm(), SPNEGO_SERVICE_PRINCIPAL+ "@" + KDC.getRealm()); + setHdfsSecuredConfiguration(conf); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + TokenProvider.class.getName(), AccessController.class.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + AccessController.class.getName()); + conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, + AccessController.class.getName()); + // Enable EXEC permission checking + conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); + conf.set("hbase.superuser", "hbase"); + conf.set("hadoop.proxyuser.rest.hosts", "*"); + conf.set("hadoop.proxyuser.rest.users", "*"); + UserGroupInformation.setConfiguration(conf); + + updateKerberosConfiguration(conf, REST_SERVER_PRINCIPAL, SPNEGO_SERVICE_PRINCIPAL, + restServerKeytab); + + // Start HDFS + TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder() + .numMasters(1) + .numRegionServers(1) + .numZkServers(1) + .build()); + + // Start REST + UserGroupInformation restUser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( + REST_SERVER_PRINCIPAL, restServerKeytab.getAbsolutePath()); + restUser.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + REST_TEST.startServletContainer(conf); + return null; + } + }); + baseUrl = new URL("http://localhost:" + REST_TEST.getServletPort()); + + LOG.info("HTTP server started: "+ baseUrl); + TEST_UTIL.waitTableAvailable(TableName.valueOf("hbase:acl")); + + // Let the REST server create, read, and write globally + UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( + SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + superuser.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { + AccessControlClient.grant( + conn, REST_SERVER_PRINCIPAL, Action.CREATE, Action.READ, Action.WRITE); + } catch (Throwable t) { + if (t instanceof Exception) { + throw (Exception) t; + } else { + throw new Exception(t); + } + } + return null; + } + }); + } + + @AfterClass + public static void stopServer() throws Exception { + try { + if (null != server) { + server.stop(); + } + } catch (Exception e) { + LOG.info("Failed to stop info server", e); + } + try { + if (CLUSTER != null) { + CLUSTER.shutdown(); + } + } catch (Exception e) { + LOG.info("Failed to stop HBase cluster", e); + } + try { + if (null != KDC) { + KDC.stop(); + } + } catch (Exception e) { + LOG.info("Failed to stop mini KDC", e); + } + } + + private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception { + // Set principal+keytab configuration for HDFS + conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); + conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, + SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, serviceKeytab.getAbsolutePath()); + conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, + SPNEGO_SERVICE_PRINCIPAL + "@" + KDC.getRealm()); + // Enable 
token access for HDFS blocks + conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); + // Only use HTTPS (required because we aren't using "secure" ports) + conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); + // Bind on localhost for spnego to have a chance at working + conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); + conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); + + // Generate SSL certs + File keystoresDir = new File(TEST_UTIL.getDataTestDir("keystore").toUri().getPath()); + keystoresDir.mkdirs(); + String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSecureRESTServer.class); + KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false); + + // Magic flag to tell hdfs to not fail on using ports above 1024 + conf.setBoolean("ignore.secure.ports.for.testing", true); + } + + private static void updateKerberosConfiguration(Configuration conf, + String serverPrincipal, String spnegoPrincipal, File serverKeytab) { + KerberosName.setRules("DEFAULT"); + + // Enable Kerberos (pre-req) + conf.set("hbase.security.authentication", "kerberos"); + conf.set(RESTServer.REST_AUTHENTICATION_TYPE, "kerberos"); + // User to talk to HBase as + conf.set(RESTServer.REST_KERBEROS_PRINCIPAL, serverPrincipal); + // User to accept SPNEGO-auth'd http calls as + conf.set("hbase.rest.authentication.kerberos.principal", spnegoPrincipal); + // Keytab for both principals above + conf.set(RESTServer.REST_KEYTAB_FILE, serverKeytab.getAbsolutePath()); + conf.set("hbase.rest.authentication.kerberos.keytab", serverKeytab.getAbsolutePath()); + } + + @Test + public void testPositiveAuthorization() throws Exception { + // Create a table, write a row to it, grant read perms to the client + UserGroupInformation superuser = UserGroupInformation.loginUserFromKeytabAndReturnUGI( + SERVICE_PRINCIPAL, serviceKeytab.getAbsolutePath()); + final TableName table = TableName.valueOf("publicTable"); + superuser.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { + TableDescriptor desc = TableDescriptorBuilder.newBuilder(table) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1")) + .build(); + conn.getAdmin().createTable(desc); + try (Table t = conn.getTable(table)) { + Put p = new Put(Bytes.toBytes("a")); + p.addColumn(Bytes.toBytes("f1"), new byte[0], Bytes.toBytes("1")); + t.put(p); + } + AccessControlClient.grant(conn, CLIENT_PRINCIPAL, Action.READ); + } catch (Throwable e) { + if (e instanceof Exception) { + throw (Exception) e; + } else { + throw new Exception(e); + } + } + return null; + } + }); + + // Read that row as the client + Pair pair = getClient(); + CloseableHttpClient client = pair.getFirst(); + HttpClientContext context = pair.getSecond(); + + HttpGet get = new HttpGet(new URL("http://localhost:"+ REST_TEST.getServletPort()).toURI() + + "/" + table + "/a"); + get.addHeader("Accept", "application/json"); + UserGroupInformation user = UserGroupInformation.loginUserFromKeytabAndReturnUGI( + CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); + String jsonResponse = user.doAs(new PrivilegedExceptionAction() { + @Override + public String run() throws Exception { + try (CloseableHttpResponse response = client.execute(get, context)) { + final int statusCode = response.getStatusLine().getStatusCode(); + assertEquals(response.getStatusLine().toString(), 
HttpURLConnection.HTTP_OK, statusCode); + HttpEntity entity = response.getEntity(); + return EntityUtils.toString(entity); + } + } + }); + ObjectMapper mapper = new JacksonJaxbJsonProvider() + .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + CellSetModel model = mapper.readValue(jsonResponse, CellSetModel.class); + assertEquals(1, model.getRows().size()); + RowModel row = model.getRows().get(0); + assertEquals("a", Bytes.toString(row.getKey())); + assertEquals(1, row.getCells().size()); + CellModel cell = row.getCells().get(0); + assertEquals("1", Bytes.toString(cell.getValue())); + } + + @Test + public void testNegativeAuthorization() throws Exception { + Pair pair = getClient(); + CloseableHttpClient client = pair.getFirst(); + HttpClientContext context = pair.getSecond(); + + StringEntity entity = new StringEntity( + "{\"name\":\"test\", \"ColumnSchema\":[{\"name\":\"f\"}]}", ContentType.APPLICATION_JSON); + HttpPut put = new HttpPut("http://localhost:"+ REST_TEST.getServletPort() + "/test/schema"); + put.setEntity(entity); + + + UserGroupInformation unprivileged = UserGroupInformation.loginUserFromKeytabAndReturnUGI( + CLIENT_PRINCIPAL, clientKeytab.getAbsolutePath()); + unprivileged.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + try (CloseableHttpResponse response = client.execute(put, context)) { + final int statusCode = response.getStatusLine().getStatusCode(); + HttpEntity entity = response.getEntity(); + assertEquals("Got response: "+ EntityUtils.toString(entity), + HttpURLConnection.HTTP_FORBIDDEN, statusCode); + } + return null; + } + }); + } + + private Pair getClient() { + HttpClientConnectionManager pool = new PoolingHttpClientConnectionManager(); + HttpHost host = new HttpHost("localhost", REST_TEST.getServletPort()); + Registry authRegistry = + RegistryBuilder.create().register(AuthSchemes.SPNEGO, + new SPNegoSchemeFactory(true, true)).build(); + CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, EmptyCredentials.INSTANCE); + AuthCache authCache = new BasicAuthCache(); + + CloseableHttpClient client = HttpClients.custom() + .setDefaultAuthSchemeRegistry(authRegistry) + .setConnectionManager(pool).build(); + + HttpClientContext context = HttpClientContext.create(); + context.setTargetHost(host); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); + context.setAuthCache(authCache); + + return new Pair<>(client, context); + } + + private static class EmptyCredentials implements Credentials { + public static final EmptyCredentials INSTANCE = new EmptyCredentials(); + + @Override public String getPassword() { + return null; + } + @Override public Principal getUserPrincipal() { + return null; + } + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java new file mode 100755 index 00000000..bf0c6950 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSecurityHeadersFilter.java @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest; + +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.junit.Assert.assertThat; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.junit.After; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestSecurityHeadersFilter { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSecurityHeadersFilter.class); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + + @After + public void tearDown() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testDefaultValues() throws Exception { + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + + String path = "/version/cluster"; + Response response = client.get(path); + assertThat(response.getCode(), equalTo(200)); + + assertThat("Header 'X-Content-Type-Options' is missing from Rest response", + response.getHeader("X-Content-Type-Options"), is(not((String)null))); + assertThat("Header 'X-Content-Type-Options' has invalid default value", + response.getHeader("X-Content-Type-Options"), equalTo("nosniff")); + + assertThat("Header 'X-XSS-Protection' is missing from Rest response", + response.getHeader("X-XSS-Protection"), is(not((String)null))); + assertThat("Header 'X-XSS-Protection' has invalid default value", + response.getHeader("X-XSS-Protection"), equalTo("1; mode=block")); + + assertThat("Header 'Strict-Transport-Security' should be missing from Rest response," + + "but it's present", + response.getHeader("Strict-Transport-Security"), is((String)null)); + assertThat("Header 'Content-Security-Policy' should be missing from Rest response," + + "but it's present", + response.getHeader("Content-Security-Policy"), is((String)null)); + } + + @Test + public void testHstsAndCspSettings() throws Exception { + TEST_UTIL.getConfiguration().set("hbase.http.filter.hsts.value", + "max-age=63072000;includeSubDomains;preload"); + TEST_UTIL.getConfiguration().set("hbase.http.filter.csp.value", + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + 
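+    // Start the mini cluster and REST server with the custom HSTS and CSP header values applied.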
TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + + String path = "/version/cluster"; + Response response = client.get(path); + assertThat(response.getCode(), equalTo(200)); + + assertThat("Header 'Strict-Transport-Security' is missing from Rest response", + response.getHeader("Strict-Transport-Security"), is(not((String)null))); + assertThat("Header 'Strict-Transport-Security' has invalid value", + response.getHeader("Strict-Transport-Security"), + equalTo("max-age=63072000;includeSubDomains;preload")); + + assertThat("Header 'Content-Security-Policy' is missing from Rest response", + response.getHeader("Content-Security-Policy"), is(not((String)null))); + assertThat("Header 'Content-Security-Policy' has invalid value", + response.getHeader("Content-Security-Policy"), + equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java new file mode 100755 index 00000000..92091208 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -0,0 +1,140 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({RestTests.class, MediumTests.class}) +public class TestStatusResource { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestStatusResource.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class); + + private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1"); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + private static Configuration conf; + + private static void validate(StorageClusterStatusModel model) { + assertNotNull(model); + assertTrue(model.getRegions() + ">= 1", model.getRegions() >= 1); + assertTrue(model.getRequests() >= 0); + assertTrue(model.getAverageLoad() >= 0.0); + assertNotNull(model.getLiveNodes()); + assertNotNull(model.getDeadNodes()); + assertFalse(model.getLiveNodes().isEmpty()); + boolean foundMeta = false; + for (StorageClusterStatusModel.Node node: model.getLiveNodes()) { + assertNotNull(node.getName()); + assertTrue(node.getStartCode() > 0L); + assertTrue(node.getRequests() >= 0); + for (StorageClusterStatusModel.Node.Region region: node.getRegions()) { + if (Bytes.equals(region.getName(), META_REGION_NAME)) { + foundMeta = true; + } + } + } + assertTrue(foundMeta); + } + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniCluster(); + TEST_UTIL.createTable(TableName.valueOf("TestStatusResource"), Bytes.toBytes("D")); + TEST_UTIL.createTable(TableName.valueOf("TestStatusResource2"), Bytes.toBytes("D")); + REST_TEST_UTIL.startServletContainer(conf); + Cluster cluster = new Cluster(); + cluster.add("localhost", REST_TEST_UTIL.getServletPort()); + client = new Client(cluster); + context = JAXBContext.newInstance(StorageClusterStatusModel.class); + TEST_UTIL.waitFor(6000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return TEST_UTIL.getMiniHBaseCluster().getClusterMetrics().getAverageLoad() > 0; + } + }); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + 
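+    // Stop the REST servlet container before shutting down the mini cluster.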
REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testGetClusterStatusXML() throws IOException, JAXBException { + Response response = client.get("/status/cluster", Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + StorageClusterStatusModel model = (StorageClusterStatusModel) + context.createUnmarshaller().unmarshal( + new ByteArrayInputStream(response.getBody())); + validate(model); + } + + @Test + public void testGetClusterStatusPB() throws IOException { + Response response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + StorageClusterStatusModel model = new StorageClusterStatusModel(); + model.getObjectFromMessage(response.getBody()); + validate(model); + response = client.get("/status/cluster", Constants.MIMETYPE_PROTOBUF_IETF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type")); + model = new StorageClusterStatusModel(); + model.getObjectFromMessage(response.getBody()); + validate(model); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java new file mode 100755 index 00000000..0bece66d --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -0,0 +1,265 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.TableInfoModel; +import org.apache.hadoop.hbase.rest.model.TableListModel; +import org.apache.hadoop.hbase.rest.model.TableModel; +import org.apache.hadoop.hbase.rest.model.TableRegionModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({RestTests.class, MediumTests.class}) +public class TestTableResource { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTableResource.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestTableResource.class); + + private static final TableName TABLE = TableName.valueOf("TestTableResource"); + private static final String COLUMN_FAMILY = "test"; + private static final String COLUMN = COLUMN_FAMILY + ":qualifier"; + private static final int NUM_REGIONS = 4; + private static List regionMap; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(3); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance( + TableModel.class, + TableInfoModel.class, + TableListModel.class, + TableRegionModel.class); + TEST_UTIL.createMultiRegionTable(TABLE, Bytes.toBytes(COLUMN_FAMILY), NUM_REGIONS); + byte[] k = new byte[3]; + byte [][] famAndQf = CellUtil.parseColumn(Bytes.toBytes(COLUMN)); + List puts = new ArrayList<>(); + for (byte b1 = 'a'; b1 < 'z'; b1++) { + for (byte b2 = 'a'; b2 < 'z'; b2++) { + for (byte b3 = 'a'; b3 < 'z'; b3++) { + k[0] = b1; + k[1] = b2; + k[2] = b3; + Put put = new Put(k); + put.setDurability(Durability.SKIP_WAL); + put.addColumn(famAndQf[0], famAndQf[1], k); + puts.add(put); + } + } + } + + Connection connection = 
TEST_UTIL.getConnection(); + + Table table = connection.getTable(TABLE); + table.put(puts); + table.close(); + + RegionLocator regionLocator = connection.getRegionLocator(TABLE); + List m = regionLocator.getAllRegionLocations(); + + // should have four regions now + assertEquals(NUM_REGIONS, m.size()); + regionMap = m; + LOG.error("regions: " + regionMap); + regionLocator.close(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + private static void checkTableList(TableListModel model) { + boolean found = false; + Iterator tables = model.getTables().iterator(); + assertTrue(tables.hasNext()); + while (tables.hasNext()) { + TableModel table = tables.next(); + if (table.getName().equals(TABLE.getNameAsString())) { + found = true; + break; + } + } + assertTrue(found); + } + + void checkTableInfo(TableInfoModel model) { + assertEquals(model.getName(), TABLE.getNameAsString()); + Iterator regions = model.getRegions().iterator(); + assertTrue(regions.hasNext()); + while (regions.hasNext()) { + TableRegionModel region = regions.next(); + boolean found = false; + LOG.debug("looking for region " + region.getName()); + for (HRegionLocation e: regionMap) { + RegionInfo hri = e.getRegion(); + // getRegionNameAsString uses Bytes.toStringBinary which escapes some non-printable + // characters + String hriRegionName = Bytes.toString(hri.getRegionName()); + String regionName = region.getName(); + LOG.debug("comparing to region " + hriRegionName); + if (hriRegionName.equals(regionName)) { + found = true; + byte[] startKey = hri.getStartKey(); + byte[] endKey = hri.getEndKey(); + ServerName serverName = e.getServerName(); + InetSocketAddress sa = + new InetSocketAddress(serverName.getHostname(), serverName.getPort()); + String location = sa.getHostName() + ":" + + Integer.valueOf(sa.getPort()); + assertEquals(hri.getRegionId(), region.getId()); + assertTrue(Bytes.equals(startKey, region.getStartKey())); + assertTrue(Bytes.equals(endKey, region.getEndKey())); + assertEquals(location, region.getLocation()); + break; + } + } + assertTrue("Couldn't find region " + region.getName(), found); + } + } + + @Test + public void testTableListText() throws IOException { + Response response = client.get("/", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type")); + } + + @Test + public void testTableListXML() throws IOException, JAXBException { + Response response = client.get("/", Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + TableListModel model = (TableListModel) + context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); + checkTableList(model); + } + + @Test + public void testTableListJSON() throws IOException { + Response response = client.get("/", Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + } + + @Test + public void testTableListPB() throws IOException, JAXBException { + Response response = client.get("/", Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + TableListModel model = new TableListModel(); + model.getObjectFromMessage(response.getBody()); + checkTableList(model); + 
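+    // Repeat the table-list request with the IETF protobuf media type and validate it the same way.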
response = client.get("/", Constants.MIMETYPE_PROTOBUF_IETF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type")); + model = new TableListModel(); + model.getObjectFromMessage(response.getBody()); + checkTableList(model); + } + + @Test + public void testTableInfoText() throws IOException { + Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type")); + } + + @Test + public void testTableInfoXML() throws IOException, JAXBException { + Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + TableInfoModel model = (TableInfoModel) + context.createUnmarshaller() + .unmarshal(new ByteArrayInputStream(response.getBody())); + checkTableInfo(model); + } + + @Test + public void testTableInfoJSON() throws IOException { + Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + } + + @Test + public void testTableInfoPB() throws IOException, JAXBException { + Response response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + TableInfoModel model = new TableInfoModel(); + model.getObjectFromMessage(response.getBody()); + checkTableInfo(model); + response = client.get("/" + TABLE + "/regions", Constants.MIMETYPE_PROTOBUF_IETF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type")); + model = new TableInfoModel(); + model.getObjectFromMessage(response.getBody()); + checkTableInfo(model); + } + +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java new file mode 100755 index 00000000..1d4ed42e --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableScan.java @@ -0,0 +1,707 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; +import java.io.DataInputStream; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.Serializable; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import javax.ws.rs.core.MediaType; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Unmarshaller; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.parsers.SAXParserFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.ParseFilter; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.CellModel; +import org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.xml.sax.InputSource; +import org.xml.sax.XMLReader; + +@Category({RestTests.class, MediumTests.class}) +public class TestTableScan { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTableScan.class); + + private static final TableName TABLE = TableName.valueOf("TestScanResource"); + private static final String CFA = "a"; + private static final String CFB = "b"; + private static final String COLUMN_1 = CFA + ":1"; + private static final String COLUMN_2 = CFB + ":2"; + private static final String COLUMN_EMPTY = CFA + ":"; + private static Client client; + private static int expectedRows1; + private static int expectedRows2; + private static int expectedRows3; + private static Configuration conf; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + conf = TEST_UTIL.getConfiguration(); + 
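+    // Register CustomFilter under the name used later in scan filter expressions.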
conf.set(Constants.CUSTOM_FILTERS, "CustomFilter:" + CustomFilter.class.getName()); + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(conf); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + Admin admin = TEST_UTIL.getAdmin(); + if (!admin.tableExists(TABLE)) { + TableDescriptorBuilder tableDescriptorBuilder = + TableDescriptorBuilder.newBuilder(TABLE); + ColumnFamilyDescriptor columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFA)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + columnFamilyDescriptor = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(CFB)).build(); + tableDescriptorBuilder.setColumnFamily(columnFamilyDescriptor); + admin.createTable(tableDescriptorBuilder.build()); + expectedRows1 = TestScannerResource.insertData(conf, TABLE, COLUMN_1, 1.0); + expectedRows2 = TestScannerResource.insertData(conf, TABLE, COLUMN_2, 0.5); + expectedRows3 = TestScannerResource.insertData(conf, TABLE, COLUMN_EMPTY, 1.0); + } + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.getAdmin().disableTable(TABLE); + TEST_UTIL.getAdmin().deleteTable(TABLE); + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testSimpleScannerXML() throws IOException, JAXBException { + // Test scanning particular columns + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_LIMIT + "=10"); + Response response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); + Unmarshaller ush = ctx.createUnmarshaller(); + CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream()); + int count = TestScannerResource.countCellSet(model); + assertEquals(10, count); + checkRowsNotNull(model); + + //Test with no limit. + builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + model = (CellSetModel) ush.unmarshal(response.getStream()); + count = TestScannerResource.countCellSet(model); + assertEquals(expectedRows1, count); + checkRowsNotNull(model); + + //Test with start and end row. 
+ builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_END_ROW + "=aay"); + response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + model = (CellSetModel) ush.unmarshal(response.getStream()); + count = TestScannerResource.countCellSet(model); + RowModel startRow = model.getRows().get(0); + assertEquals("aaa", Bytes.toString(startRow.getKey())); + RowModel endRow = model.getRows().get(model.getRows().size() - 1); + assertEquals("aax", Bytes.toString(endRow.getKey())); + assertEquals(24, count); + checkRowsNotNull(model); + + //Test with start row and limit. + builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_LIMIT + "=15"); + response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + model = (CellSetModel) ush.unmarshal(response.getStream()); + startRow = model.getRows().get(0); + assertEquals("aaa", Bytes.toString(startRow.getKey())); + count = TestScannerResource.countCellSet(model); + assertEquals(15, count); + checkRowsNotNull(model); + } + + @Test + public void testSimpleScannerJson() throws IOException { + // Test scanning particular columns with limit. + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_LIMIT + "=2"); + Response response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + ObjectMapper mapper = new JacksonJaxbJsonProvider() + .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); + int count = TestScannerResource.countCellSet(model); + assertEquals(2, count); + checkRowsNotNull(model); + + //Test scanning with no limit. + builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_2); + response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + model = mapper.readValue(response.getStream(), CellSetModel.class); + count = TestScannerResource.countCellSet(model); + assertEquals(expectedRows2, count); + checkRowsNotNull(model); + + //Test with start row and end row. 
+ builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_END_ROW + "=aay"); + response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + model = mapper.readValue(response.getStream(), CellSetModel.class); + RowModel startRow = model.getRows().get(0); + assertEquals("aaa", Bytes.toString(startRow.getKey())); + RowModel endRow = model.getRows().get(model.getRows().size() - 1); + assertEquals("aax", Bytes.toString(endRow.getKey())); + count = TestScannerResource.countCellSet(model); + assertEquals(24, count); + checkRowsNotNull(model); + } + + /** + * An example to scan using listener in unmarshaller for XML. + * @throws Exception the exception + */ + @Test + public void testScanUsingListenerUnmarshallerXML() throws Exception { + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_LIMIT + "=10"); + Response response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + JAXBContext context = JAXBContext.newInstance(ClientSideCellSetModel.class, RowModel.class, + CellModel.class); + Unmarshaller unmarshaller = context.createUnmarshaller(); + + final ClientSideCellSetModel.Listener listener = new ClientSideCellSetModel.Listener() { + @Override + public void handleRowModel(ClientSideCellSetModel helper, RowModel row) { + assertTrue(row.getKey() != null); + assertTrue(row.getCells().size() > 0); + } + }; + + // install the callback on all ClientSideCellSetModel instances + unmarshaller.setListener(new Unmarshaller.Listener() { + @Override + public void beforeUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(listener); + } + } + + @Override + public void afterUnmarshal(Object target, Object parent) { + if (target instanceof ClientSideCellSetModel) { + ((ClientSideCellSetModel) target).setCellSetModelListener(null); + } + } + }); + + // create a new XML parser + SAXParserFactory factory = SAXParserFactory.newInstance(); + factory.setNamespaceAware(true); + XMLReader reader = factory.newSAXParser().getXMLReader(); + reader.setContentHandler(unmarshaller.getUnmarshallerHandler()); + assertFalse(ClientSideCellSetModel.listenerInvoked); + reader.parse(new InputSource(response.getStream())); + assertTrue(ClientSideCellSetModel.listenerInvoked); + + } + + @Test + public void testStreamingJSON() throws Exception { + //Test with start row and end row. 
+ StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_END_ROW + "=aay"); + Response response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + + int count = 0; + ObjectMapper mapper = new JacksonJaxbJsonProvider() + .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + JsonFactory jfactory = new JsonFactory(mapper); + JsonParser jParser = jfactory.createJsonParser(response.getStream()); + boolean found = false; + while (jParser.nextToken() != JsonToken.END_OBJECT) { + if(jParser.getCurrentToken() == JsonToken.START_OBJECT && found) { + RowModel row = jParser.readValueAs(RowModel.class); + assertNotNull(row.getKey()); + for (int i = 0; i < row.getCells().size(); i++) { + if (count == 0) { + assertEquals("aaa", Bytes.toString(row.getKey())); + } + if (count == 23) { + assertEquals("aax", Bytes.toString(row.getKey())); + } + count++; + } + jParser.skipChildren(); + } else { + found = jParser.getCurrentToken() == JsonToken.START_ARRAY; + } + } + assertEquals(24, count); + } + + @Test + public void testSimpleScannerProtobuf() throws Exception { + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_LIMIT + "=15"); + Response response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + int rowCount = readProtobufStream(response.getStream()); + assertEquals(15, rowCount); + + //Test with start row and end row. + builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_END_ROW + "=aay"); + response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + rowCount = readProtobufStream(response.getStream()); + assertEquals(24, rowCount); + } + + private void checkRowsNotNull(CellSetModel model) { + for (RowModel row: model.getRows()) { + assertTrue(row.getKey() != null); + assertTrue(row.getCells().size() > 0); + } + } + + /** + * Read protobuf stream. + * @param inputStream the input stream + * @return The number of rows in the cell set model. + * @throws IOException Signals that an I/O exception has occurred. 
+ */ + public int readProtobufStream(InputStream inputStream) throws IOException{ + DataInputStream stream = new DataInputStream(inputStream); + CellSetModel model = null; + int rowCount = 0; + try { + while (true) { + byte[] lengthBytes = new byte[2]; + int readBytes = stream.read(lengthBytes); + if (readBytes == -1) { + break; + } + assertEquals(2, readBytes); + int length = Bytes.toShort(lengthBytes); + byte[] cellset = new byte[length]; + stream.read(cellset); + model = new CellSetModel(); + model.getObjectFromMessage(cellset); + checkRowsNotNull(model); + rowCount = rowCount + TestScannerResource.countCellSet(model); + } + } catch (EOFException exp) { + exp.printStackTrace(); + } finally { + stream.close(); + } + return rowCount; + } + + @Test + public void testScanningUnknownColumnJson() throws IOException { + // Test scanning particular columns with limit. + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=a:test"); + Response response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + ObjectMapper mapper = new JacksonJaxbJsonProvider().locateMapper(CellSetModel.class, + MediaType.APPLICATION_JSON_TYPE); + CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); + int count = TestScannerResource.countCellSet(model); + assertEquals(0, count); + } + + @Test + public void testSimpleFilter() throws IOException, JAXBException { + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_END_ROW + "=aay"); + builder.append("&"); + builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("PrefixFilter('aab')", "UTF-8")); + Response response = + client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); + Unmarshaller ush = ctx.createUnmarshaller(); + CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream()); + int count = TestScannerResource.countCellSet(model); + assertEquals(1, count); + assertEquals("aab", + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + } + + @Test + public void testQualifierAndPrefixFilters() throws IOException, JAXBException { + StringBuilder builder = new StringBuilder(); + builder.append("/abc*"); + builder.append("?"); + builder.append(Constants.SCAN_FILTER + "=" + + URLEncoder.encode("QualifierFilter(=,'binary:1')", "UTF-8")); + Response response = + client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); + Unmarshaller ush = ctx.createUnmarshaller(); + CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream()); + int count = TestScannerResource.countCellSet(model); + assertEquals(1, count); + assertEquals("abc", + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + } + + @Test + public void testCompoundFilter() throws IOException, JAXBException { + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + 
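+    // Combine a PrefixFilter and a QualifierFilter in a single filter expression.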
builder.append(Constants.SCAN_FILTER + "=" + + URLEncoder.encode("PrefixFilter('abc') AND QualifierFilter(=,'binary:1')", "UTF-8")); + Response response = + client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); + Unmarshaller ush = ctx.createUnmarshaller(); + CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream()); + int count = TestScannerResource.countCellSet(model); + assertEquals(1, count); + assertEquals("abc", + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + } + + @Test + public void testCustomFilter() throws IOException, JAXBException { + StringBuilder builder = new StringBuilder(); + builder.append("/a*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); + Response response = + client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); + Unmarshaller ush = ctx.createUnmarshaller(); + CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream()); + int count = TestScannerResource.countCellSet(model); + assertEquals(1, count); + assertEquals("abc", + new String(model.getRows().get(0).getCells().get(0).getValue(), StandardCharsets.UTF_8)); + } + + @Test + public void testNegativeCustomFilter() throws IOException, JAXBException { + StringBuilder builder = new StringBuilder(); + builder.append("/b*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_FILTER + "=" + URLEncoder.encode("CustomFilter('abc')", "UTF-8")); + Response response = + client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); + Unmarshaller ush = ctx.createUnmarshaller(); + CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream()); + int count = TestScannerResource.countCellSet(model); + // Should return no rows as the filters conflict + assertEquals(0, count); + } + + @Test + public void testReversed() throws IOException, JAXBException { + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_END_ROW + "=aay"); + Response response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + JAXBContext ctx = JAXBContext.newInstance(CellSetModel.class); + Unmarshaller ush = ctx.createUnmarshaller(); + CellSetModel model = (CellSetModel) ush.unmarshal(response.getStream()); + int count = TestScannerResource.countCellSet(model); + assertEquals(24, count); + List rowModels = model.getRows().subList(1, count); + + //reversed + builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_START_ROW + "=aay"); + builder.append("&"); + builder.append(Constants.SCAN_END_ROW + "=aaa"); + builder.append("&"); + builder.append(Constants.SCAN_REVERSED + 
"=true"); + response = client.get("/" + TABLE + builder.toString(), Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + model = (CellSetModel) ush.unmarshal(response.getStream()); + count = TestScannerResource.countCellSet(model); + assertEquals(24, count); + List reversedRowModels = model.getRows().subList(1, count); + + Collections.reverse(reversedRowModels); + assertEquals(rowModels.size(), reversedRowModels.size()); + for (int i = 0; i < rowModels.size(); i++) { + RowModel rowModel = rowModels.get(i); + RowModel reversedRowModel = reversedRowModels.get(i); + + assertEquals(new String(rowModel.getKey(), StandardCharsets.UTF_8), + new String(reversedRowModel.getKey(), StandardCharsets.UTF_8)); + assertEquals(new String(rowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8), + new String(reversedRowModel.getCells().get(0).getValue(), StandardCharsets.UTF_8)); + } + } + + @Test + public void testColumnWithEmptyQualifier() throws IOException { + // Test scanning with empty qualifier + StringBuilder builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); + Response response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + ObjectMapper mapper = new JacksonJaxbJsonProvider() + .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + CellSetModel model = mapper.readValue(response.getStream(), CellSetModel.class); + int count = TestScannerResource.countCellSet(model); + assertEquals(expectedRows3, count); + checkRowsNotNull(model); + RowModel startRow = model.getRows().get(0); + assertEquals("aaa", Bytes.toString(startRow.getKey())); + assertEquals(1, startRow.getCells().size()); + + // Test scanning with empty qualifier and normal qualifier + builder = new StringBuilder(); + builder.append("/*"); + builder.append("?"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_1); + builder.append("&"); + builder.append(Constants.SCAN_COLUMN + "=" + COLUMN_EMPTY); + response = client.get("/" + TABLE + builder.toString(), + Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + mapper = new JacksonJaxbJsonProvider() + .locateMapper(CellSetModel.class, MediaType.APPLICATION_JSON_TYPE); + model = mapper.readValue(response.getStream(), CellSetModel.class); + count = TestScannerResource.countCellSet(model); + assertEquals(expectedRows1 + expectedRows3, count); + checkRowsNotNull(model); + } + + public static class CustomFilter extends PrefixFilter { + private byte[] key = null; + + public CustomFilter(byte[] key) { + super(key); + } + + @Override + public boolean filterRowKey(Cell cell) { + int cmp = Bytes.compareTo(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), + this.key, 0, this.key.length); + return cmp != 0; + } + + public static Filter createFilterFromArguments(ArrayList filterArguments) { + byte[] prefix = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); + return new CustomFilter(prefix); + } + } + + /** + * The Class ClientSideCellSetModel which mimics cell set model, and contains listener to perform + * user defined operations on the row model. 
+   */
+  @XmlRootElement(name = "CellSet")
+  @XmlAccessorType(XmlAccessType.FIELD)
+  public static class ClientSideCellSetModel implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * This list is not a real list; instead it will notify a listener whenever JAXB has
+     * unmarshalled the next row.
+     */
+    @XmlElement(name = "Row")
+    private List<RowModel> row;
+
+    static boolean listenerInvoked = false;
+
+    /**
+     * Install a listener for row model on this object. If l is null, the listener
+     * is removed again.
+     */
+    public void setCellSetModelListener(final Listener l) {
+      row = (l == null) ? null : new ArrayList<RowModel>() {
+        private static final long serialVersionUID = 1L;
+
+        @Override
+        public boolean add(RowModel o) {
+          l.handleRowModel(ClientSideCellSetModel.this, o);
+          listenerInvoked = true;
+          return false;
+        }
+      };
+    }
+
+    /**
+     * This listener is invoked every time a new row model is unmarshalled.
+     */
+    public interface Listener {
+      void handleRowModel(ClientSideCellSetModel helper, RowModel rowModel);
+    }
+  }
+}
diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
new file mode 100755
index 00000000..bbd5e082
--- /dev/null
+++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestVersionResource.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.hbase.rest; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import javax.ws.rs.core.MediaType; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.rest.client.Client; +import org.apache.hadoop.hbase.rest.client.Cluster; +import org.apache.hadoop.hbase.rest.client.Response; +import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel; +import org.apache.hadoop.hbase.rest.model.VersionModel; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({RestTests.class, MediumTests.class}) +public class TestVersionResource { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestVersionResource.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestVersionResource.class); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private static Client client; + private static JAXBContext context; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + client = new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())); + context = JAXBContext.newInstance( + VersionModel.class, + StorageClusterVersionModel.class); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + private static void validate(VersionModel model) { + assertNotNull(model); + assertNotNull(model.getRESTVersion()); + assertEquals(RESTServlet.VERSION_STRING, model.getRESTVersion()); + String osVersion = model.getOSVersion(); + assertNotNull(osVersion); + assertTrue(osVersion.contains(System.getProperty("os.name"))); + assertTrue(osVersion.contains(System.getProperty("os.version"))); + assertTrue(osVersion.contains(System.getProperty("os.arch"))); + String jvmVersion = model.getJVMVersion(); + assertNotNull(jvmVersion); + assertTrue(jvmVersion.contains(System.getProperty("java.vm.vendor"))); + assertTrue(jvmVersion.contains(System.getProperty("java.version"))); + assertTrue(jvmVersion.contains(System.getProperty("java.vm.version"))); + assertNotNull(model.getServerVersion()); + String jerseyVersion = model.getJerseyVersion(); + assertNotNull(jerseyVersion); + // TODO: fix when we actually get a jersey version + // assertEquals(jerseyVersion, ServletContainer.class.getPackage().getImplementationVersion()); + } + + @Test + public void testGetStargateVersionText() throws IOException { + Response response = client.get("/version", Constants.MIMETYPE_TEXT); + assertEquals(200, 
response.getCode()); + assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type")); + String body = Bytes.toString(response.getBody()); + assertTrue(body.length() > 0); + assertTrue(body.contains(RESTServlet.VERSION_STRING)); + assertTrue(body.contains(System.getProperty("java.vm.vendor"))); + assertTrue(body.contains(System.getProperty("java.version"))); + assertTrue(body.contains(System.getProperty("java.vm.version"))); + assertTrue(body.contains(System.getProperty("os.name"))); + assertTrue(body.contains(System.getProperty("os.version"))); + assertTrue(body.contains(System.getProperty("os.arch"))); + // TODO: fix when we actually get a jersey version + // assertTrue(body.contains(ServletContainer.class.getPackage().getImplementationVersion())); + } + + @Test + public void testGetStargateVersionXML() throws IOException, JAXBException { + Response response = client.get("/version", Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + VersionModel model = (VersionModel) + context.createUnmarshaller().unmarshal( + new ByteArrayInputStream(response.getBody())); + validate(model); + LOG.info("success retrieving Stargate version as XML"); + } + + @Test + public void testGetStargateVersionJSON() throws IOException { + Response response = client.get("/version", Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + ObjectMapper mapper = new JacksonJaxbJsonProvider() + .locateMapper(VersionModel.class, MediaType.APPLICATION_JSON_TYPE); + VersionModel model + = mapper.readValue(response.getBody(), VersionModel.class); + validate(model); + LOG.info("success retrieving Stargate version as JSON"); + } + + @Test + public void testGetStargateVersionPB() throws IOException { + Response response = client.get("/version", Constants.MIMETYPE_PROTOBUF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF, response.getHeader("content-type")); + VersionModel model = new VersionModel(); + model.getObjectFromMessage(response.getBody()); + validate(model); + response = client.get("/version", Constants.MIMETYPE_PROTOBUF_IETF); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_PROTOBUF_IETF, response.getHeader("content-type")); + model = new VersionModel(); + model.getObjectFromMessage(response.getBody()); + validate(model); + } + + @Test + public void testGetStorageClusterVersionText() throws IOException { + Response response = client.get("/version/cluster", Constants.MIMETYPE_TEXT); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_TEXT, response.getHeader("content-type")); + } + + @Test + public void testGetStorageClusterVersionXML() throws IOException, + JAXBException { + Response response = client.get("/version/cluster",Constants.MIMETYPE_XML); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_XML, response.getHeader("content-type")); + StorageClusterVersionModel clusterVersionModel = + (StorageClusterVersionModel) + context.createUnmarshaller().unmarshal( + new ByteArrayInputStream(response.getBody())); + assertNotNull(clusterVersionModel); + assertNotNull(clusterVersionModel.getVersion()); + LOG.info("success retrieving storage cluster version as XML"); + } + + @Test + public void testGetStorageClusterVersionJSON() throws IOException { + Response response = client.get("/version/cluster", 
Constants.MIMETYPE_JSON); + assertEquals(200, response.getCode()); + assertEquals(Constants.MIMETYPE_JSON, response.getHeader("content-type")); + ObjectMapper mapper = new JacksonJaxbJsonProvider() + .locateMapper(StorageClusterVersionModel.class, MediaType.APPLICATION_JSON_TYPE); + StorageClusterVersionModel clusterVersionModel + = mapper.readValue(response.getBody(), StorageClusterVersionModel.class); + assertNotNull(clusterVersionModel); + assertNotNull(clusterVersionModel.getVersion()); + LOG.info("success retrieving storage cluster version as JSON"); + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java new file mode 100755 index 00000000..a47a7a7f --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java @@ -0,0 +1,419 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.rest.Constants; +import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel; +import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel; +import org.apache.hadoop.hbase.rest.model.TableListModel; +import org.apache.hadoop.hbase.rest.model.TableSchemaModel; +import org.apache.hadoop.hbase.rest.model.VersionModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Unmarshaller; +import javax.xml.stream.XMLInputFactory; +import javax.xml.stream.XMLStreamException; +import javax.xml.stream.XMLStreamReader; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InterruptedIOException; + +@InterfaceAudience.Private +public class RemoteAdmin { + + final Client client; + final Configuration conf; + final String accessToken; + final int maxRetries; + final long sleepTime; + + // This unmarshaller is necessary for getting the /version/cluster resource. + // This resource does not support protobufs. Therefore this is necessary to + // request/interpret it as XML. 
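+  // The unmarshaller is created lazily and cached in a static field, since building a new
+  // JAXBContext for every request would be comparatively expensive.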
+ private static volatile Unmarshaller versionClusterUnmarshaller; + + /** + * Constructor + * + * @param client + * @param conf + */ + public RemoteAdmin(Client client, Configuration conf) { + this(client, conf, null); + } + + static Unmarshaller getUnmarsheller() throws JAXBException { + + if (versionClusterUnmarshaller == null) { + + RemoteAdmin.versionClusterUnmarshaller = JAXBContext.newInstance( + StorageClusterVersionModel.class).createUnmarshaller(); + } + return RemoteAdmin.versionClusterUnmarshaller; + } + + /** + * Constructor + * @param client + * @param conf + * @param accessToken + */ + public RemoteAdmin(Client client, Configuration conf, String accessToken) { + this.client = client; + this.conf = conf; + this.accessToken = accessToken; + this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10); + this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000); + } + + /** + * @param tableName name of table to check + * @return true if all regions of the table are available + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableAvailable(String tableName) throws IOException { + return isTableAvailable(Bytes.toBytes(tableName)); + } + + /** + * @return string representing the rest api's version + * @throws IOException + * if the endpoint does not exist, there is a timeout, or some other + * general failure mode + */ + public VersionModel getRestVersion() throws IOException { + + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); + path.append('/'); + } + + path.append("version/rest"); + + int code = 0; + for (int i = 0; i < maxRetries; i++) { + Response response = client.get(path.toString(), + Constants.MIMETYPE_PROTOBUF); + code = response.getCode(); + switch (code) { + case 200: + + VersionModel v = new VersionModel(); + return (VersionModel) v.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("REST version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + + " returned " + code); + } + } + throw new IOException("get request to " + path.toString() + " timed out"); + } + + /** + * @return string representing the cluster's version + * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode + */ + public StorageClusterStatusModel getClusterStatus() throws IOException { + + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken !=null) { + path.append(accessToken); + path.append('/'); + } + + path.append("status/cluster"); + + int code = 0; + for (int i = 0; i < maxRetries; i++) { + Response response = client.get(path.toString(), + Constants.MIMETYPE_PROTOBUF); + code = response.getCode(); + switch (code) { + case 200: + StorageClusterStatusModel s = new StorageClusterStatusModel(); + return (StorageClusterStatusModel) s.getObjectFromMessage(response + .getBody()); + case 404: + throw new IOException("Cluster version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path + " returned " + code); + } + } + throw new IOException("get request to " + path + " timed out"); + } + 
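+  // All of the admin calls in this class share the same retry protocol: HTTP 509 means the REST
+  // gateway is overloaded, so the client sleeps for "hbase.rest.client.sleep" milliseconds and
+  // retries up to "hbase.rest.client.max.retries" times before giving up with an IOException.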
+ /** + * @return string representing the cluster's version + * @throws IOException + * if the endpoint does not exist, there is a timeout, or some other + * general failure mode + */ + public StorageClusterVersionModel getClusterVersion() throws IOException { + + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); + path.append('/'); + } + + path.append("version/cluster"); + + int code = 0; + for (int i = 0; i < maxRetries; i++) { + Response response = client.get(path.toString(), Constants.MIMETYPE_XML); + code = response.getCode(); + switch (code) { + case 200: + try { + + return (StorageClusterVersionModel) getUnmarsheller().unmarshal( + getInputStream(response)); + } catch (JAXBException jaxbe) { + + throw new IOException( + "Issue parsing StorageClusterVersionModel object in XML form: " + + jaxbe.getLocalizedMessage(), jaxbe); + } + case 404: + throw new IOException("Cluster version not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException(path.toString() + " request returned " + code); + } + } + throw new IOException("get request to " + path.toString() + + " request timed out"); + } + + /** + * @param tableName name of table to check + * @return true if all regions of the table are available + * @throws IOException if a remote or network exception occurs + */ + public boolean isTableAvailable(byte[] tableName) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); + path.append('/'); + } + path.append(Bytes.toStringBinary(tableName)); + path.append('/'); + path.append("exists"); + int code = 0; + for (int i = 0; i < maxRetries; i++) { + Response response = client.get(path.toString(), Constants.MIMETYPE_PROTOBUF); + code = response.getCode(); + switch (code) { + case 200: + return true; + case 404: + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + " returned " + code); + } + } + throw new IOException("get request to " + path.toString() + " timed out"); + } + + /** + * Creates a new table. + * @param desc table descriptor for table + * @throws IOException if a remote or network exception occurs + */ + public void createTable(TableDescriptor desc) + throws IOException { + TableSchemaModel model = new TableSchemaModel(desc); + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); + path.append('/'); + } + path.append(desc.getTableName()); + path.append('/'); + path.append("schema"); + int code = 0; + for (int i = 0; i < maxRetries; i++) { + Response response = client.put(path.toString(), Constants.MIMETYPE_PROTOBUF, + model.createProtobufOutput()); + code = response.getCode(); + switch (code) { + case 201: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("create request to " + path.toString() + " returned " + code); + } + } + throw new IOException("create request to " + path.toString() + " timed out"); + } + + /** + * Deletes a table. 
+ * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + */ + public void deleteTable(final String tableName) throws IOException { + deleteTable(Bytes.toBytes(tableName)); + } + + /** + * Deletes a table. + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + */ + public void deleteTable(final byte [] tableName) throws IOException { + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); + path.append('/'); + } + path.append(Bytes.toStringBinary(tableName)); + path.append('/'); + path.append("schema"); + int code = 0; + for (int i = 0; i < maxRetries; i++) { + Response response = client.delete(path.toString()); + code = response.getCode(); + switch (code) { + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("delete request to " + path.toString() + " returned " + code); + } + } + throw new IOException("delete request to " + path.toString() + " timed out"); + } + + /** + * @return string representing the cluster's version + * @throws IOException + * if the endpoint does not exist, there is a timeout, or some other + * general failure mode + */ + public TableListModel getTableList() throws IOException { + + StringBuilder path = new StringBuilder(); + path.append('/'); + if (accessToken != null) { + path.append(accessToken); + path.append('/'); + } + + int code = 0; + for (int i = 0; i < maxRetries; i++) { + // Response response = client.get(path.toString(), + // Constants.MIMETYPE_XML); + Response response = client.get(path.toString(), + Constants.MIMETYPE_PROTOBUF); + code = response.getCode(); + switch (code) { + case 200: + TableListModel t = new TableListModel(); + return (TableListModel) t.getObjectFromMessage(response.getBody()); + case 404: + throw new IOException("Table list not found"); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request to " + path.toString() + + " request returned " + code); + } + } + throw new IOException("get request to " + path.toString() + + " request timed out"); + } + + /** + * Convert the REST server's response to an XML reader. + * + * @param response The REST server's response. + * @return A reader over the parsed XML document. 
+ * @throws IOException If the document fails to parse + */ + private XMLStreamReader getInputStream(Response response) throws IOException { + try { + // Prevent the parser from reading XMl with external entities defined + XMLInputFactory xif = XMLInputFactory.newFactory(); + xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false); + xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); + return xif.createXMLStreamReader(new ByteArrayInputStream(response.getBody())); + } catch (XMLStreamException e) { + throw new IOException("Failed to parse XML", e); + } + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java new file mode 100755 index 00000000..4b6d23b1 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -0,0 +1,877 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.client; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.rest.Constants; +import org.apache.hadoop.hbase.rest.model.CellModel; +import 
org.apache.hadoop.hbase.rest.model.CellSetModel; +import org.apache.hadoop.hbase.rest.model.RowModel; +import org.apache.hadoop.hbase.rest.model.ScannerModel; +import org.apache.hadoop.hbase.rest.model.TableSchemaModel; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.Service; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + +/** + * HTable interface to remote tables accessed via REST gateway + */ +@InterfaceAudience.Private +public class RemoteHTable implements Table { + + private static final Logger LOG = LoggerFactory.getLogger(RemoteHTable.class); + + final Client client; + final Configuration conf; + final byte[] name; + final int maxRetries; + final long sleepTime; + + @SuppressWarnings("rawtypes") + protected String buildRowSpec(final byte[] row, final Map familyMap, final long startTime, + final long endTime, final int maxVersions) { + StringBuffer sb = new StringBuffer(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append('/'); + sb.append(toURLEncodedBytes(row)); + Set families = familyMap.entrySet(); + if (families != null) { + Iterator i = familyMap.entrySet().iterator(); + sb.append('/'); + while (i.hasNext()) { + Map.Entry e = (Map.Entry) i.next(); + Collection quals = (Collection) e.getValue(); + if (quals == null || quals.isEmpty()) { + // this is an unqualified family. append the family name and NO ':' + sb.append(toURLEncodedBytes((byte[]) e.getKey())); + } else { + Iterator ii = quals.iterator(); + while (ii.hasNext()) { + sb.append(toURLEncodedBytes((byte[]) e.getKey())); + Object o = ii.next(); + // Puts use byte[] but Deletes use KeyValue + if (o instanceof byte[]) { + sb.append(':'); + sb.append(toURLEncodedBytes((byte[]) o)); + } else if (o instanceof KeyValue) { + if (((KeyValue) o).getQualifierLength() != 0) { + sb.append(':'); + sb.append(toURLEncodedBytes(CellUtil.cloneQualifier((KeyValue) o))); + } + } else { + throw new RuntimeException("object type not handled"); + } + if (ii.hasNext()) { + sb.append(','); + } + } + } + if (i.hasNext()) { + sb.append(','); + } + } + } + if (startTime >= 0 && endTime != Long.MAX_VALUE) { + sb.append('/'); + sb.append(startTime); + if (startTime != endTime) { + sb.append(','); + sb.append(endTime); + } + } else if (endTime != Long.MAX_VALUE) { + sb.append('/'); + sb.append(endTime); + } + if (maxVersions > 1) { + sb.append("?v="); + sb.append(maxVersions); + } + return sb.toString(); + } + + protected String buildMultiRowSpec(final byte[][] rows, int maxVersions) { + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append("/multiget/"); + if (rows == null || rows.length == 0) { + return sb.toString(); + } + sb.append("?"); + for (int i = 0; i < rows.length; i++) { + byte[] rk = rows[i]; + if (i != 0) { + sb.append('&'); + } + sb.append("row="); + sb.append(toURLEncodedBytes(rk)); + } + sb.append("&v="); + sb.append(maxVersions); + + return sb.toString(); + } + + protected Result[] buildResultFromModel(final CellSetModel model) { + List results = new ArrayList<>(); + for (RowModel row : model.getRows()) { + List kvs = new 
ArrayList<>(row.getCells().size()); + for (CellModel cell : row.getCells()) { + byte[][] split = CellUtil.parseColumn(cell.getColumn()); + byte[] column = split[0]; + byte[] qualifier = null; + if (split.length == 1) { + qualifier = HConstants.EMPTY_BYTE_ARRAY; + } else if (split.length == 2) { + qualifier = split[1]; + } else { + throw new IllegalArgumentException("Invalid familyAndQualifier provided."); + } + kvs + .add(new KeyValue(row.getKey(), column, qualifier, cell.getTimestamp(), cell.getValue())); + } + results.add(Result.create(kvs)); + } + return results.toArray(new Result[results.size()]); + } + + protected CellSetModel buildModelFromPut(Put put) { + RowModel row = new RowModel(put.getRow()); + long ts = put.getTimestamp(); + for (List cells : put.getFamilyCellMap().values()) { + for (Cell cell : cells) { + row.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), + ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(), CellUtil.cloneValue(cell))); + } + } + CellSetModel model = new CellSetModel(); + model.addRow(row); + return model; + } + + /** + * Constructor + */ + public RemoteHTable(Client client, String name) { + this(client, HBaseConfiguration.create(), Bytes.toBytes(name)); + } + + /** + * Constructor + */ + public RemoteHTable(Client client, Configuration conf, String name) { + this(client, conf, Bytes.toBytes(name)); + } + + /** + * Constructor + */ + public RemoteHTable(Client client, Configuration conf, byte[] name) { + this.client = client; + this.conf = conf; + this.name = name; + this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10); + this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000); + } + + public byte[] getTableName() { + return name.clone(); + } + + @Override + public TableName getName() { + return TableName.valueOf(name); + } + + @Override + public Configuration getConfiguration() { + return conf; + } + + @Override + public void close() throws IOException { + client.shutdown(); + } + + @Override + public Result get(Get get) throws IOException { + TimeRange range = get.getTimeRange(); + String spec = buildRowSpec(get.getRow(), get.getFamilyMap(), range.getMin(), range.getMax(), + get.getMaxVersions()); + if (get.getFilter() != null) { + LOG.warn("filters not supported on gets"); + } + Result[] results = getResults(spec); + if (results.length > 0) { + if (results.length > 1) { + LOG.warn("too many results for get (" + results.length + ")"); + } + return results[0]; + } else { + return new Result(); + } + } + + @Override + public Result[] get(List gets) throws IOException { + byte[][] rows = new byte[gets.size()][]; + int maxVersions = 1; + int count = 0; + + for (Get g : gets) { + + if (count == 0) { + maxVersions = g.getMaxVersions(); + } else if (g.getMaxVersions() != maxVersions) { + LOG.warn( + "MaxVersions on Gets do not match, using the first in the list (" + maxVersions + ")"); + } + + if (g.getFilter() != null) { + LOG.warn("filters not supported on gets"); + } + + rows[count] = g.getRow(); + count++; + } + + String spec = buildMultiRowSpec(rows, maxVersions); + + return getResults(spec); + } + + private Result[] getResults(String spec) throws IOException { + for (int i = 0; i < maxRetries; i++) { + Response response = client.get(spec, Constants.MIMETYPE_PROTOBUF); + int code = response.getCode(); + switch (code) { + case 200: + CellSetModel model = new CellSetModel(); + model.getObjectFromMessage(response.getBody()); + Result[] results = buildResultFromModel(model); + if (results.length > 
0) { + return results; + } + // fall through + case 404: + return new Result[0]; + + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("get request returned " + code); + } + } + throw new IOException("get request timed out"); + } + + @Override + public boolean exists(Get get) throws IOException { + LOG.warn("exists() is really get(), just use get()"); + Result result = get(get); + return (result != null && !(result.isEmpty())); + } + + @Override + public boolean[] exists(List gets) throws IOException { + LOG.warn("exists(List) is really list of get() calls, just use get()"); + boolean[] results = new boolean[gets.size()]; + for (int i = 0; i < results.length; i++) { + results[i] = exists(gets.get(i)); + } + return results; + } + + @Override + public void put(Put put) throws IOException { + CellSetModel model = buildModelFromPut(put); + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append('/'); + sb.append(toURLEncodedBytes(put.getRow())); + for (int i = 0; i < maxRetries; i++) { + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + int code = response.getCode(); + switch (code) { + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("put request failed with " + code); + } + } + throw new IOException("put request timed out"); + } + + @Override + public void put(List puts) throws IOException { + // this is a trick: The gateway accepts multiple rows in a cell set and + // ignores the row specification in the URI + + // separate puts by row + TreeMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Put put : puts) { + byte[] row = put.getRow(); + List cells = map.get(row); + if (cells == null) { + cells = new ArrayList<>(); + map.put(row, cells); + } + for (List l : put.getFamilyCellMap().values()) { + cells.addAll(l); + } + } + + // build the cell set + CellSetModel model = new CellSetModel(); + for (Map.Entry> e : map.entrySet()) { + RowModel row = new RowModel(e.getKey()); + for (Cell cell : e.getValue()) { + row.addCell(new CellModel(cell)); + } + model.addRow(row); + } + + // build path for multiput + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append("/$multiput"); // can be any nonexistent row + for (int i = 0; i < maxRetries; i++) { + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + int code = response.getCode(); + switch (code) { + case 200: + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("multiput request failed with " + code); + } + } + throw new IOException("multiput request timed out"); + } + + @Override + public void delete(Delete delete) throws IOException { + String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), delete.getTimestamp(), + delete.getTimestamp(), 1); + for (int i = 0; i < maxRetries; i++) { + Response response = client.delete(spec); + int code = response.getCode(); + switch (code) { + case 200: + return; + case 509: + try { + 
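+          // The gateway answers 509 when it is too busy; back off for sleepTime and retry.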
Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("delete request failed with " + code); + } + } + throw new IOException("delete request timed out"); + } + + @Override + public void delete(List deletes) throws IOException { + for (Delete delete : deletes) { + delete(delete); + } + } + + public void flushCommits() throws IOException { + // no-op + } + + @Override + public TableDescriptor getDescriptor() throws IOException { + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append('/'); + sb.append("schema"); + for (int i = 0; i < maxRetries; i++) { + Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF); + int code = response.getCode(); + switch (code) { + case 200: + TableSchemaModel schema = new TableSchemaModel(); + schema.getObjectFromMessage(response.getBody()); + return schema.getTableDescriptor(); + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("schema request returned " + code); + } + } + throw new IOException("schema request timed out"); + } + + class Scanner implements ResultScanner { + + String uri; + + public Scanner(Scan scan) throws IOException { + ScannerModel model; + try { + model = ScannerModel.fromScan(scan); + } catch (Exception e) { + throw new IOException(e); + } + StringBuffer sb = new StringBuffer(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append('/'); + sb.append("scanner"); + for (int i = 0; i < maxRetries; i++) { + Response response = + client.post(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + int code = response.getCode(); + switch (code) { + case 201: + uri = response.getLocation(); + return; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("scan request failed with " + code); + } + } + throw new IOException("scan request timed out"); + } + + @Override + public Result[] next(int nbRows) throws IOException { + StringBuilder sb = new StringBuilder(uri); + sb.append("?n="); + sb.append(nbRows); + for (int i = 0; i < maxRetries; i++) { + Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF); + int code = response.getCode(); + switch (code) { + case 200: + CellSetModel model = new CellSetModel(); + model.getObjectFromMessage(response.getBody()); + return buildResultFromModel(model); + case 204: + case 206: + return null; + case 509: + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("scanner.next request failed with " + code); + } + } + throw new IOException("scanner.next request timed out"); + } + + @Override + public Result next() throws IOException { + Result[] results = next(1); + if (results == null || results.length < 1) { + return null; + } + return results[0]; + } + + class Iter implements Iterator { + + Result cache; + + public Iter() { + try { + cache = Scanner.this.next(); + } catch (IOException e) { + LOG.warn(StringUtils.stringifyException(e)); + } + } + + @Override + public boolean hasNext() { + return cache != null; + } + + 
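+      // One Result of read-ahead is kept in "cache" so hasNext() can answer without another REST
+      // round trip; next() hands out the cached row and immediately fetches the following one.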
@Override + public Result next() { + Result result = cache; + try { + cache = Scanner.this.next(); + } catch (IOException e) { + LOG.warn(StringUtils.stringifyException(e)); + cache = null; + } + return result; + } + + @Override + public void remove() { + throw new RuntimeException("remove() not supported"); + } + + } + + @Override + public Iterator iterator() { + return new Iter(); + } + + @Override + public void close() { + try { + client.delete(uri); + } catch (IOException e) { + LOG.warn(StringUtils.stringifyException(e)); + } + } + + @Override + public boolean renewLease() { + throw new RuntimeException("renewLease() not supported"); + } + + @Override + public ScanMetrics getScanMetrics() { + throw new RuntimeException("getScanMetrics() not supported"); + } + } + + @Override + public ResultScanner getScanner(Scan scan) throws IOException { + return new Scanner(scan); + } + + @Override + public ResultScanner getScanner(byte[] family) throws IOException { + Scan scan = new Scan(); + scan.addFamily(family); + return new Scanner(scan); + } + + @Override + public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException { + Scan scan = new Scan(); + scan.addColumn(family, qualifier); + return new Scanner(scan); + } + + public boolean isAutoFlush() { + return true; + } + + private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) + throws IOException { + // column to check-the-value + put.add(new KeyValue(row, family, qualifier, value)); + + CellSetModel model = buildModelFromPut(put); + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append('/'); + sb.append(toURLEncodedBytes(put.getRow())); + sb.append("?check=put"); + + for (int i = 0; i < maxRetries; i++) { + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + int code = response.getCode(); + switch (code) { + case 200: + return true; + case 304: // NOT-MODIFIED + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (final InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("checkAndPut request failed with " + code); + } + } + throw new IOException("checkAndPut request timed out"); + } + + private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, + Delete delete) throws IOException { + Put put = new Put(row, HConstants.LATEST_TIMESTAMP, delete.getFamilyCellMap()); + // column to check-the-value + put.add(new KeyValue(row, family, qualifier, value)); + CellSetModel model = buildModelFromPut(put); + StringBuilder sb = new StringBuilder(); + sb.append('/'); + sb.append(Bytes.toString(name)); + sb.append('/'); + sb.append(toURLEncodedBytes(row)); + sb.append("?check=delete"); + + for (int i = 0; i < maxRetries; i++) { + Response response = + client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput()); + int code = response.getCode(); + switch (code) { + case 200: + return true; + case 304: // NOT-MODIFIED + return false; + case 509: + try { + Thread.sleep(sleepTime); + } catch (final InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } + break; + default: + throw new IOException("checkAndDelete request failed with " + code); + } + } + throw new IOException("checkAndDelete request timed out"); + } + + @Override + public CheckAndMutateBuilder 
checkAndMutate(byte[] row, byte[] family) { + return new CheckAndMutateBuilderImpl(row, family); + } + + @Override + public Result increment(Increment increment) throws IOException { + throw new IOException("Increment not supported"); + } + + @Override + public Result append(Append append) throws IOException { + throw new IOException("Append not supported"); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) + throws IOException { + throw new IOException("incrementColumnValue not supported"); + } + + @Override + public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, + Durability durability) throws IOException { + throw new IOException("incrementColumnValue not supported"); + } + + @Override + public void batch(List actions, Object[] results) throws IOException { + throw new IOException("batch not supported"); + } + + @Override + public void batchCallback(List actions, Object[] results, + Callback callback) throws IOException, InterruptedException { + throw new IOException("batchCallback not supported"); + } + + @Override + public CoprocessorRpcChannel coprocessorService(byte[] row) { + throw new UnsupportedOperationException("coprocessorService not implemented"); + } + + @Override + public void mutateRow(RowMutations rm) throws IOException { + throw new IOException("atomicMutation not supported"); + } + + @Override + public long getReadRpcTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public long getRpcTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public long getWriteRpcTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public long getOperationTimeout(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + /* + * Only a small subset of characters are valid in URLs. Row keys, column families, and qualifiers + * cannot be appended to URLs without first URL escaping. Table names are ok because they can only + * contain alphanumeric, ".","_", and "-" which are valid characters in URLs. + */ + private static String toURLEncodedBytes(byte[] row) { + try { + return URLEncoder.encode(new String(row, StandardCharsets.UTF_8), "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException("URLEncoder doesn't support UTF-8", e); + } + } + + private class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder { + + private final byte[] row; + private final byte[] family; + private byte[] qualifier; + private byte[] value; + + CheckAndMutateBuilderImpl(byte[] row, byte[] family) { + this.row = Preconditions.checkNotNull(row, "row is null"); + this.family = Preconditions.checkNotNull(family, "family is null"); + } + + @Override + public CheckAndMutateBuilder qualifier(byte[] qualifier) { + this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. 
Consider using" + + " an empty byte array, or just do not call this method if you want a null qualifier"); + return this; + } + + @Override + public CheckAndMutateBuilder timeRange(TimeRange timeRange) { + throw new UnsupportedOperationException("timeRange not implemented"); + } + + @Override + public CheckAndMutateBuilder ifNotExists() { + throw new UnsupportedOperationException( + "CheckAndMutate for non-equal comparison " + "not implemented"); + } + + @Override + public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) { + if (compareOp == CompareOperator.EQUAL) { + this.value = Preconditions.checkNotNull(value, "value is null"); + return this; + } else { + throw new UnsupportedOperationException( + "CheckAndMutate for non-equal comparison " + "not implemented"); + } + } + + @Override + public CheckAndMutateBuilder ifEquals(byte[] value) { + this.value = Preconditions.checkNotNull(value, "value is null"); + return this; + } + + @Override + public boolean thenPut(Put put) throws IOException { + return doCheckAndPut(row, family, qualifier, value, put); + } + + @Override + public boolean thenDelete(Delete delete) throws IOException { + return doCheckAndDelete(row, family, qualifier, value, delete); + } + + @Override + public boolean thenMutate(RowMutations mutation) throws IOException { + throw new UnsupportedOperationException("thenMutate not implemented"); + } + } + +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java new file mode 100755 index 00000000..4d0359dd --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteAdminRetries.java @@ -0,0 +1,172 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.client; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Tests {@link RemoteAdmin} retries. + */ +@Category({RestTests.class, SmallTests.class}) +public class TestRemoteAdminRetries { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRemoteAdminRetries.class); + + private static final int SLEEP_TIME = 50; + private static final int RETRIES = 3; + private static final long MAX_TIME = SLEEP_TIME * (RETRIES - 1); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private RemoteAdmin remoteAdmin; + private Client client; + + @Before + public void setup() throws Exception { + client = mock(Client.class); + Response response = new Response(509); + when(client.get(anyString(), anyString())).thenReturn(response); + when(client.delete(anyString())).thenReturn(response); + when(client.put(anyString(), anyString(), any())).thenReturn(response); + when(client.post(anyString(), anyString(), any())).thenReturn(response); + Configuration configuration = TEST_UTIL.getConfiguration(); + + configuration.setInt("hbase.rest.client.max.retries", RETRIES); + configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME); + + remoteAdmin = new RemoteAdmin(client, TEST_UTIL.getConfiguration(), "MyTable"); + } + + @Test + public void testFailingGetRestVersion() throws Exception { + testTimedOutGetCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteAdmin.getRestVersion(); + } + }); + } + + @Test + public void testFailingGetClusterStatus() throws Exception { + testTimedOutGetCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteAdmin.getClusterStatus(); + } + }); + } + + @Test + public void testFailingGetClusterVersion() throws Exception { + testTimedOutGetCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteAdmin.getClusterVersion(); + } + }); + } + + @Test + public void testFailingGetTableAvailable() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteAdmin.isTableAvailable(Bytes.toBytes("TestTable")); + } + }); + } + + @Test + @SuppressWarnings("deprecation") + public void testFailingCreateTable() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteAdmin.createTable(new HTableDescriptor(TableName.valueOf("TestTable"))); + } + }); + verify(client, times(RETRIES)).put(anyString(), anyString(), any()); + } + + @Test + public void testFailingDeleteTable() throws Exception { 
+ testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteAdmin.deleteTable("TestTable"); + } + }); + verify(client, times(RETRIES)).delete(anyString()); + } + + @Test + public void testFailingGetTableList() throws Exception { + testTimedOutGetCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteAdmin.getTableList(); + } + }); + } + + private void testTimedOutGetCall(CallExecutor callExecutor) throws Exception { + testTimedOutCall(callExecutor); + verify(client, times(RETRIES)).get(anyString(), anyString()); + } + + private void testTimedOutCall(CallExecutor callExecutor) throws Exception { + long start = System.currentTimeMillis(); + try { + callExecutor.run(); + fail("should be timeout exception!"); + } catch (IOException e) { + assertTrue(Pattern.matches(".*MyTable.*timed out", e.toString())); + } + assertTrue((System.currentTimeMillis() - start) > MAX_TIME); + } + + private static interface CallExecutor { + void run() throws Exception; + } + +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java new file mode 100755 index 00000000..247897fe --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteHTableRetries.java @@ -0,0 +1,198 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.client; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.Arrays; +import java.util.regex.Pattern; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test RemoteHTable retries. 
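+ * Every mocked REST call answers HTTP 509, so each operation is expected to exhaust its
+ * retries and fail with a "request timed out" IOException.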
+ */ +@Category({RestTests.class, SmallTests.class}) +public class TestRemoteHTableRetries { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRemoteHTableRetries.class); + + private static final int SLEEP_TIME = 50; + private static final int RETRIES = 3; + private static final long MAX_TIME = SLEEP_TIME * (RETRIES - 1); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final byte[] ROW_1 = Bytes.toBytes("testrow1"); + private static final byte[] COLUMN_1 = Bytes.toBytes("a"); + private static final byte[] QUALIFIER_1 = Bytes.toBytes("1"); + private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1"); + + private Client client; + private RemoteHTable remoteTable; + + @Before + public void setup() throws Exception { + client = mock(Client.class); + Response response = new Response(509); + when(client.get(anyString(), anyString())).thenReturn(response); + when(client.delete(anyString())).thenReturn(response); + when(client.put(anyString(), anyString(), any())).thenReturn( + response); + when(client.post(anyString(), anyString(), any())).thenReturn( + response); + + Configuration configuration = TEST_UTIL.getConfiguration(); + configuration.setInt("hbase.rest.client.max.retries", RETRIES); + configuration.setInt("hbase.rest.client.sleep", SLEEP_TIME); + + remoteTable = new RemoteHTable(client, TEST_UTIL.getConfiguration(), + "MyTable"); + } + + @After + public void tearDownAfterClass() throws Exception { + remoteTable.close(); + } + + @Test + public void testDelete() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + Delete delete = new Delete(Bytes.toBytes("delete")); + remoteTable.delete(delete); + } + }); + verify(client, times(RETRIES)).delete(anyString()); + } + + @Test + public void testGet() throws Exception { + testTimedOutGetCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteTable.get(new Get(Bytes.toBytes("Get"))); + } + }); + } + + @Test + public void testSingleRowPut() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteTable.put(new Put(Bytes.toBytes("Row"))); + } + }); + verify(client, times(RETRIES)).put(anyString(), anyString(), any()); + } + + @Test + public void testMultiRowPut() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + Put[] puts = { new Put(Bytes.toBytes("Row1")), new Put(Bytes.toBytes("Row2")) }; + remoteTable.put(Arrays.asList(puts)); + } + }); + verify(client, times(RETRIES)).put(anyString(), anyString(), any()); + } + + @Test + public void testGetScanner() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + remoteTable.getScanner(new Scan()); + } + }); + verify(client, times(RETRIES)).post(anyString(), anyString(), any()); + } + + @Test + public void testCheckAndPut() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() throws Exception { + Put put = new Put(ROW_1); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) + .ifEquals(VALUE_1).thenPut(put); + } + }); + verify(client, times(RETRIES)).put(anyString(), anyString(), any()); + } + + @Test + public void testCheckAndDelete() throws Exception { + testTimedOutCall(new CallExecutor() { + @Override + public void run() 
throws Exception { + Put put = new Put(ROW_1); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + Delete delete= new Delete(ROW_1); + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) + .ifEquals(VALUE_1).thenDelete(delete); + } + }); + } + + private void testTimedOutGetCall(CallExecutor callExecutor) throws Exception { + testTimedOutCall(callExecutor); + verify(client, times(RETRIES)).get(anyString(), anyString()); + } + + private void testTimedOutCall(CallExecutor callExecutor) throws Exception { + long start = System.currentTimeMillis(); + try { + callExecutor.run(); + fail("should be timeout exception!"); + } catch (IOException e) { + assertTrue(Pattern.matches(".*request timed out", e.toString())); + } + assertTrue((System.currentTimeMillis() - start) > MAX_TIME); + } + + private interface CallExecutor { + void run() throws Exception; + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java new file mode 100755 index 00000000..2d3af51c --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -0,0 +1,683 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility; +import org.apache.hadoop.hbase.rest.RESTServlet; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.http.Header; +import org.apache.http.message.BasicHeader; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, MediumTests.class}) +public class TestRemoteTable { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRemoteTable.class); + + // Verify that invalid URL characters and arbitrary bytes are escaped when + // constructing REST URLs per HBASE-7621. RemoteHTable should support row keys + // and qualifiers containing any byte for all table operations. + private static final String INVALID_URL_CHARS_1 = + "|\"\\^{}\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009\u000B\u000C"; + + // HColumnDescriptor prevents certain characters in column names. The following + // are examples of characters are allowed in column names but are not valid in + // URLs. + private static final String INVALID_URL_CHARS_2 = "|^{}\u0242"; + + // Besides alphanumeric these characters can also be present in table names. 
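+  // (Illustration only, not the code under test.) One straightforward way to make an
+  // arbitrary row-key byte[] safe for a URL path is to percent-encode every byte:
+  //
+  //   StringBuilder sb = new StringBuilder();
+  //   for (byte b : rowKey) {
+  //     sb.append('%').append(String.format("%02X", b));
+  //   }
+  //
+  // How RemoteHTable actually escapes keys is an implementation detail; these tests
+  // only assert that operations succeed for keys and qualifiers containing such bytes.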
+ private static final String VALID_TABLE_NAME_CHARS = "_-."; + + private static final TableName TABLE = + TableName.valueOf("TestRemoteTable" + VALID_TABLE_NAME_CHARS); + + private static final byte[] ROW_1 = Bytes.toBytes("testrow1" + INVALID_URL_CHARS_1); + private static final byte[] ROW_2 = Bytes.toBytes("testrow2" + INVALID_URL_CHARS_1); + private static final byte[] ROW_3 = Bytes.toBytes("testrow3" + INVALID_URL_CHARS_1); + private static final byte[] ROW_4 = Bytes.toBytes("testrow4"+ INVALID_URL_CHARS_1); + + private static final byte[] COLUMN_1 = Bytes.toBytes("a" + INVALID_URL_CHARS_2); + private static final byte[] COLUMN_2 = Bytes.toBytes("b" + INVALID_URL_CHARS_2); + private static final byte[] COLUMN_3 = Bytes.toBytes("c" + INVALID_URL_CHARS_2); + + private static final byte[] QUALIFIER_1 = Bytes.toBytes("1" + INVALID_URL_CHARS_1); + private static final byte[] QUALIFIER_2 = Bytes.toBytes("2" + INVALID_URL_CHARS_1); + private static final byte[] VALUE_1 = Bytes.toBytes("testvalue1"); + private static final byte[] VALUE_2 = Bytes.toBytes("testvalue2"); + + private static final long ONE_HOUR = 60 * 60 * 1000; + private static final long TS_2 = System.currentTimeMillis(); + private static final long TS_1 = TS_2 - ONE_HOUR; + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseRESTTestingUtility REST_TEST_UTIL = + new HBaseRESTTestingUtility(); + private RemoteHTable remoteTable; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(); + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + } + + @Before + public void before() throws Exception { + Admin admin = TEST_UTIL.getAdmin(); + if (admin.tableExists(TABLE)) { + if (admin.isTableEnabled(TABLE)) { + admin.disableTable(TABLE); + } + + admin.deleteTable(TABLE); + } + + TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor = + new TableDescriptorBuilder.ModifyableTableDescriptor(TABLE); + tableDescriptor.setColumnFamily( + new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(COLUMN_1) + .setMaxVersions(3)); + tableDescriptor.setColumnFamily( + new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(COLUMN_2) + .setMaxVersions(3)); + tableDescriptor.setColumnFamily( + new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(COLUMN_3) + .setMaxVersions(3)); + admin.createTable(tableDescriptor); + try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) { + Put put = new Put(ROW_1); + put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1); + table.put(put); + put = new Put(ROW_2); + put.addColumn(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1); + put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2); + put.addColumn(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2); + table.put(put); + } + remoteTable = new RemoteHTable( + new Client(new Cluster().add("localhost", + REST_TEST_UTIL.getServletPort())), + TEST_UTIL.getConfiguration(), TABLE.toBytes()); + } + + @After + public void after() throws Exception { + remoteTable.close(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + REST_TEST_UTIL.shutdownServletContainer(); + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testGetTableDescriptor() throws IOException { + try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) { + TableDescriptor local = table.getDescriptor(); + assertEquals(remoteTable.getDescriptor(), local); + } + } + + @Test + public void testGet() throws IOException { + 
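+    // Fixture written in before(): ROW_1 holds COLUMN_1:QUALIFIER_1 = VALUE_1 at TS_2;
+    // ROW_2 holds COLUMN_1:QUALIFIER_1 at TS_1 (VALUE_1) and TS_2 (VALUE_2), plus
+    // COLUMN_2:QUALIFIER_2 = VALUE_2 at TS_2. The assertions below follow from that layout.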
Get get = new Get(ROW_1); + Result result = remoteTable.get(get); + byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1); + byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); + assertNull(value2); + + get = new Get(ROW_1); + get.addFamily(COLUMN_3); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNull(value1); + assertNull(value2); + + get = new Get(ROW_1); + get.addColumn(COLUMN_1, QUALIFIER_1); + get.addColumn(COLUMN_2, QUALIFIER_2); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); + assertNull(value2); + + get = new Get(ROW_2); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2 + assertNotNull(value2); + assertTrue(Bytes.equals(VALUE_2, value2)); + + get = new Get(ROW_2); + get.addFamily(COLUMN_1); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2 + assertNull(value2); + + get = new Get(ROW_2); + get.addColumn(COLUMN_1, QUALIFIER_1); + get.addColumn(COLUMN_2, QUALIFIER_2); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_2, value1)); // @TS_2 + assertNotNull(value2); + assertTrue(Bytes.equals(VALUE_2, value2)); + + // test timestamp + get = new Get(ROW_2); + get.addFamily(COLUMN_1); + get.addFamily(COLUMN_2); + get.setTimestamp(TS_1); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); // @TS_1 + assertNull(value2); + + // test timerange + get = new Get(ROW_2); + get.addFamily(COLUMN_1); + get.addFamily(COLUMN_2); + get.setTimeRange(0, TS_1 + 1); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); // @TS_1 + assertNull(value2); + + // test maxVersions + get = new Get(ROW_2); + get.addFamily(COLUMN_1); + get.readVersions(2); + result = remoteTable.get(get); + int count = 0; + for (Cell kv: result.listCells()) { + if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_1 == kv.getTimestamp()) { + assertTrue(CellUtil.matchingValue(kv, VALUE_1)); // @TS_1 + count++; + } + if (CellUtil.matchingFamily(kv, COLUMN_1) && TS_2 == kv.getTimestamp()) { + assertTrue(CellUtil.matchingValue(kv, VALUE_2)); // @TS_2 + count++; + } + } + assertEquals(2, count); + } + + @Test + public void testMultiGet() throws Exception { + ArrayList gets = new ArrayList<>(2); + gets.add(new Get(ROW_1)); + gets.add(new Get(ROW_2)); + Result[] results = remoteTable.get(gets); + assertNotNull(results); + assertEquals(2, results.length); + assertEquals(1, results[0].size()); + assertEquals(2, results[1].size()); + + //Test Versions + gets = new ArrayList<>(2); + Get g = new Get(ROW_1); + g.readVersions(3); + gets.add(g); + gets.add(new Get(ROW_2)); 
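+    // ROW_1 only ever has one version, while ROW_2 carries two versions of
+    // COLUMN_1:QUALIFIER_1 plus one cell in COLUMN_2. The versions setting appears to be
+    // applied to the multi-get request as a whole, hence the expectation below of one
+    // cell for ROW_1 and three cells for ROW_2.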
+ results = remoteTable.get(gets); + assertNotNull(results); + assertEquals(2, results.length); + assertEquals(1, results[0].size()); + assertEquals(3, results[1].size()); + + //404 + gets = new ArrayList<>(1); + gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE"))); + results = remoteTable.get(gets); + assertNotNull(results); + assertEquals(0, results.length); + + gets = new ArrayList<>(3); + gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE"))); + gets.add(new Get(ROW_1)); + gets.add(new Get(ROW_2)); + results = remoteTable.get(gets); + assertNotNull(results); + assertEquals(2, results.length); + } + + @Test + public void testPut() throws IOException { + Put put = new Put(ROW_3); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + remoteTable.put(put); + + Get get = new Get(ROW_3); + get.addFamily(COLUMN_1); + Result result = remoteTable.get(get); + byte[] value = result.getValue(COLUMN_1, QUALIFIER_1); + assertNotNull(value); + assertTrue(Bytes.equals(VALUE_1, value)); + + // multiput + List puts = new ArrayList<>(3); + put = new Put(ROW_3); + put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2); + puts.add(put); + put = new Put(ROW_4); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + put = new Put(ROW_4); + put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2); + puts.add(put); + remoteTable.put(puts); + + get = new Get(ROW_3); + get.addFamily(COLUMN_2); + result = remoteTable.get(get); + value = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value); + assertTrue(Bytes.equals(VALUE_2, value)); + get = new Get(ROW_4); + result = remoteTable.get(get); + value = result.getValue(COLUMN_1, QUALIFIER_1); + assertNotNull(value); + assertTrue(Bytes.equals(VALUE_1, value)); + value = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value); + assertTrue(Bytes.equals(VALUE_2, value)); + + assertTrue(Bytes.equals(Bytes.toBytes("TestRemoteTable" + VALID_TABLE_NAME_CHARS), + remoteTable.getTableName())); + } + + @Test + public void testDelete() throws IOException { + Put put = new Put(ROW_3); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2); + put.addColumn(COLUMN_3, QUALIFIER_1, VALUE_1); + put.addColumn(COLUMN_3, QUALIFIER_2, VALUE_2); + remoteTable.put(put); + + Get get = new Get(ROW_3); + get.addFamily(COLUMN_1); + get.addFamily(COLUMN_2); + get.addFamily(COLUMN_3); + Result result = remoteTable.get(get); + byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1); + byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2); + byte[] value3 = result.getValue(COLUMN_3, QUALIFIER_1); + byte[] value4 = result.getValue(COLUMN_3, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); + assertNotNull(value2); + assertTrue(Bytes.equals(VALUE_2, value2)); + assertNotNull(value3); + assertTrue(Bytes.equals(VALUE_1, value3)); + assertNotNull(value4); + assertTrue(Bytes.equals(VALUE_2, value4)); + + Delete delete = new Delete(ROW_3); + delete.addColumn(COLUMN_2, QUALIFIER_2); + remoteTable.delete(delete); + + get = new Get(ROW_3); + get.addFamily(COLUMN_1); + get.addFamily(COLUMN_2); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); + assertNull(value2); + + delete = new Delete(ROW_3); + delete.setTimestamp(1L); + remoteTable.delete(delete); + + get = new Get(ROW_3); + get.addFamily(COLUMN_1); + get.addFamily(COLUMN_2); + result = remoteTable.get(get); + 
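+    // The delete above was pinned to timestamp 1L, which is older than every cell in
+    // ROW_3, so nothing should have been removed and value1 is still expected to be present.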
value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); + assertNull(value2); + + // Delete column family from row + delete = new Delete(ROW_3); + delete.addFamily(COLUMN_3); + remoteTable.delete(delete); + + get = new Get(ROW_3); + get.addFamily(COLUMN_3); + result = remoteTable.get(get); + value3 = result.getValue(COLUMN_3, QUALIFIER_1); + value4 = result.getValue(COLUMN_3, QUALIFIER_2); + assertNull(value3); + assertNull(value4); + + delete = new Delete(ROW_3); + remoteTable.delete(delete); + + get = new Get(ROW_3); + get.addFamily(COLUMN_1); + get.addFamily(COLUMN_2); + result = remoteTable.get(get); + value1 = result.getValue(COLUMN_1, QUALIFIER_1); + value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNull(value1); + assertNull(value2); + } + + /** + * Test RemoteHTable.Scanner + */ + @Test + public void testScanner() throws IOException { + List puts = new ArrayList<>(4); + Put put = new Put(ROW_1); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + put = new Put(ROW_2); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + put = new Put(ROW_3); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + put = new Put(ROW_4); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + remoteTable.put(puts); + + ResultScanner scanner = remoteTable.getScanner(new Scan()); + + Result[] results = scanner.next(1); + assertNotNull(results); + assertEquals(1, results.length); + assertTrue(Bytes.equals(ROW_1, results[0].getRow())); + + Result result = scanner.next(); + assertNotNull(result); + assertTrue(Bytes.equals(ROW_2, result.getRow())); + + results = scanner.next(2); + assertNotNull(results); + assertEquals(2, results.length); + assertTrue(Bytes.equals(ROW_3, results[0].getRow())); + assertTrue(Bytes.equals(ROW_4, results[1].getRow())); + + results = scanner.next(1); + assertNull(results); + scanner.close(); + + scanner = remoteTable.getScanner(COLUMN_1); + results = scanner.next(4); + assertNotNull(results); + assertEquals(4, results.length); + assertTrue(Bytes.equals(ROW_1, results[0].getRow())); + assertTrue(Bytes.equals(ROW_2, results[1].getRow())); + assertTrue(Bytes.equals(ROW_3, results[2].getRow())); + assertTrue(Bytes.equals(ROW_4, results[3].getRow())); + + scanner.close(); + + scanner = remoteTable.getScanner(COLUMN_1,QUALIFIER_1); + results = scanner.next(4); + assertNotNull(results); + assertEquals(4, results.length); + assertTrue(Bytes.equals(ROW_1, results[0].getRow())); + assertTrue(Bytes.equals(ROW_2, results[1].getRow())); + assertTrue(Bytes.equals(ROW_3, results[2].getRow())); + assertTrue(Bytes.equals(ROW_4, results[3].getRow())); + scanner.close(); + assertTrue(remoteTable.isAutoFlush()); + } + + @Test + public void testCheckAndDelete() throws IOException { + Get get = new Get(ROW_1); + Result result = remoteTable.get(get); + byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1); + byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2); + assertNotNull(value1); + assertTrue(Bytes.equals(VALUE_1, value1)); + assertNull(value2); + assertTrue(remoteTable.exists(get)); + assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length); + Delete delete = new Delete(ROW_1); + + remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) + .ifEquals(VALUE_1).thenDelete(delete); + assertFalse(remoteTable.exists(get)); + + Put put = new Put(ROW_1); + put.addColumn(COLUMN_1, QUALIFIER_1, 
VALUE_1); + remoteTable.put(put); + + assertTrue(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) + .ifEquals(VALUE_1).thenPut(put)); + assertFalse(remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) + .ifEquals(VALUE_2).thenPut(put)); + } + + /** + * Test RemoteHable.Scanner.iterator method + */ + @Test + public void testIteratorScaner() throws IOException { + List puts = new ArrayList<>(4); + Put put = new Put(ROW_1); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + put = new Put(ROW_2); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + put = new Put(ROW_3); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + put = new Put(ROW_4); + put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1); + puts.add(put); + remoteTable.put(puts); + + ResultScanner scanner = remoteTable.getScanner(new Scan()); + Iterator iterator = scanner.iterator(); + assertTrue(iterator.hasNext()); + int counter = 0; + while (iterator.hasNext()) { + iterator.next(); + counter++; + } + assertEquals(4, counter); + } + + /** + * Test a some methods of class Response. + */ + @Test + public void testResponse(){ + Response response = new Response(200); + assertEquals(200, response.getCode()); + Header[] headers = new Header[2]; + headers[0] = new BasicHeader("header1", "value1"); + headers[1] = new BasicHeader("header2", "value2"); + response = new Response(200, headers); + assertEquals("value1", response.getHeader("header1")); + assertFalse(response.hasBody()); + response.setCode(404); + assertEquals(404, response.getCode()); + headers = new Header[2]; + headers[0] = new BasicHeader("header1", "value1.1"); + headers[1] = new BasicHeader("header2", "value2"); + response.setHeaders(headers); + assertEquals("value1.1", response.getHeader("header1")); + response.setBody(Bytes.toBytes("body")); + assertTrue(response.hasBody()); + } + + /** + * Tests scanner with limitation + * limit the number of rows each scanner scan fetch at life time + * The number of rows returned should be equal to the limit + * @throws Exception + */ + @Test + public void testLimitedScan() throws Exception { + int numTrials = 100; + int limit = 60; + + // Truncate the test table for inserting test scenarios rows keys + TEST_UTIL.getAdmin().disableTable(TABLE); + TEST_UTIL.getAdmin().truncateTable(TABLE, false); + String row = "testrow"; + + try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) { + List puts = new ArrayList<>(); + Put put = null; + for (int i = 1; i <= numTrials; i++) { + put = new Put(Bytes.toBytes(row + i)); + put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, Bytes.toBytes("testvalue" + i)); + puts.add(put); + } + table.put(puts); + } + + remoteTable = + new RemoteHTable(new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), + TEST_UTIL.getConfiguration(), TABLE.toBytes()); + + Scan scan = new Scan(); + scan.setLimit(limit); + ResultScanner scanner = remoteTable.getScanner(scan); + Iterator resultIterator = scanner.iterator(); + int counter = 0; + while (resultIterator.hasNext()) { + resultIterator.next(); + counter++; + } + assertEquals(limit, counter); + } + + /** + * Tests keeping a HBase scanner alive for long periods of time. Each call to next() should reset + * the ConnectionCache timeout for the scanner's connection. 
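+   * <p>
+   * The idle timeout (RESTServlet.MAX_IDLETIME) is set below to (numTrials / 2) * trialPause,
+   * i.e. half of the total scan duration, so the cached connection would expire partway
+   * through the scan if next() did not refresh it.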
+ * + * @throws Exception if starting the servlet container or disabling or truncating the table fails + */ + @Test + public void testLongLivedScan() throws Exception { + int numTrials = 6; + int trialPause = 1000; + int cleanUpInterval = 100; + + // Shutdown the Rest Servlet container + REST_TEST_UTIL.shutdownServletContainer(); + + // Set the ConnectionCache timeout to trigger halfway through the trials + TEST_UTIL.getConfiguration().setLong(RESTServlet.MAX_IDLETIME, (numTrials / 2) * trialPause); + TEST_UTIL.getConfiguration().setLong(RESTServlet.CLEANUP_INTERVAL, cleanUpInterval); + + // Start the Rest Servlet container + REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); + + // Truncate the test table for inserting test scenarios rows keys + TEST_UTIL.getAdmin().disableTable(TABLE); + TEST_UTIL.getAdmin().truncateTable(TABLE, false); + + remoteTable = new RemoteHTable( + new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort())), + TEST_UTIL.getConfiguration(), TABLE.toBytes()); + + String row = "testrow"; + + try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) { + List puts = new ArrayList(); + Put put = null; + for (int i = 1; i <= numTrials; i++) { + put = new Put(Bytes.toBytes(row + i)); + put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, Bytes.toBytes("testvalue" + i)); + puts.add(put); + } + table.put(puts); + } + + Scan scan = new Scan(); + scan.setCaching(1); + scan.setBatch(1); + + ResultScanner scanner = remoteTable.getScanner(scan); + Result result = null; + // get scanner and rows + for (int i = 1; i <= numTrials; i++) { + // Make sure that the Scanner doesn't throw an exception after the ConnectionCache timeout + result = scanner.next(); + assertEquals(row + i, Bytes.toString(result.getRow())); + Thread.sleep(trialPause); + } + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java new file mode 100755 index 00000000..0c11a4b7 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestXmlParsing.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.client; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.rest.Constants; +import org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.StringUtils; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.xml.bind.UnmarshalException; +import java.io.IOException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test class for {@link RemoteAdmin} to verify XML is parsed in a certain manner. + */ +@Category(SmallTests.class) +public class TestXmlParsing { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestXmlParsing.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestXmlParsing.class); + + @Test + public void testParsingClusterVersion() throws Exception { + final String xml = "" + + ""; + Client client = mock(Client.class); + RemoteAdmin admin = new RemoteAdmin(client, HBaseConfiguration.create(), null); + Response resp = new Response(200, null, Bytes.toBytes(xml)); + + when(client.get("/version/cluster", Constants.MIMETYPE_XML)).thenReturn(resp); + + StorageClusterVersionModel cv = admin.getClusterVersion(); + assertEquals("2.0.0", cv.getVersion()); + } + + @Test + public void testFailOnExternalEntities() throws Exception { + final String externalEntitiesXml = + "" + + " ] >" + + " &xee;"; + Client client = mock(Client.class); + RemoteAdmin admin = new RemoteAdmin(client, HBaseConfiguration.create(), null); + Response resp = new Response(200, null, Bytes.toBytes(externalEntitiesXml)); + + when(client.get("/version/cluster", Constants.MIMETYPE_XML)).thenReturn(resp); + + try { + admin.getClusterVersion(); + fail("Expected getClusterVersion() to throw an exception"); + } catch (IOException e) { + assertEquals("Cause of exception ought to be a failure to parse the stream due to our " + + "invalid external entity. Make sure this isn't just a false positive due to " + + "implementation. see HBASE-19020.", UnmarshalException.class, e.getCause().getClass()); + final String exceptionText = StringUtils.stringifyException(e); + final String expectedText = "\"xee\""; + LOG.debug("exception text: '" + exceptionText + "'", e); + assertTrue("Exception does not contain expected text", exceptionText.contains(expectedText)); + } + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/http/KeyStoreTestUtil.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/http/KeyStoreTestUtil.java new file mode 100755 index 00000000..35f5e63f --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/http/KeyStoreTestUtil.java @@ -0,0 +1,341 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rest.http; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; +import org.apache.hadoop.security.ssl.SSLFactory; +import org.bouncycastle.x509.X509V1CertificateGenerator; + +import javax.security.auth.x500.X500Principal; +import java.io.File; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.IOException; +import java.io.Writer; +import java.math.BigInteger; +import java.net.URL; +import java.security.GeneralSecurityException; +import java.security.InvalidKeyException; +import java.security.Key; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.KeyStore; +import java.security.NoSuchAlgorithmException; +import java.security.NoSuchProviderException; +import java.security.SecureRandom; +import java.security.SignatureException; +import java.security.cert.Certificate; +import java.security.cert.CertificateEncodingException; +import java.security.cert.X509Certificate; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; + +public class KeyStoreTestUtil { + + public static String getClasspathDir(Class klass) throws Exception { + String file = klass.getName(); + file = file.replace('.', '/') + ".class"; + URL url = Thread.currentThread().getContextClassLoader().getResource(file); + String baseDir = url.toURI().getPath(); + baseDir = baseDir.substring(0, baseDir.length() - file.length() - 1); + return baseDir; + } + + /** + * Create a self-signed X.509 Certificate. 
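+   *
+   * <p>Typical usage in these tests (sketch; mirrors setupSSLConfig further below):
+   * <pre>
+   *   KeyPair pair = KeyStoreTestUtil.generateKeyPair("RSA");
+   *   X509Certificate cert = KeyStoreTestUtil.generateCertificate(
+   *       "CN=localhost, O=server", pair, 30, "SHA1withRSA");
+   * </pre>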
+ * + * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" + * @param pair the KeyPair + * @param days how many days from now the Certificate is valid for + * @param algorithm the signing algorithm, eg "SHA1withRSA" + * @return the self-signed certificate + */ + public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm) + throws CertificateEncodingException, InvalidKeyException, IllegalStateException, + NoSuchProviderException, NoSuchAlgorithmException, SignatureException { + Date from = new Date(); + Date to = new Date(from.getTime() + days * 86400000l); + BigInteger sn = new BigInteger(64, new SecureRandom()); + KeyPair keyPair = pair; + X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); + X500Principal dnName = new X500Principal(dn); + + certGen.setSerialNumber(sn); + certGen.setIssuerDN(dnName); + certGen.setNotBefore(from); + certGen.setNotAfter(to); + certGen.setSubjectDN(dnName); + certGen.setPublicKey(keyPair.getPublic()); + certGen.setSignatureAlgorithm(algorithm); + X509Certificate cert = certGen.generate(pair.getPrivate()); + return cert; + } + + public static KeyPair generateKeyPair(String algorithm) + throws NoSuchAlgorithmException { + KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); + keyGen.initialize(1024); + return keyGen.genKeyPair(); + } + + private static KeyStore createEmptyKeyStore() + throws GeneralSecurityException, IOException { + KeyStore ks = KeyStore.getInstance("JKS"); + ks.load(null, null); // initialize + return ks; + } + + private static void saveKeyStore(KeyStore ks, String filename, + String password) + throws GeneralSecurityException, IOException { + FileOutputStream out = new FileOutputStream(filename); + try { + ks.store(out, password.toCharArray()); + } finally { + out.close(); + } + } + + public static void createKeyStore(String filename, + String password, String alias, + Key privateKey, Certificate cert) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + ks.setKeyEntry(alias, privateKey, password.toCharArray(), + new Certificate[]{cert}); + saveKeyStore(ks, filename, password); + } + + /** + * Creates a keystore with a single key and saves it to a file. 
+ * + * @param filename String file to save + * @param password String store password to set on keystore + * @param keyPassword String key password to set on key + * @param alias String alias to use for the key + * @param privateKey Key to save in keystore + * @param cert Certificate to use as certificate chain associated to key + * @throws GeneralSecurityException for any error with the security APIs + * @throws IOException if there is an I/O error saving the file + */ + public static void createKeyStore(String filename, + String password, String keyPassword, String alias, + Key privateKey, Certificate cert) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), + new Certificate[]{cert}); + saveKeyStore(ks, filename, password); + } + + public static void createTrustStore(String filename, + String password, String alias, + Certificate cert) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + ks.setCertificateEntry(alias, cert); + saveKeyStore(ks, filename, password); + } + + public static void createTrustStore( + String filename, String password, Map certs) + throws GeneralSecurityException, IOException { + KeyStore ks = createEmptyKeyStore(); + for (Map.Entry cert : certs.entrySet()) { + ks.setCertificateEntry(cert.getKey(), cert.getValue()); + } + saveKeyStore(ks, filename, password); + } + + public static void cleanupSSLConfig(String keystoresDir, String sslConfDir) + throws Exception { + File f = new File(keystoresDir + "/clientKS.jks"); + f.delete(); + f = new File(keystoresDir + "/serverKS.jks"); + f.delete(); + f = new File(keystoresDir + "/trustKS.jks"); + f.delete(); + f = new File(sslConfDir + "/ssl-client.xml"); + f.delete(); + f = new File(sslConfDir + "/ssl-server.xml"); + f.delete(); + } + + /** + * Performs complete setup of SSL configuration in preparation for testing an + * SSLFactory. This includes keys, certs, keystores, truststores, the server + * SSL configuration file, the client SSL configuration file, and the master + * configuration file read by the SSLFactory. 
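+   *
+   * <p>Concretely this writes clientKS.jks (only when useClientCert is true), serverKS.jks
+   * and trustKS.jks under keystoresDir, writes ssl-client.xml and ssl-server.xml under
+   * sslConfDir, and points the supplied Configuration at those files.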
+ * + * @param keystoresDir String directory to save keystores + * @param sslConfDir String directory to save SSL configuration files + * @param conf Configuration master configuration to be used by an SSLFactory, + * which will be mutated by this method + * @param useClientCert boolean true to make the client present a cert in the + * SSL handshake + */ + public static void setupSSLConfig(String keystoresDir, String sslConfDir, + Configuration conf, boolean useClientCert) + throws Exception { + String clientKS = keystoresDir + "/clientKS.jks"; + String clientPassword = "clientP"; + String serverKS = keystoresDir + "/serverKS.jks"; + String serverPassword = "serverP"; + String trustKS = keystoresDir + "/trustKS.jks"; + String trustPassword = "trustP"; + + File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml"); + File sslServerConfFile = new File(sslConfDir + "/ssl-server.xml"); + + Map certs = new HashMap<>(); + + if (useClientCert) { + KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA"); + X509Certificate cCert = + KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, + "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", + cKP.getPrivate(), cCert); + certs.put("client", cCert); + } + + KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA"); + X509Certificate sCert = + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, + "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", + sKP.getPrivate(), sCert); + certs.put("server", sCert); + + KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs); + + Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword, + clientPassword, trustKS); + Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword, + serverPassword, trustKS); + + saveConfig(sslClientConfFile, clientSSLConf); + saveConfig(sslServerConfFile, serverSSLConf); + + conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL"); + conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName()); + conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName()); + conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert); + } + + /** + * Creates SSL configuration for a client. + * + * @param clientKS String client keystore file + * @param password String store password, or null to avoid setting store + * password + * @param keyPassword String key password, or null to avoid setting key + * password + * @param trustKS String truststore file + * @return Configuration for client SSL + */ + public static Configuration createClientSSLConfig(String clientKS, + String password, String keyPassword, String trustKS) { + Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT, + clientKS, password, keyPassword, trustKS); + return clientSSLConf; + } + + /** + * Creates SSL configuration for a server. 
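+   *
+   * <p>Used by setupSSLConfig above roughly as follows (sketch):
+   * <pre>
+   *   Configuration serverSSLConf =
+   *       createServerSSLConfig(serverKS, serverPassword, serverPassword, trustKS);
+   *   saveConfig(sslServerConfFile, serverSSLConf);
+   * </pre>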
+ * + * @param serverKS String server keystore file + * @param password String store password, or null to avoid setting store + * password + * @param keyPassword String key password, or null to avoid setting key + * password + * @param trustKS String truststore file + * @return Configuration for server SSL + */ + public static Configuration createServerSSLConfig(String serverKS, + String password, String keyPassword, String trustKS) throws IOException { + Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER, + serverKS, password, keyPassword, trustKS); + return serverSSLConf; + } + + /** + * Creates SSL configuration. + * + * @param mode SSLFactory.Mode mode to configure + * @param keystore String keystore file + * @param password String store password, or null to avoid setting store + * password + * @param keyPassword String key password, or null to avoid setting key + * password + * @param trustKS String truststore file + * @return Configuration for SSL + */ + private static Configuration createSSLConfig(SSLFactory.Mode mode, + String keystore, String password, String keyPassword, String trustKS) { + String trustPassword = "trustP"; + + Configuration sslConf = new Configuration(false); + if (keystore != null) { + sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), keystore); + } + if (password != null) { + sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, + FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), password); + } + if (keyPassword != null) { + sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, + FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), + keyPassword); + } + if (trustKS != null) { + sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS); + } + if (trustPassword != null) { + sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), + trustPassword); + } + sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000"); + + return sslConf; + } + + /** + * Saves configuration to a file. + * + * @param file File to save + * @param conf Configuration contents to write to file + * @throws IOException if there is an I/O error saving the file + */ + public static void saveConfig(File file, Configuration conf) + throws IOException { + Writer writer = new FileWriter(file); + try { + conf.writeXml(writer); + } finally { + writer.close(); + } + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java new file mode 100755 index 00000000..b8305d56 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellModel.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestCellModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCellModel.class); + + private static final long TIMESTAMP = 1245219839331L; + private static final byte[] COLUMN = Bytes.toBytes("testcolumn"); + private static final byte[] VALUE = Bytes.toBytes("testvalue"); + + public TestCellModel() throws Exception { + super(CellModel.class); + AS_XML = + "dGVzdHZhbHVl"; + AS_PB = + "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl"; + + AS_JSON = + "{\"column\":\"dGVzdGNvbHVtbg==\",\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVl\"}"; + } + + @Override + protected CellModel buildTestModel() { + CellModel model = new CellModel(); + model.setColumn(COLUMN); + model.setTimestamp(TIMESTAMP); + model.setValue(VALUE); + return model; + } + + @Override + protected void checkModel(CellModel model) { + assertTrue(Bytes.equals(model.getColumn(), COLUMN)); + assertTrue(Bytes.equals(model.getValue(), VALUE)); + assertTrue(model.hasUserTimestamp()); + assertEquals(TIMESTAMP, model.getTimestamp()); + } + + @Override + public void testBuildModel() throws Exception { + checkModel(buildTestModel()); + } + + @Override + public void testFromXML() throws Exception { + checkModel(fromXML(AS_XML)); + } + + @Override + public void testFromPB() throws Exception { + checkModel(fromPB(AS_PB)); + } + + @Test + public void testEquals() throws Exception { + CellModel cellModel1 = buildTestModel(); + CellModel cellModel2 = buildTestModel(); + + assertEquals(cellModel1, cellModel2); + + CellModel cellModel3 = new CellModel(); + assertFalse(cellModel1.equals(cellModel3)); + } + + @Test + public void testToString() throws Exception { + String expectedColumn = ToStringBuilder.reflectionToString(COLUMN, ToStringStyle.SIMPLE_STYLE); + + CellModel cellModel = buildTestModel(); + System.out.println(cellModel); + + assertTrue(StringUtils.contains(cellModel.toString(), expectedColumn)); + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java new file mode 100755 index 00000000..1d40effb --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestCellSetModel.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Iterator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestCellSetModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestCellSetModel.class); + + private static final byte[] ROW1 = Bytes.toBytes("testrow1"); + private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1"); + private static final byte[] VALUE1 = Bytes.toBytes("testvalue1"); + private static final long TIMESTAMP1 = 1245219839331L; + private static final byte[] ROW2 = Bytes.toBytes("testrow1"); + private static final byte[] COLUMN2 = Bytes.toBytes("testcolumn2"); + private static final byte[] VALUE2 = Bytes.toBytes("testvalue2"); + private static final long TIMESTAMP2 = 1245239813319L; + private static final byte[] COLUMN3 = Bytes.toBytes("testcolumn3"); + private static final byte[] VALUE3 = Bytes.toBytes("testvalue3"); + private static final long TIMESTAMP3 = 1245393318192L; + + public TestCellSetModel() throws Exception { + super(CellSetModel.class); + AS_XML = + "" + + "" + + "" + + "dGVzdHZhbHVlMQ==" + + "" + + "" + + "" + + "dGVzdHZhbHVlMg==" + + "" + + "dGVzdHZhbHVlMw==" + + "" + + ""; + + AS_PB = + "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" + + "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" + + "Igp0ZXN0dmFsdWUz"; + + AS_XML = + "" + + "" + + "dGVzdHZhbHVlMQ==" + + "" + + "dGVzdHZhbHVlMg==" + + "dGVzdHZhbHVlMw==" + + ""; + + AS_JSON = + "{\"Row\":[{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\",\"timestamp\":1245219839331," + + "\"$\":\"dGVzdHZhbHVlMQ==\"}]},{\"key\":\"dGVzdHJvdzE=\"," + + "\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjI=\",\"timestamp\":1245239813319," + + "\"$\":\"dGVzdHZhbHVlMg==\"},{\"column\":\"dGVzdGNvbHVtbjM=\"," + + "\"timestamp\":1245393318192,\"$\":\"dGVzdHZhbHVlMw==\"}]}]}"; + } + + @Override + protected CellSetModel buildTestModel() { + CellSetModel model = new CellSetModel(); + RowModel row; + row = new RowModel(); + row.setKey(ROW1); + row.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1)); + model.addRow(row); + row = new RowModel(); + row.setKey(ROW2); + row.addCell(new CellModel(COLUMN2, TIMESTAMP2, VALUE2)); + row.addCell(new CellModel(COLUMN3, TIMESTAMP3, VALUE3)); + 
model.addRow(row); + return model; + } + + @Override + protected void checkModel(CellSetModel model) { + Iterator rows = model.getRows().iterator(); + RowModel row = rows.next(); + assertTrue(Bytes.equals(ROW1, row.getKey())); + Iterator cells = row.getCells().iterator(); + CellModel cell = cells.next(); + assertTrue(Bytes.equals(COLUMN1, cell.getColumn())); + assertTrue(Bytes.equals(VALUE1, cell.getValue())); + assertTrue(cell.hasUserTimestamp()); + assertEquals(TIMESTAMP1, cell.getTimestamp()); + assertFalse(cells.hasNext()); + row = rows.next(); + assertTrue(Bytes.equals(ROW2, row.getKey())); + cells = row.getCells().iterator(); + cell = cells.next(); + assertTrue(Bytes.equals(COLUMN2, cell.getColumn())); + assertTrue(Bytes.equals(VALUE2, cell.getValue())); + assertTrue(cell.hasUserTimestamp()); + assertEquals(TIMESTAMP2, cell.getTimestamp()); + cell = cells.next(); + assertTrue(Bytes.equals(COLUMN3, cell.getColumn())); + assertTrue(Bytes.equals(VALUE3, cell.getValue())); + assertTrue(cell.hasUserTimestamp()); + assertEquals(TIMESTAMP3, cell.getTimestamp()); + assertFalse(cells.hasNext()); + } + + @Override + public void testBuildModel() throws Exception { + checkModel(buildTestModel()); + } + + @Override + public void testFromXML() throws Exception { + checkModel(fromXML(AS_XML)); + } + + @Override + public void testFromPB() throws Exception { + checkModel(fromPB(AS_PB)); + } + +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java new file mode 100755 index 00000000..75c089d1 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestColumnSchemaModel.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.model; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +@Category({RestTests.class, SmallTests.class}) +public class TestColumnSchemaModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestColumnSchemaModel.class); + + protected static final String COLUMN_NAME = "testcolumn"; + protected static final boolean BLOCKCACHE = true; + protected static final int BLOCKSIZE = 16384; + protected static final String BLOOMFILTER = "NONE"; + protected static final String COMPRESSION = "GZ"; + protected static final boolean IN_MEMORY = false; + protected static final int TTL = 86400; + protected static final int VERSIONS = 1; + + public TestColumnSchemaModel() throws Exception { + super(ColumnSchemaModel.class); + AS_XML = + ""; + + AS_JSON = + "{\"name\":\"testcolumn\",\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\"," + + "\"BLOCKCACHE\":\"true\",\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\"," + + "\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}"; + } + + @Override + protected ColumnSchemaModel buildTestModel() { + ColumnSchemaModel model = new ColumnSchemaModel(); + model.setName(COLUMN_NAME); + model.__setBlocksize(BLOCKSIZE); + model.__setBloomfilter(BLOOMFILTER); + model.__setBlockcache(BLOCKCACHE); + model.__setCompression(COMPRESSION); + model.__setVersions(VERSIONS); + model.__setTTL(TTL); + model.__setInMemory(IN_MEMORY); + return model; + } + + @Override + protected void checkModel(ColumnSchemaModel model) { + assertEquals("name", COLUMN_NAME, model.getName()); + assertEquals("block cache", BLOCKCACHE, model.__getBlockcache()); + assertEquals("block size", BLOCKSIZE, model.__getBlocksize()); + assertEquals("bloomfilter", BLOOMFILTER, model.__getBloomfilter()); + assertTrue("compression", model.__getCompression().equalsIgnoreCase(COMPRESSION)); + assertEquals("in memory", IN_MEMORY, model.__getInMemory()); + assertEquals("ttl", TTL, model.__getTTL()); + assertEquals("versions", VERSIONS, model.__getVersions()); + } + + @Override + @Test + public void testFromPB() throws Exception { + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java new file mode 100755 index 00000000..e00a545e --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestModelBase.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider; +import java.io.IOException; +import java.io.StringReader; +import java.io.StringWriter; +import java.util.Base64; +import javax.ws.rs.core.MediaType; +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; +import org.apache.hadoop.hbase.rest.provider.JAXBContextResolver; +import org.junit.Test; + +public abstract class TestModelBase { + + protected String AS_XML; + + protected String AS_PB; + + protected String AS_JSON; + + protected JAXBContext context; + + protected Class clazz; + + protected ObjectMapper mapper; + + protected TestModelBase(Class clazz) throws Exception { + super(); + this.clazz = clazz; + context = new JAXBContextResolver().getContext(clazz); + mapper = new JacksonJaxbJsonProvider().locateMapper(clazz, + MediaType.APPLICATION_JSON_TYPE); + } + + protected abstract T buildTestModel(); + + @SuppressWarnings("unused") + protected String toXML(T model) throws JAXBException { + StringWriter writer = new StringWriter(); + context.createMarshaller().marshal(model, writer); + return writer.toString(); + } + + protected String toJSON(T model) throws JAXBException, IOException { + StringWriter writer = new StringWriter(); + mapper.writeValue(writer, model); +// original marshaller, uncomment this and comment mapper to verify backward compatibility +// ((JSONJAXBContext)context).createJSONMarshaller().marshallToJSON(model, writer); + return writer.toString(); + } + + public T fromJSON(String json) throws JAXBException, IOException { + return (T) + mapper.readValue(json, clazz); + } + + public T fromXML(String xml) throws JAXBException { + return (T) + context.createUnmarshaller().unmarshal(new StringReader(xml)); + } + + @SuppressWarnings("unused") + protected byte[] toPB(ProtobufMessageHandler model) { + return model.createProtobufOutput(); + } + + protected T fromPB(String pb) throws + Exception { + return (T)clazz.getMethod("getObjectFromMessage", byte[].class).invoke( + clazz.getDeclaredConstructor().newInstance(), + Base64.getDecoder().decode(AS_PB)); + } + + protected abstract void checkModel(T model); + + @Test + public void testBuildModel() throws Exception { + checkModel(buildTestModel()); + } + + @Test + public void testFromPB() throws Exception { + checkModel(fromPB(AS_PB)); + } + + @Test + public void testFromXML() throws Exception { + checkModel(fromXML(AS_XML)); + } + + @Test + public void testToXML() throws Exception { + // Uses fromXML to check model because XML element ordering can be random. 
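+    // Round-trip: marshal the built model to XML, re-parse it with fromXML, then assert on fields rather than comparing raw strings.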
+ checkModel(fromXML(toXML(buildTestModel()))); + } + + @Test + public void testToJSON() throws Exception { + try { + ObjectNode expObj = mapper.readValue(AS_JSON, ObjectNode.class); + ObjectNode actObj = mapper.readValue(toJSON(buildTestModel()), ObjectNode.class); + assertEquals(expObj, actObj); + } catch(Exception e) { + assertEquals(AS_JSON, toJSON(buildTestModel())); + } + } + + @Test + public void testFromJSON() throws Exception { + checkModel(fromJSON(AS_JSON)); + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java new file mode 100755 index 00000000..784cd226 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesInstanceModel.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.util.HashMap; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotSame; + +@Category({RestTests.class, SmallTests.class}) +public class TestNamespacesInstanceModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestNamespacesInstanceModel.class); + + public static final Map NAMESPACE_PROPERTIES = new HashMap<>(); + public static final String NAMESPACE_NAME = "namespaceName"; + + public TestNamespacesInstanceModel() throws Exception { + super(NamespacesInstanceModel.class); + + NAMESPACE_PROPERTIES.put("KEY_1","VALUE_1"); + NAMESPACE_PROPERTIES.put("KEY_2","VALUE_2"); + NAMESPACE_PROPERTIES.put("NAME","testNamespace"); + + AS_XML = + "" + + "NAMEtestNamespace" + + "KEY_2VALUE_2" + + "KEY_1VALUE_1" + + ""; + + AS_PB = "ChUKBE5BTUUSDXRlc3ROYW1lc3BhY2UKEAoFS0VZXzESB1ZBTFVFXzEKEAoFS0VZXzISB1ZBTFVFXzI="; + + AS_JSON = "{\"properties\":{\"NAME\":\"testNamespace\"," + + "\"KEY_1\":\"VALUE_1\",\"KEY_2\":\"VALUE_2\"}}"; + } + + @Override + protected NamespacesInstanceModel buildTestModel() { + return buildTestModel(NAMESPACE_NAME, NAMESPACE_PROPERTIES); + } + + public NamespacesInstanceModel buildTestModel(String namespace, Map properties) { + NamespacesInstanceModel model = new NamespacesInstanceModel(); + for(String key: properties.keySet()){ + model.addProperty(key, properties.get(key)); + } + return model; + } + + @Override + protected void 
checkModel(NamespacesInstanceModel model) { + checkModel(model, NAMESPACE_NAME, NAMESPACE_PROPERTIES); + } + + public void checkModel(NamespacesInstanceModel model, String namespace, + Map properties) { + Map modProperties = model.getProperties(); + assertEquals(properties.size(), modProperties.size()); + // Namespace name comes from REST URI, not properties. + assertNotSame(namespace, model.getNamespaceName()); + for(String property: properties.keySet()){ + assertEquals(properties.get(property), modProperties.get(property)); + } + } + + @Override + @Test + public void testBuildModel() throws Exception { + checkModel(buildTestModel()); + } + + @Override + @Test + public void testFromXML() throws Exception { + checkModel(fromXML(AS_XML)); + } + + @Override + @Test + public void testFromPB() throws Exception { + checkModel(fromPB(AS_PB)); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java new file mode 100755 index 00000000..5da776ab --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestNamespacesModel.java @@ -0,0 +1,96 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestNamespacesModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestNamespacesModel.class); + + public static final String NAMESPACE_NAME_1 = "testNamespace1"; + public static final String NAMESPACE_NAME_2 = "testNamespace2"; + + public TestNamespacesModel() throws Exception { + super(NamespacesModel.class); + + AS_XML = + "" + + "testNamespace1" + + "testNamespace2"; + + AS_PB = "Cg50ZXN0TmFtZXNwYWNlMQoOdGVzdE5hbWVzcGFjZTI="; + + AS_JSON = "{\"Namespace\":[\"testNamespace1\",\"testNamespace2\"]}"; + } + + @Override + protected NamespacesModel buildTestModel() { + return buildTestModel(NAMESPACE_NAME_1, NAMESPACE_NAME_2); + } + + public NamespacesModel buildTestModel(String... 
namespaces) { + NamespacesModel model = new NamespacesModel(); + model.setNamespaces(Arrays.asList(namespaces)); + return model; + } + + @Override + protected void checkModel(NamespacesModel model) { + checkModel(model, NAMESPACE_NAME_1, NAMESPACE_NAME_2); + } + + public void checkModel(NamespacesModel model, String... namespaceName) { + List namespaces = model.getNamespaces(); + assertEquals(namespaceName.length, namespaces.size()); + for(int i = 0; i < namespaceName.length; i++){ + assertTrue(namespaces.contains(namespaceName[i])); + } + } + + @Override + @Test + public void testBuildModel() throws Exception { + checkModel(buildTestModel()); + } + + @Override + @Test + public void testFromXML() throws Exception { + checkModel(fromXML(AS_XML)); + } + + @Override + @Test + public void testFromPB() throws Exception { + checkModel(fromPB(AS_PB)); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java new file mode 100755 index 00000000..99f8e3df --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestRowModel.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Iterator; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestRowModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRowModel.class); + + private static final byte[] ROW1 = Bytes.toBytes("testrow1"); + private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1"); + private static final byte[] VALUE1 = Bytes.toBytes("testvalue1"); + private static final long TIMESTAMP1 = 1245219839331L; + + public TestRowModel() throws Exception { + super(RowModel.class); + AS_XML = + "" + + "dGVzdHZhbHVlMQ=="; + + AS_JSON = + "{\"key\":\"dGVzdHJvdzE=\",\"Cell\":[{\"column\":\"dGVzdGNvbHVtbjE=\"," + + "\"timestamp\":1245219839331,\"$\":\"dGVzdHZhbHVlMQ==\"}]}"; + } + + @Override + protected RowModel buildTestModel() { + RowModel model = new RowModel(); + model.setKey(ROW1); + model.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1)); + return model; + } + + @Override + protected void checkModel(RowModel model) { + assertTrue(Bytes.equals(ROW1, model.getKey())); + Iterator cells = model.getCells().iterator(); + CellModel cell = cells.next(); + assertTrue(Bytes.equals(COLUMN1, cell.getColumn())); + assertTrue(Bytes.equals(VALUE1, cell.getValue())); + assertTrue(cell.hasUserTimestamp()); + assertEquals(TIMESTAMP1, cell.getTimestamp()); + assertFalse(cells.hasNext()); + } + + @Override + public void testFromPB() throws Exception { + //do nothing row model has no PB + } + + @Test + public void testEquals() throws Exception { + RowModel rowModel1 = buildTestModel(); + RowModel rowModel2 = buildTestModel(); + + assertEquals(rowModel1, rowModel2); + + RowModel rowModel3 = new RowModel(); + assertFalse(rowModel1.equals(rowModel3)); + } + + @Test + public void testToString() throws Exception { + String expectedRowKey = ToStringBuilder.reflectionToString(ROW1, ToStringStyle.SIMPLE_STYLE); + + RowModel rowModel = buildTestModel(); + System.out.println(rowModel); + + assertTrue(StringUtils.contains(rowModel.toString(), expectedRowKey)); + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java new file mode 100755 index 00000000..4835b7b0 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestScannerModel.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.databind.JsonMappingException; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.rest.ScannerResultGenerator; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestScannerModel extends TestModelBase { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestScannerModel.class); + + private static final String PRIVATE = "private"; + private static final String PUBLIC = "public"; + private static final byte[] START_ROW = Bytes.toBytes("abracadabra"); + private static final byte[] END_ROW = Bytes.toBytes("zzyzx"); + private static final byte[] COLUMN1 = Bytes.toBytes("column1"); + private static final byte[] COLUMN2 = Bytes.toBytes("column2:foo"); + private static final long START_TIME = 1245219839331L; + private static final long END_TIME = 1245393318192L; + private static final int CACHING = 1000; + private static final int LIMIT = 10000; + private static final int BATCH = 100; + private static final boolean CACHE_BLOCKS = false; + + public TestScannerModel() throws Exception { + super(ScannerModel.class); + + AS_XML = "" + + "" + + "Y29sdW1uMQ== Y29sdW1uMjpmb28=" + + "private public"; + + AS_JSON = "{\"batch\":100,\"caching\":1000,\"cacheBlocks\":false,\"endRow\":\"enp5eng=\"," + + "\"endTime\":1245393318192,\"maxVersions\":2147483647,\"startRow\":\"YWJyYWNhZGFicmE=\"," + + "\"startTime\":1245219839331,\"column\":[\"Y29sdW1uMQ==\",\"Y29sdW1uMjpmb28=\"]," + +"\"labels\":[\"private\",\"public\"]," + +"\"limit\":10000}"; + + AS_PB = "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mfJDj" + +"/////B0joB1IHcHJpdmF0ZVIGcHVibGljWABgkE4="; + } + + @Override + protected ScannerModel buildTestModel() { + ScannerModel model = new ScannerModel(); + model.setStartRow(START_ROW); + model.setEndRow(END_ROW); + model.addColumn(COLUMN1); + model.addColumn(COLUMN2); + model.setStartTime(START_TIME); + model.setEndTime(END_TIME); + model.setBatch(BATCH); + model.setCaching(CACHING); + model.addLabel(PRIVATE); + model.addLabel(PUBLIC); + model.setCacheBlocks(CACHE_BLOCKS); + model.setLimit(LIMIT); + return model; + } + + @Override + protected void checkModel(ScannerModel model) { + assertTrue(Bytes.equals(model.getStartRow(), START_ROW)); + assertTrue(Bytes.equals(model.getEndRow(), END_ROW)); + boolean foundCol1 = false, foundCol2 = false; + for (byte[] column : model.getColumns()) { + if (Bytes.equals(column, COLUMN1)) { + foundCol1 = true; + } else if (Bytes.equals(column, COLUMN2)) { + foundCol2 = true; + } + } + assertTrue(foundCol1); + assertTrue(foundCol2); + 
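+    // The remaining assertions verify that scan boundaries, batch/caching options, the limit and visibility labels survive deserialization.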
assertEquals(START_TIME, model.getStartTime()); + assertEquals(END_TIME, model.getEndTime()); + assertEquals(BATCH, model.getBatch()); + assertEquals(LIMIT, model.getLimit()); + assertEquals(CACHING, model.getCaching()); + assertEquals(CACHE_BLOCKS, model.getCacheBlocks()); + boolean foundLabel1 = false; + boolean foundLabel2 = false; + if (model.getLabels() != null && model.getLabels().size() > 0) { + for (String label : model.getLabels()) { + if (label.equals(PRIVATE)) { + foundLabel1 = true; + } else if (label.equals(PUBLIC)) { + foundLabel2 = true; + } + } + assertTrue(foundLabel1); + assertTrue(foundLabel2); + } + } + + @Test + public void testExistingFilter() throws Exception { + final String CORRECT_FILTER = "{\"type\": \"PrefixFilter\", \"value\": \"cg==\"}"; + verifyException(CORRECT_FILTER); + } + + @Test(expected = IllegalArgumentException.class) + public void testNonExistingFilter() throws Exception { + final String UNKNOWN_FILTER = "{\"type\": \"UnknownFilter\", \"value\": \"cg==\"}"; + verifyException(UNKNOWN_FILTER); + } + + @Test(expected = JsonMappingException.class) + public void testIncorrectFilterThrowsJME() throws Exception { + final String JME_FILTER = "{\"invalid_tag\": \"PrefixFilter\", \"value\": \"cg==\"}"; + verifyException(JME_FILTER); + } + + @Test(expected = JsonParseException.class) + public void tesIncorrecttFilterThrowsJPE() throws Exception { + final String JPE_FILTER = "{\"type\": \"PrefixFilter\",, \"value\": \"cg==\"}"; + verifyException(JPE_FILTER); + } + + private void verifyException(final String FILTER) throws Exception { + ScannerModel model = new ScannerModel(); + model.setFilter(FILTER); + ScannerResultGenerator.buildFilterFromModel(model); + } +} diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java new file mode 100755 index 00000000..75b64a69 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.model; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +import java.util.Iterator; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +@Category({ RestTests.class, SmallTests.class}) +public class TestStorageClusterStatusModel extends TestModelBase { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestStorageClusterStatusModel.class); + + public TestStorageClusterStatusModel() throws Exception { + super(StorageClusterStatusModel.class); + + AS_XML = + "" + + "" + + "" + + "" + + "" + + "" + + ""; + + AS_PB = + "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB" + + "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0" + + "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; + + + //Using jackson will break json backward compatibilty for this representation + //but the original one was broken as it would only print one Node element + //so the format itself was broken + AS_JSON = + "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," + + "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," + + "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + + "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," + + "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," + + "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245219839331," + + "\"heapSizeMB\":128,\"maxHeapSizeMB\":1024},{\"name\":\"test2\"," + + "\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\",\"stores\":1," + + "\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," + + "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," + + "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," + + "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245239331198," + + "\"heapSizeMB\":512,\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}"; + } + + @Override + protected StorageClusterStatusModel buildTestModel() { + StorageClusterStatusModel model = new StorageClusterStatusModel(); + model.setRegions(2); + model.setRequests(0); + model.setAverageLoad(1.0); + model.addLiveNode("test1", 1245219839331L, 128, 1024) + .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1); + model.addLiveNode("test2", 1245239331198L, 512, 1024) + .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME+",,1246000043724"),1, 1, 0, 0, 0, + 1, 2, 1, 1, 1, 1, 1); + return model; + } + + @Override + protected void checkModel(StorageClusterStatusModel model) { + assertEquals(2, model.getRegions()); + assertEquals(0, model.getRequests()); + assertEquals(1.0, model.getAverageLoad(), 0.0); + Iterator nodes = + model.getLiveNodes().iterator(); + StorageClusterStatusModel.Node node = nodes.next(); + assertEquals("test1", node.getName()); + assertEquals(1245219839331L, node.getStartCode()); + assertEquals(128, node.getHeapSizeMB()); + assertEquals(1024, node.getMaxHeapSizeMB()); + Iterator regions = 
+ node.getRegions().iterator(); + StorageClusterStatusModel.Node.Region region = regions.next(); + assertTrue(Bytes.toString(region.getName()).equals( + "hbase:root,,0")); + assertEquals(1, region.getStores()); + assertEquals(1, region.getStorefiles()); + assertEquals(0, region.getStorefileSizeMB()); + assertEquals(0, region.getMemStoreSizeMB()); + assertEquals(0, region.getStorefileIndexSizeKB()); + assertEquals(1, region.getReadRequestsCount()); + assertEquals(2, region.getWriteRequestsCount()); + assertEquals(1, region.getRootIndexSizeKB()); + assertEquals(1, region.getTotalStaticIndexSizeKB()); + assertEquals(1, region.getTotalStaticBloomSizeKB()); + assertEquals(1, region.getTotalCompactingKVs()); + assertEquals(1, region.getCurrentCompactedKVs()); + assertFalse(regions.hasNext()); + node = nodes.next(); + assertEquals("test2", node.getName()); + assertEquals(1245239331198L, node.getStartCode()); + assertEquals(512, node.getHeapSizeMB()); + assertEquals(1024, node.getMaxHeapSizeMB()); + regions = node.getRegions().iterator(); + region = regions.next(); + assertEquals(Bytes.toString(region.getName()), + TableName.META_TABLE_NAME+",,1246000043724"); + assertEquals(1, region.getStores()); + assertEquals(1, region.getStorefiles()); + assertEquals(0, region.getStorefileSizeMB()); + assertEquals(0, region.getMemStoreSizeMB()); + assertEquals(0, region.getStorefileIndexSizeKB()); + assertEquals(1, region.getReadRequestsCount()); + assertEquals(2, region.getWriteRequestsCount()); + assertEquals(1, region.getRootIndexSizeKB()); + assertEquals(1, region.getTotalStaticIndexSizeKB()); + assertEquals(1, region.getTotalStaticBloomSizeKB()); + assertEquals(1, region.getTotalCompactingKVs()); + assertEquals(1, region.getCurrentCompactedKVs()); + + assertFalse(regions.hasNext()); + assertFalse(nodes.hasNext()); + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java new file mode 100755 index 00000000..b6101462 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestStorageClusterVersionModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestStorageClusterVersionModel.class); + + private static final String VERSION = "0.0.1-testing"; + + public TestStorageClusterVersionModel() throws Exception { + super(StorageClusterVersionModel.class); + AS_XML = + ""+ + ""; + + AS_JSON = "{\"Version\": \"0.0.1-testing\"}"; + } + + @Override + protected StorageClusterVersionModel buildTestModel() { + StorageClusterVersionModel model = new StorageClusterVersionModel(); + model.setVersion(VERSION); + return model; + } + + @Override + protected void checkModel(StorageClusterVersionModel model) { + assertEquals(VERSION, model.getVersion()); + } + + @Override + public void testFromPB() throws Exception { + //ignore test no pb + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java new file mode 100755 index 00000000..2ada01c5 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableInfoModel.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Iterator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestTableInfoModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTableInfoModel.class); + + private static final String TABLE = "testtable"; + private static final byte[] START_KEY = Bytes.toBytes("abracadbra"); + private static final byte[] END_KEY = Bytes.toBytes("zzyzx"); + private static final long ID = 8731042424L; + private static final String LOCATION = "testhost:9876"; + + public TestTableInfoModel() throws Exception { + super(TableInfoModel.class); + AS_XML = + ""; + + AS_PB = + "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" + + "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY="; + + AS_JSON = + "{\"name\":\"testtable\",\"Region\":[{\"endKey\":\"enp5eng=\",\"id\":8731042424," + + "\"location\":\"testhost:9876\",\"" + + "name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}]}"; + } + + @Override + protected TableInfoModel buildTestModel() { + TableInfoModel model = new TableInfoModel(); + model.setName(TABLE); + model.add(new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION)); + return model; + } + + @Override + protected void checkModel(TableInfoModel model) { + assertEquals(TABLE, model.getName()); + Iterator regions = model.getRegions().iterator(); + TableRegionModel region = regions.next(); + assertTrue(Bytes.equals(region.getStartKey(), START_KEY)); + assertTrue(Bytes.equals(region.getEndKey(), END_KEY)); + assertEquals(ID, region.getId()); + assertEquals(LOCATION, region.getLocation()); + assertFalse(regions.hasNext()); + } + + @Override + public void testBuildModel() throws Exception { + checkModel(buildTestModel()); + } + + @Override + public void testFromXML() throws Exception { + checkModel(fromXML(AS_XML)); + } + + @Override + public void testFromPB() throws Exception { + checkModel(fromPB(AS_PB)); + } + +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java new file mode 100755 index 00000000..eca14978 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableListModel.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import java.util.Iterator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestTableListModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTableListModel.class); + + private static final String TABLE1 = "table1"; + private static final String TABLE2 = "table2"; + private static final String TABLE3 = "table3"; + + public TestTableListModel() throws Exception { + super(TableListModel.class); + AS_XML = + "
"; + + AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz"; + + AS_JSON = + "{\"table\":[{\"name\":\"table1\"},{\"name\":\"table2\"},{\"name\":\"table3\"}]}"; + } + + @Override + protected TableListModel buildTestModel() { + TableListModel model = new TableListModel(); + model.add(new TableModel(TABLE1)); + model.add(new TableModel(TABLE2)); + model.add(new TableModel(TABLE3)); + return model; + } + + @Override + protected void checkModel(TableListModel model) { + Iterator tables = model.getTables().iterator(); + TableModel table = tables.next(); + assertEquals(TABLE1, table.getName()); + table = tables.next(); + assertEquals(TABLE2, table.getName()); + table = tables.next(); + assertEquals(TABLE3, table.getName()); + assertFalse(tables.hasNext()); + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java new file mode 100755 index 00000000..4285c9bf --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestTableRegionModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTableRegionModel.class); + + private static final String TABLE = "testtable"; + private static final byte[] START_KEY = Bytes.toBytes("abracadbra"); + private static final byte[] END_KEY = Bytes.toBytes("zzyzx"); + private static final long ID = 8731042424L; + private static final String LOCATION = "testhost:9876"; + + public TestTableRegionModel() throws Exception { + super(TableRegionModel.class); + + AS_XML = + ""; + + AS_JSON = + "{\"endKey\":\"enp5eng=\",\"id\":8731042424,\"location\":\"testhost:9876\"," + + "\"name\":\"testtable,abracadbra,8731042424.ad9860f031282c46ed431d7af8f94aca.\",\"" + + "startKey\":\"YWJyYWNhZGJyYQ==\"}"; + } + + @Override + protected TableRegionModel buildTestModel() { + TableRegionModel model = + new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); + return model; + } + + @Override + protected void checkModel(TableRegionModel model) { + assertTrue(Bytes.equals(model.getStartKey(), START_KEY)); + assertTrue(Bytes.equals(model.getEndKey(), END_KEY)); + assertEquals(ID, model.getId()); + assertEquals(LOCATION, model.getLocation()); + assertEquals(model.getName(), + TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + + ".ad9860f031282c46ed431d7af8f94aca."); + } + + @Test + public void testGetName() { + TableRegionModel model = buildTestModel(); + String modelName = model.getName(); + HRegionInfo hri = new HRegionInfo(TableName.valueOf(TABLE), + START_KEY, END_KEY, false, ID); + assertEquals(modelName, hri.getRegionNameAsString()); + } + + @Test + public void testSetName() { + TableRegionModel model = buildTestModel(); + String name = model.getName(); + model.setName(name); + assertEquals(name, model.getName()); + } + + @Override + public void testFromPB() throws Exception { + //no pb ignore + } +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java new file mode 100755 index 00000000..6b50ab70 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableSchemaModel.java @@ -0,0 +1,125 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Iterator; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@Category({RestTests.class, SmallTests.class}) +public class TestTableSchemaModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestTableSchemaModel.class); + + private static final Logger LOG = LoggerFactory.getLogger(TestTableSchemaModel.class); + + public static final String TABLE_NAME = "testTable"; + private static final boolean IS_META = false; + private static final boolean IS_ROOT = false; + private static final boolean READONLY = false; + + TestColumnSchemaModel testColumnSchemaModel; + + public TestTableSchemaModel() throws Exception { + super(TableSchemaModel.class); + testColumnSchemaModel = new TestColumnSchemaModel(); + + AS_XML = + "" + + "" + + "" + + ""; + + AS_PB = + "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" + + "TFkSBWZhbHNlGpcBCgp0ZXN0Y29sdW1uEhIKCUJMT0NLU0laRRIFMTYzODQSEwoLQkxPT01GSUxU" + + "RVISBE5PTkUSEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01QUkVTU0lPThICR1oSDQoIVkVSU0lP" + + "TlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZhbHNlGICjBSABKgJHWigA"; + + AS_JSON = + "{\"name\":\"testTable\",\"IS_META\":\"false\",\"IS_ROOT\":\"false\"," + + "\"READONLY\":\"false\",\"ColumnSchema\":[{\"name\":\"testcolumn\"," + + "\"BLOCKSIZE\":\"16384\",\"BLOOMFILTER\":\"NONE\",\"BLOCKCACHE\":\"true\"," + + "\"COMPRESSION\":\"GZ\",\"VERSIONS\":\"1\",\"TTL\":\"86400\",\"IN_MEMORY\":\"false\"}]}"; + } + + @Override + protected TableSchemaModel buildTestModel() { + return buildTestModel(TABLE_NAME); + } + + public TableSchemaModel buildTestModel(String name) { + TableSchemaModel model = new TableSchemaModel(); + model.setName(name); + model.__setIsMeta(IS_META); + model.__setIsRoot(IS_ROOT); + model.__setReadOnly(READONLY); + model.addColumnFamily(testColumnSchemaModel.buildTestModel()); + return model; + } + + @Override + protected void checkModel(TableSchemaModel model) { + checkModel(model, TABLE_NAME); + } + + public void checkModel(TableSchemaModel model, String tableName) { + assertEquals(model.getName(), tableName); + assertEquals(IS_META, model.__getIsMeta()); + assertEquals(IS_ROOT, model.__getIsRoot()); + assertEquals(READONLY, model.__getReadOnly()); + Iterator families = model.getColumns().iterator(); + assertTrue(families.hasNext()); + ColumnSchemaModel family = families.next(); + testColumnSchemaModel.checkModel(family); + assertFalse(families.hasNext()); + } + + @Override + @Test + public void testBuildModel() throws Exception { + checkModel(buildTestModel()); + } + + @Override + @Test + public void 
testFromXML() throws Exception { + checkModel(fromXML(AS_XML)); + } + + @Override + @Test + public void testFromPB() throws Exception { + checkModel(fromPB(AS_PB)); + } + +} + diff --git a/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java new file mode 100755 index 00000000..b3529505 --- /dev/null +++ b/rest/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestVersionModel.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rest.model; + +import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.testclassification.RestTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.experimental.categories.Category; + +@Category({RestTests.class, SmallTests.class}) +public class TestVersionModel extends TestModelBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestVersionModel.class); + + private static final String REST_VERSION = "0.0.1"; + private static final String OS_VERSION = + "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"; + private static final String JVM_VERSION = + "Sun Microsystems Inc. 1.6.0_13-11.3-b02"; + private static final String JETTY_VERSION = "6.1.14"; + private static final String JERSEY_VERSION = "1.1.0-ea"; + + public TestVersionModel() throws Exception { + super(VersionModel.class); + AS_XML = + ""; + + AS_PB = + "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" + + "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE="; + + AS_JSON = + "{\"JVM\":\"Sun Microsystems Inc. 
1.6.0_13-11.3-b02\",\"Jersey\":\"1.1.0-ea\"," + + "\"OS\":\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\",\"" + + "REST\":\"0.0.1\",\"Server\":\"6.1.14\"}"; + } + + @Override + protected VersionModel buildTestModel() { + VersionModel model = new VersionModel(); + model.setRESTVersion(REST_VERSION); + model.setOSVersion(OS_VERSION); + model.setJVMVersion(JVM_VERSION); + model.setServerVersion(JETTY_VERSION); + model.setJerseyVersion(JERSEY_VERSION); + return model; + } + + @Override + protected void checkModel(VersionModel model) { + assertEquals(REST_VERSION, model.getRESTVersion()); + assertEquals(OS_VERSION, model.getOSVersion()); + assertEquals(JVM_VERSION, model.getJVMVersion()); + assertEquals(JETTY_VERSION, model.getServerVersion()); + assertEquals(JERSEY_VERSION, model.getJerseyVersion()); + } +} + diff --git a/rest/hbase-rest/src/test/resources/hbase-site.xml b/rest/hbase-rest/src/test/resources/hbase-site.xml new file mode 100755 index 00000000..2bd3ee45 --- /dev/null +++ b/rest/hbase-rest/src/test/resources/hbase-site.xml @@ -0,0 +1,142 @@ + + + + + + hbase.regionserver.msginterval + 1000 + Interval between messages from the RegionServer to HMaster + in milliseconds. Default is 15. Set this value low if you want unit + tests to be responsive. + + + + hbase.defaults.for.version.skip + true + + + hbase.server.thread.wakefrequency + 1000 + Time to sleep in between searches for work (in milliseconds). + Used as sleep interval by service threads such as hbase:meta scanner and log roller. + + + + hbase.master.event.waiting.time + 50 + Time to sleep between checks to see if a table event took place. + + + + hbase.regionserver.handler.count + 5 + + + hbase.master.info.port + -1 + The port for the hbase master web UI + Set to -1 if you do not want the info server to run. + + + + hbase.master.port + 0 + Always have masters and regionservers come up on port '0' so we don't clash over + default ports. + + + + hbase.regionserver.port + 0 + Always have masters and regionservers come up on port '0' so we don't clash over + default ports. + + + + hbase.ipc.client.fallback-to-simple-auth-allowed + true + + + + hbase.regionserver.info.port + -1 + The port for the hbase regionserver web UI + Set to -1 if you do not want the info server to run. + + + + hbase.regionserver.info.port.auto + true + Info server auto port bind. Enables automatic port + search if hbase.regionserver.info.port is already in use. + Enabled for testing to run multiple tests on one machine. + + + + hbase.regionserver.safemode + false + + Turn on/off safe mode in region server. Always on for production, always off + for tests. + + + + hbase.hregion.max.filesize + 67108864 + + Maximum desired file size for an HRegion. If filesize exceeds + value + (value / 2), the HRegion is split in two. Default: 256M. + + Keep the maximum filesize small so we split more often in tests. + + + + hadoop.log.dir + ${user.dir}/../logs + + + hbase.zookeeper.property.clientPort + 21818 + Property from ZooKeeper's config zoo.cfg. + The port at which the clients will connect. + + + + hbase.defaults.for.version.skip + true + + Set to true to skip the 'hbase.defaults.for.version'. + Setting this to true can be useful in contexts other than + the other side of a maven generation; i.e. running in an + ide. 
You'll want to set this boolean to true to avoid + seeing the RuntimeException complaint: "hbase-default.xml file + seems to be for and old version of HBase (@@@VERSION@@@), this + version is X.X.X-SNAPSHOT" + + + + hbase.table.sanity.checks + false + Skip sanity checks in tests + + + diff --git a/rest/hbase-rest/src/test/resources/hdfs-site.xml b/rest/hbase-rest/src/test/resources/hdfs-site.xml new file mode 100755 index 00000000..9230105a --- /dev/null +++ b/rest/hbase-rest/src/test/resources/hdfs-site.xml @@ -0,0 +1,56 @@ + + + + + + + + dfs.namenode.fs-limits.min-block-size + 0 + + + dfs.datanode.handler.count + 5 + Default is 10 + + + dfs.namenode.handler.count + 5 + Default is 10 + + + dfs.namenode.service.handler.count + 5 + Default is 10 + + + diff --git a/rest/hbase-rest/src/test/resources/log4j.properties b/rest/hbase-rest/src/test/resources/log4j.properties new file mode 100755 index 00000000..4e5f014b --- /dev/null +++ b/rest/hbase-rest/src/test/resources/log4j.properties @@ -0,0 +1,69 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Define some default values that can be overridden by system properties +hbase.root.logger=INFO,console +hbase.log.dir=. +hbase.log.file=hbase.log + +# Define the root logger to the system property "hbase.root.logger". +log4j.rootLogger=${hbase.root.logger} + +# Logging Threshold +log4j.threshold=ALL + +# +# Daily Rolling File Appender +# +log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} + +# Rollver at midnight +log4j.appender.DRFA.DatePattern=.yyyy-MM-dd + +# 30-day backup +#log4j.appender.DRFA.MaxBackupIndex=30 +log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout +# Debugging Pattern format +log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n + + +# +# console +# Add "console" to rootlogger above if you want to use this +# +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.target=System.err +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n + +# Custom Logging levels + +#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG + +log4j.logger.org.apache.hadoop=WARN +log4j.logger.org.apache.zookeeper=ERROR +log4j.logger.org.apache.hadoop.hbase=DEBUG + +#These settings are workarounds against spurious logs from the minicluster. +#See HBASE-4709 +log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN +log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN +log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN +log4j.logger.org.apache.hadoop.metrics2.util.MBeans=WARN +# Enable this to get detailed connection error/retry logging. 
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE +log4j.logger.org.apache.directory=WARN diff --git a/rest/hbase-rest/src/test/resources/mapred-queues.xml b/rest/hbase-rest/src/test/resources/mapred-queues.xml new file mode 100755 index 00000000..43f3e2ab --- /dev/null +++ b/rest/hbase-rest/src/test/resources/mapred-queues.xml @@ -0,0 +1,75 @@ + + + + + + + + + + default + + + + + + + running + + + * + + + * + + + + diff --git a/rest/hbase-rest/src/test/resources/mapred-site.xml b/rest/hbase-rest/src/test/resources/mapred-site.xml new file mode 100755 index 00000000..787ffb75 --- /dev/null +++ b/rest/hbase-rest/src/test/resources/mapred-site.xml @@ -0,0 +1,34 @@ + + + + + + mapred.map.child.java.opts + -Djava.awt.headless=true + + + + mapred.reduce.child.java.opts + -Djava.awt.headless=true + + + diff --git a/rest/pom.xml b/rest/pom.xml new file mode 100755 index 00000000..9ac8c854 --- /dev/null +++ b/rest/pom.xml @@ -0,0 +1,330 @@ + + + 4.0.0 + pom + + + hbase-rest + hbase-rest-protocol + + + + + + org.apache.hbase.connectors + hbase-connectors + ${revision} + ../ + + + rest + Apache HBase - Rest + HBase Rest + + + 2.10.1 + 4.5.3 + 4.4.13 + 2.2.12 + 3.1.0 + 1.2.0 + 1.2 + 9.3.28.v20191105 + 2.25.1 + 2.0.1 + + + + + + org.apache.hbase.connectors.rest + hbase-rest-protocol + ${revision} + + + com.fasterxml.jackson.jaxrs + jackson-jaxrs-json-provider + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-annotations + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-core + ${jackson.version} + + + com.fasterxml.jackson.core + jackson-databind + ${jackson.version} + + + org.codehaus.jettison + jettison + ${jettison.version} + + + + org.glassfish.web + javax.servlet.jsp + ${glassfish.jsp.version} + + + org.bouncycastle + bcprov-jdk15on + ${bouncycastle.version} + test + + + javax.ws.rs + javax.ws.rs-api + ${wx.rs.api.version} + + + org.apache.httpcomponents + httpclient + ${httpclient.version} + + + org.apache.httpcomponents + httpcore + ${httpcore.version} + + + javax.xml.bind + jaxb-api + ${jaxb-api.version} + + + javax.xml.stream + stax-api + + + + + javax.servlet + javax.servlet-api + ${servlet.api.version} + + + com.sun.activation + javax.activation + ${javax.activation.version} + + + javax.annotation + javax.annotation-api + ${javax.annotation.version} + + + org.eclipse.jetty + jetty-server + ${jetty.version} + + + org.eclipse.jetty + jetty-servlet + ${jetty.version} + + + org.eclipse.jetty + servlet-api + + + + + org.eclipse.jetty + jetty-http + ${jetty.version} + + + org.eclipse.jetty + jetty-util + ${jetty.version} + + + org.eclipse.jetty + jetty-jmx + ${jetty.version} + + + org.glassfish.jersey.containers + jersey-container-servlet-core + ${jersey.version} + + + org.apache.hadoop + hadoop-common + test-jar + tests + ${hadoop-three.version} + + + com.sun.jersey + jersey-core + + + com.google.code.findbugs + jsr305 + + + + + org.apache.hadoop + hadoop-annotations + ${hadoop-three.version} + + + org.apache.hadoop + hadoop-auth + ${hadoop-three.version} + + + com.google.guava + guava + + + net.minidev + json-smart + + + + + org.apache.hbase + hbase-testing-util + ${hbase.version} + test + + + org.apache.hadoop + hadoop-common + ${hadoop-three.version} + + + com.sun.jersey + jersey-core + + + com.sun.jersey + jersey-json + + + com.sun.jersey + jersey-servlet + + + com.sun.jersey + jersey-server + + + javax.servlet.jsp + jsp-api + + + javax.servlet + javax.servlet-api + + + stax + stax-api + + + io.netty + netty + + + 
com.google.code.findbugs + jsr305 + + + junit + junit + + + org.codehause.jackson + jackson-core-asl + + + org.codehause.jackson + jackson-mapper-asl + + + + + org.apache.hadoop + hadoop-mapreduce-client-core + ${hadoop-three.version} + + + com.sun.jersey + jersey-core + + + org.codehaus.jackson + jackson-jaxrs + + + org.codehaus.jackson + jackson-xc + + + io.netty + netty + + + javax.inject + javax.inject + + + org.codehaus.jackson + jackson-core-asl + + + org.codehaus.jackson + jackson-mapper-asl + + + com.google.guava + guava + + + + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + + \ No newline at end of file
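
For orientation only (not part of the patch above): a minimal, self-contained sketch of the serialization round trip these model tests exercise, assuming the hbase-rest model classes and the Jackson JAX-RS provider added by this change are on the classpath. Class and method names are taken from the test code above; everything else (the class name and chosen field values) is illustrative.

import java.io.StringWriter;
import javax.ws.rs.core.MediaType;
import javax.xml.bind.JAXBContext;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.jaxrs.json.JacksonJaxbJsonProvider;
import org.apache.hadoop.hbase.rest.model.VersionModel;

public class VersionModelRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a model the same way TestVersionModel#buildTestModel does.
    VersionModel model = new VersionModel();
    model.setRESTVersion("0.0.1");
    model.setServerVersion("6.1.14");

    // XML via JAXB, mirroring TestModelBase#toXML / #fromXML.
    JAXBContext context = JAXBContext.newInstance(VersionModel.class);
    StringWriter xml = new StringWriter();
    context.createMarshaller().marshal(model, xml);
    System.out.println(xml);

    // JSON via the same Jackson JAX-RS provider the tests use to locate a mapper.
    ObjectMapper mapper = new JacksonJaxbJsonProvider()
        .locateMapper(VersionModel.class, MediaType.APPLICATION_JSON_TYPE);
    System.out.println(mapper.writeValueAsString(model));
  }
}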