diff --git a/.asf.yaml b/.asf.yaml deleted file mode 100644 index 734b2b573867..000000000000 --- a/.asf.yaml +++ /dev/null @@ -1,60 +0,0 @@ -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -github: - description: "Apache Geode" - homepage: https://geode.apache.org/ - labels: - - geode - - datagrid - - apache - features: - # To enable wiki for documentation - wiki: false - # To enable issue management - issues: false - # To enable projects for project management boards - projects: true - - enabled_merge_buttons: - # enable squash button: - squash: true - # disable merge button: - merge: false - # enable rebase button: - rebase: true - - protected_branches: - develop: - required_status_checks: - # strict means "Require branches to be up to date before merging". - strict: false - # contexts are the names of checks that must pass - contexts: - - concourse-ci/api-check-test-openjdk11 - - concourse-ci/build - - concourse-ci/unit-test-openjdk11 - - "Analyze (java)" - - "Analyze (javascript)" - - "Analyze (python)" - - "Analyze (go)" - - CodeQL - - required_pull_request_reviews: - dismiss_stale_reviews: false - require_code_owner_reviews: true - required_approving_review_count: 1 - - required_signatures: false - -notifications: - commits: commits@geode.apache.org - issues: issues@geode.apache.org - pullrequests: notifications@geode.apache.org - jira_options: link label diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d7fc81031212..aa0c8cb844a4 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,24 +1 @@ - - - - -### For all changes: -- [ ] Is there a JIRA ticket associated with this PR? Is it referenced in the commit message? - -- [ ] Has your PR been rebased against the latest commit within the target branch (typically `develop`)? - -- [ ] Is your initial contribution a single, squashed commit? - -- [ ] Does `gradlew build` run cleanly? - -- [ ] Have you written or updated unit tests to verify your changes? - -- [ ] If adding new dependencies to the code, are these dependencies licensed in a way that is compatible for inclusion under [ASF 2.0](http://www.apache.org/legal/resolved.html#category-a)? - - +description here (optional) \ No newline at end of file diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml deleted file mode 100644 index a9d97c00418b..000000000000 --- a/.github/workflows/codeql.yml +++ /dev/null @@ -1,97 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. -# -name: "CodeQL" - -on: - push: - branches: [ develop ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ develop ] - schedule: - - cron: '22 22 * * 2' - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: [ 'go', 'java', 'javascript', 'python' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support - - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - - name: Setup Java JDK - uses: actions/setup-java@v2.3.1 - with: - java-version: 8 - distribution: temurin - cache: gradle - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - - # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # queries: security-extended,security-and-quality - - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - if: matrix.language != 'java' - # ℹ️ Command-line programs to run using the OS shell. - # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - - # If the Autobuild fails above, remove it and uncomment the following three lines. - # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. - # - run: | - # echo "Run, Build Application using script" - # ./location_of_script_within_repo/buildscript.sh - - run: ./gradlew dev installDist --no-daemon --no-build-cache - if: matrix.language == 'java' - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 diff --git a/BUILDING.md b/BUILDING.md index b25ed3db394f..d924dc329ddd 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -10,10 +10,7 @@ Set the JAVA\_HOME environment variable. For example: | OSX | ``export JAVA_HOME=`/usr/libexec/java_home -v 1.8` `` | | Windows | ``set JAVA_HOME="C:\Program Files\Java\jdk1.8.0_121"`` | -Download the project source from the Releases page at -[Apache Geode](http://geode.apache.org/releases/), and unpack the source code. 
- -Within the directory containing the unpacked source code, run the gradle build: +Run the gradle build: ```console $ ./gradlew build @@ -81,17 +78,17 @@ The following steps have been tested with **IntelliJ IDEA 2020.3.3** 1. Enter **To:** *GeodeStyle*, check **Current scheme**, and press **OK**. 1. Select *GeodeStyle* in **Scheme** drop-down box. -1. Make Apache the default Copyright. +1. Make VMware the default Copyright. 1. Select **IntelliJ IDEA -> Preferences...** from the menu. 1. Open the **Editor -> Copyright** section. - 1. If *Apache* does not appear in the **Default project copyright** drop-down box: + 1. If *VMware* does not appear in the **Default project copyright** drop-down box: 1. Open the **Copyright Profiles** subsection. 1. Select the "import" icon (the small arrow pointing down and to the left) from the Copyright Profiles section's toolbar. - 1. Select `etc/intellij-apache-copyright-notice.xml` from the Geode repository root. + 1. Select `etc/intellij-vmware-copyright-notice.xml` from the repository root. 1. Return to the **Copyright** section. - 1. Select *Apache* in the **Default project copyright** drop-down box. + 1. Select *VMware* in the **Default project copyright** drop-down box. 1. Open the **Formatting** subsection. 1. Uncheck **Add blank line after** and select **OK**. diff --git a/CODEOWNERS b/CODEOWNERS deleted file mode 100644 index 092d63324731..000000000000 --- a/CODEOWNERS +++ /dev/null @@ -1,317 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -#----------------------------------------------------------------- -# CODEOWNERS and .asf.yml - ownerless so everyone "owns" it -#----------------------------------------------------------------- -#CODEOWNERS -#.asf.yaml - -#----------------------------------------------------------------- -# Serialization -#----------------------------------------------------------------- -geode-serialization/** @echobravopapa @Bill @kirklund @kamilla1201 @jchen21 @pivotal-jbarrett -geode-serialization/**/internal/serialization/filter/* @kirklund @jchen21 -geode-core/**/org/apache/geode/pdx/** @upthewaterspout @dschneider-pivotal @agingade @pivotal-jbarrett -geode-core/**/org/apache/geode/codeAnalysis/** @upthewaterspout @dschneider-pivotal @agingade @kirklund -geode-core/**/org/apache/geode/internal/* @echobravopapa @Bill @kirklund @kamilla1201 @pivotal-jbarrett -geode-core/**/META-INF/**/*SanctionedSerializablesService @kirklund @jchen21 - -#----------------------------------------------------------------- -# Membership -#----------------------------------------------------------------- -geode-membership/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-old-client-support/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-tcp-server/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/distributed/internal/membership/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett - -#----------------------------------------------------------------- -# P2P Messaging -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/tcp/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/distributed/internal/direct/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/net/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/net/** @Bill @mivanac @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/distributed/* @echobravopapa @Bill @kirklund @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/distributed/internal/* @echobravopapa @Bill @kirklund @kamilla1201 @pivotal-jbarrett - -#----------------------------------------------------------------- -# Client/server messaging and cache operations -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/cache/client/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/cache/server/** @echobravopapa @Bill @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/cache/client/internal/** @Bill @echobravopapa @kamilla1201 @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/cache/tier/** @Bill @echobravopapa @agingade @kamilla1201 @pivotal-jbarrett -geode-assembly/**/apache/geode/client/sni/** @Bill @echobravopapa @kamilla1201 @pivotal-jbarrett - -#----------------------------------------------------------------- -# Client Queues -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/ha/** @agingade @boglesby @nabarunnag -geode-core/**/org/apache/geode/internal/cache/**/CacheClient* @agingade @boglesby @nabarunnag - -#----------------------------------------------------------------- -# Core Public API packages - Cache, Region, etc. 
-#----------------------------------------------------------------- -geode-core/**/org/apache/geode/* @dschneider-pivotal @boglesby @nabarunnag @pivotal-jbarrett -geode-core/**/org/apache/geode/cache/* @dschneider-pivotal @boglesby @nabarunnag @pivotal-jbarrett -geode-core/**/org/apache/geode/cache/util/** @dschneider-pivotal @boglesby @nabarunnag @pivotal-jbarrett - -#----------------------------------------------------------------- -# Distributed Locks -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/distributed/internal/locks/** @dschneider-pivotal @pivotal-eshu @kirklund -geode-core/**/org/apache/geode/distributed/internal/deadlock/** @dschneider-pivotal @pivotal-eshu @kirklund - -#----------------------------------------------------------------- -# Core region implementations and plumbing -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/* @nabarunnag @DonalEvans @jchen21 - -#----------------------------------------------------------------- -# Region entry management -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/entries/** @dschneider-pivotal @upthewaterspout -geode-core/**/org/apache/geode/internal/cache/region/entry/** @dschneider-pivotal @upthewaterspout -geode-core/**/org/apache/geode/internal/cache/map/** @dschneider-pivotal @upthewaterspout -geode-core/**/org/apache/geode/compression/** @dschneider-pivotal @kirklund -geode-core/**/org/apache/geode/internal/cache/compression/** @dschneider-pivotal @kirklund - -#----------------------------------------------------------------- -# Partitioned Regions -#----------------------------------------------------------------- -geode-rebalancer/** @boglesby @BenjaminPerryRoss @nabarunnag -geode-core/**/org/apache/geode/internal/cache/partitioned/** @boglesby @BenjaminPerryRoss @nabarunnag -geode-core/**/org/apache/geode/cache/partition/** @boglesby @BenjaminPerryRoss @nabarunnag - -#----------------------------------------------------------------- -# Event tracking -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/event/** @agingade @nabarunnag @gesterzhou - -#----------------------------------------------------------------- -# Eviction -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/eviction/** @agingade @dschneider-pivotal -geode-core/**/org/apache/geode/internal/size/** @agingade @dschneider-pivotal @kirklund - -#----------------------------------------------------------------- -# Offheap -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/offheap/** @pivotal-eshu @dschneider-pivotal @kirklund - -#----------------------------------------------------------------- -# Transactions -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/tx/** @pivotal-eshu @gesterzhou -geode-core/**/org/apache/geode/internal/jta/** @pivotal-eshu @gesterzhou -geode-core/**/org/apache/geode/internal/cache/locks/** @pivotal-eshu @gesterzhou - -#----------------------------------------------------------------- -# Function Execution -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/execute/** @boglesby @nabarunnag @pivotal-jbarrett 
-geode-core/**/org/apache/geode/cache/execute/** @boglesby @nabarunnag @pivotal-jbarrett - -#----------------------------------------------------------------- -# Querying -#----------------------------------------------------------------- -geode-cq/** @nabarunnag @DonalEvans @agingade -geode-core/**/org/apache/geode/cache/query/** @nabarunnag @DonalEvans @agingade - -#----------------------------------------------------------------- -# Session State: -#----------------------------------------------------------------- -extensions/** @jdeppe-pivotal @BenjaminPerryRoss @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/modules/util/** @jdeppe-pivotal @BenjaminPerryRoss @pivotal-jbarrett -geode-assembly/**/org/apache/geode/session/** @jdeppe-pivotal @BenjaminPerryRoss @pivotal-jbarrett - -#----------------------------------------------------------------- -# DEV rest API -#----------------------------------------------------------------- -geode-web-api/** @jdeppe-pivotal @jinmeiliao -geode-assembly/**/org/apache/geode/rest/** @jdeppe-pivotal @jinmeiliao - -#----------------------------------------------------------------- -# Lucene integration -#----------------------------------------------------------------- -geode-lucene/** @nabarunnag @DonalEvans - -#----------------------------------------------------------------- -# Memcached integration -#----------------------------------------------------------------- -geode-memcached/** @nabarunnag @DonalEvans - -#----------------------------------------------------------------- -# Misc Utilities -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/util/** @nabarunnag @boglesby @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/util/concurrent/** @nabarunnag @boglesby @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/shared/** @nabarunnag @boglesby @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/sequencelog/** @nabarunnag @boglesby @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/concurrent/** @nabarunnag @boglesby @pivotal-jbarrett -geode-core/**/org/apache/geode/distributed/internal/unsafe/** @nabarunnag @boglesby @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/lang/** @nabarunnag @boglesby @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/monitoring/** @nabarunnag @boglesby @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/exception/** @nabarunnag @boglesby @pivotal-jbarrett -geode-core/**/org/apache/geode/lang/** @nabarunnag @boglesby @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/ra/** @nabarunnag @boglesby @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/cache/vmotion/** @nabarunnag @boglesby @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/jndi/** @nabarunnag @boglesby @pivotal-jbarrett -geode-common/** @nabarunnag @boglesby @kirklund @pivotal-jbarrett -geode-unsafe/** @nabarunnag @boglesby @kirklund @pivotal-jbarrett - -#----------------------------------------------------------------- -# Persistence -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/cache/persistence/** @dschneider-pivotal @jchen21 @upthewaterspout @gesterzhou -geode-core/**/org/apache/geode/internal/cache/persistence/** @dschneider-pivotal @jchen21 @upthewaterspout @gesterzhou -geode-core/**/org/apache/geode/internal/cache/backup/** @dschneider-pivotal @agingade @jchen21 @upthewaterspout @gesterzhou 
-geode-assembly/**/org/apache/geode/cache/persistence/** @dschneider-pivotal @jchen21 @upthewaterspout @gesterzhou - -#----------------------------------------------------------------- -# Region Version Vectors - used for sychronization on -# member failures and persistent recovery -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/cache/versions/** @dschneider-pivotal @gesterzhou - -#----------------------------------------------------------------- -# WAN messaging and queues -#----------------------------------------------------------------- -geode-wan/** @gesterzhou @boglesby @nabarunnag @pivotal-jbarrett -geode-core/**/org/apache/geode/cache/asyncqueue/** @gesterzhou @boglesby @nabarunnag @pivotal-jbarrett -geode-core/**/org/apache/geode/cache/wan/** @gesterzhou @boglesby @nabarunnag @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/cache/wan/** @gesterzhou @boglesby @nabarunnag @pivotal-jbarrett -geode-assembly/**/apache/geode/cache/wan/** @gesterzhou @boglesby @nabarunnag @pivotal-jbarrett - -#----------------------------------------------------------------- -# Management -#----------------------------------------------------------------- -geode-management/** @jdeppe-pivotal @jinmeiliao -geode-assembly/**/org/apache/geode/management/** @jdeppe-pivotal @jinmeiliao -geode-assembly/**/org/apache/geode/tools/pulse/** @jdeppe-pivotal @jinmeiliao -geode-web-management/** @jdeppe-pivotal @jinmeiliao -geode-gfsh/** @jdeppe-pivotal @jinmeiliao -geode-assembly/**/bin/** @jdeppe-pivotal @jinmeiliao @kirklund -geode-pulse/** @jdeppe-pivotal @jinmeiliao -geode-http-service/** @jdeppe-pivotal @jinmeiliao -geode-web/** @jdeppe-pivotal @jinmeiliao -geode-core/**/org/apache/geode/admin/** @jdeppe-pivotal @jinmeiliao @kirklund -geode-core/**/org/apache/geode/alerting/** @jdeppe-pivotal @jinmeiliao @kirklund -geode-core/**/org/apache/geode/management/** @jdeppe-pivotal @jinmeiliao -geode-core/**/org/apache/geode/cache/configuration/** @jdeppe-pivotal @jinmeiliao -geode-core/**/org/apache/geode/internal/admin/** @jdeppe-pivotal @jinmeiliao @kirklund -geode-core/**/org/apache/geode/internal/cache/xmlcache/** @jdeppe-pivotal @jinmeiliao -geode-core/**/org/apache/geode/internal/cache/extension/** @jdeppe-pivotal @jinmeiliao -geode-core/**/org/apache/geode/internal/config/** @jdeppe-pivotal @jinmeiliao -geode-core/**/org/apache/geode/internal/process/** @jdeppe-pivotal @jinmeiliao @kirklund -geode-core/**/org/apache/geode/cache/internal/* @jdeppe-pivotal @jinmeiliao - -#----------------------------------------------------------------- -# Security -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/examples/** @jdeppe-pivotal @jinmeiliao @pivotal-jbarrett -geode-core/**/org/apache/geode/examples/security/** @jdeppe-pivotal @jinmeiliao @pivotal-jbarrett -geode-core/**/org/apache/geode/security/** @jdeppe-pivotal @jinmeiliao @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/security/** @jdeppe-pivotal @jinmeiliao @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/cache/operations/** @jdeppe-pivotal @jinmeiliao @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/cache/operations/** @jdeppe-pivotal @jinmeiliao @pivotal-jbarrett -geode-assembly/**/apache/geode/ssl/** @jdeppe-pivotal @jinmeiliao @pivotal-jbarrett - -#----------------------------------------------------------------- -# Logging -#----------------------------------------------------------------- 
-geode-log4j/** @kirklund @gesterzhou -geode-logging/** @kirklund @gesterzhou -geode-core/**/org/apache/geode/logging/** @kirklund @gesterzhou -geode-core/**/org/apache/geode/internal/logging/** @kirklund @gesterzhou -geode-core/**/org/apache/geode/i18n/** @agingade @kirklund -geode-core/**/org/apache/geode/internal/i18n/** @agingade @kirklund -geode-core/**/org/apache/geode/internal/io/** @agingade @kirklund -geode-assembly/**/org/apache/geode/logging/** @agingade @kirklund - -#----------------------------------------------------------------- -# Metrics & Statistics -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/statistics/** @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/internal/stats50/** @kirklund @pivotal-jbarrett -geode-core/**/org/apache/geode/metrics/** @kirklund @pivotal-jbarrett -geode-assembly/**/org/apache/geode/metrics/** @kirklund @pivotal-jbarrett - -#----------------------------------------------------------------- -# Region Snapshots -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/cache/snapshot/** @agingade @jchen21 -geode-core/**/org/apache/geode/internal/cache/snapshot/** @agingade @jchen21 - -#----------------------------------------------------------------- -# JDBC connector -#----------------------------------------------------------------- -geode-connectors/** @agingade @jchen21 -geode-core/**/org/apache/geode/datasource/** @agingade @BenjaminPerryRoss @jchen21 -geode-core/**/org/apache/geode/internal/datasource/** @agingade @BenjaminPerryRoss @jchen21 - -#----------------------------------------------------------------- -# Resource manager -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/cache/control/** @kirklund @DonalEvans -geode-core/**/org/apache/geode/internal/cache/control/** @kirklund @DonalEvans - -#----------------------------------------------------------------- -# Testing utilities -#----------------------------------------------------------------- -#geode-concurrency-test/** -geode-dunit/** @kirklund @demery-pivotal @dschneider-pivotal -geode-junit/** @kirklund @demery-pivotal @dschneider-pivotal -geode-jmh/** @pivotal-jbarrett -geode-junit/**/org/apache/geode/test/util/** @jdeppe-pivotal @kirklund -geode-assembly/**/org/apache/geode/test/junit/** @jdeppe-pivotal @jinmeiliao -geode-assembly/**/org/apache/geode/rules/** @jdeppe-pivotal @jinmeiliao -geode-assembly/**/org/apache/geode/launchers/** @dschneider-pivotal @boglesby @nabarunnag -geode-assembly/**/resources/** @boglesby @nabarunnag @jdeppe-pivotal @jinmeiliao - -#----------------------------------------------------------------- -# Build and tooling -#----------------------------------------------------------------- -#etc/** -*gradle* @rhoughton-pivot @upthewaterspout -build-tools/** @rhoughton-pivot @jdeppe-pivotal -DependencyConstraints.groovy @dickcav @rhoughton-pivot -ci/** @dickcav @rhoughton-pivot @smgoller -ci/scripts/** @dickcav @rhoughton-pivot @smgoller -ci/scripts/repeat-new-tests.sh @dickcav @rhoughton-pivot @smgoller @upthewaterspout @jdeppe-pivotal -dev-tools/dependencies/** @dickcav @onichols-pivotal -dev-tools/release/** @dickcav @onichols-pivotal -docker/** @dickcav @smgoller -geode-management/src/test/script/update-management-wiki.sh @dickcav @onichols-pivotal -#boms/** -static-analysis/** @rhoughton-pivot @upthewaterspout -geode-old-versions/** @dickcav @rhoughton-pivot -KEYS @dickcav @upthewaterspout 
@pivotal-amurmann -assembly_content.txt @dickcav @rhoughton-pivot -dependency_classpath.txt @dickcav @rhoughton-pivot -expected-pom.xml @dickcav @rhoughton-pivot - -#----------------------------------------------------------------- -# Documentation -#----------------------------------------------------------------- -apache-copyright-notice.txt @upthewaterspout @pivotal-amurmann -BUILDING.md @rhoughton-pivot @upthewaterspout -CODE_OF_CONDUCT.md @upthewaterspout @pivotal-amurmann @nonbinaryprogrammer -LICENSE @dickcav @onichols-pivotal -NOTICE @dickcav @onichols-pivotal -/README.md @upthewaterspout @pivotal-amurmann @kirklund -#TESTING.md diff --git a/CODEWATCHERS b/CODEWATCHERS deleted file mode 100644 index ae1c8fc6962e..000000000000 --- a/CODEWATCHERS +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# This file follows the same format as CODEOWNERS, but adds you as -# an optional reviewer rather than a required code owner reviewer. -# Draft PRs are ignored until ~15 minutes after Ready For Review. -# CODEWATCHERS is a bot; the bot owner will appear as the requestor. -# You must be a Geode committer to use CODEWATCHERS. 
- - -#----------------------------------------------------------------- -# CODEOWNERS and .asf.yml -#----------------------------------------------------------------- -CODEOWNERS @onichols-pivotal @rhoughton-pivot -CODEWATCHERS @onichols-pivotal @rhoughton-pivot -COMMITWATCHERS @onichols-pivotal -.asf.yaml @onichols-pivotal @rhoughton-pivot - - -#----------------------------------------------------------------- -# Documentation -#----------------------------------------------------------------- -geode-book/** @davebarnes97 -geode-docs/** @davebarnes97 -geode-book/config.yml @onichols-pivotal -geode-book/redirects.rb @onichols-pivotal -LICENSE @metatype -NOTICE @metatype - -#----------------------------------------------------------------- -# Jar Deployment -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/**/classloader/** @kohlmu-pivotal -geode-core/**/org/apache/geode/**/deployment/** @kohlmu-pivotal -geode-deployment/** @kohlmu-pivotal -geode-core/**/org/apache/geode/cache/internal/execute/* @kohlmu-pivotal - -#----------------------------------------------------------------- -# Client/server messaging and cache operations -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/cache/client/** @albertogpz -geode-core/**/org/apache/geode/cache/server/** @albertogpz -geode-core/**/org/apache/geode/internal/cache/tier/** @albertogpz - -#----------------------------------------------------------------- -# WAN messaging and queues -#----------------------------------------------------------------- -geode-wan/** @albertogpz -geode-core/**/org/apache/geode/cache/asyncqueue/** @albertogpz -geode-core/**/org/apache/geode/cache/wan/** @albertogpz -geode-core/**/org/apache/geode/internal/cache/wan/** @albertogpz - -#----------------------------------------------------------------- -# Metrics & Statistics -#----------------------------------------------------------------- -geode-core/**/org/apache/geode/internal/statistics/** @mkevo -geode-core/**/org/apache/geode/internal/stats50/** @mkevo -geode-core/**/org/apache/geode/metrics/** @mkevo - -#----------------------------------------------------------------- -# Querying -#----------------------------------------------------------------- -geode-cq/** @mkevo -geode-core/**/org/apache/geode/cache/query/** @mkevo - -#----------------------------------------------------------------- -# Build and tooling -#----------------------------------------------------------------- -dev-tools/progress/** @demery-pivotal diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 3a332486caab..000000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,7 +0,0 @@ -# Code of Conduct - -* [Code of Conduct for The Apache Software Foundation][1] -* [Code of Conduct for the Geode Project][2] - -[1]: https://www.apache.org/foundation/policies/conduct.html -[2]: https://cwiki.apache.org/confluence/display/GEODE/Code+of+Conduct diff --git a/COMMITWATCHERS b/COMMITWATCHERS deleted file mode 100644 index 1dc898c2ac5c..000000000000 --- a/COMMITWATCHERS +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Add your github username to one or both sections below to opt-in for -# automated feedback on your commit message. - -[Draft] -ezoerner - -[Non-Draft] -kirklund -ezoerner -kohlmu-pivotal - - -# By opting-in, the commit message of the first commit in your PR will -# be compared to the guidelines in -# https://cwiki.apache.org/confluence/display/GEODE/Commit+Message+Format -# and a comment will be added to your PR if improvements are recommended. -# -# Please note that these guidelines are totally optional. What matters -# most is not formatting, but including enough detail for future -# contributors to understand what you changed and why. -# -# Q. Why is this opt-in? Wouldn't this be good feedback for all PRs? -# A. The Geode community embraces diversity of opinions. Rather than -# rules, "we like to work on trust, not unnecessary restrictions". -# -# Q. Why is only my first commit message inspected? -# A. Because it is most likely to be used as the actual commit message -# or the start of the squash commit message when your PR is merged. -# -# Q. Isn't this redundant with the PR title and description in github? -# A. It can be; some contributors copy&paste their commit message -# there as well. Or you can use that space for notes to reviewers, -# such as how you tested or specific feedback you'd like. -# -# Q. How do I push changes to my commit message? -# A. Using interactive rebase (git rebase -i HEAD~n, where n is the -# number of commits in your PR so far), change "pick" to "r" to -# reword the first commit, then git push --force. -# -# Q. Will a force push mess up reviews I've already received? -# A. Yes. Opt-in your draft PRs too to get feedback (and have a chance -# to make fixes) prior to marking your PR as ready for review. -# -# Q. Can I just write or fix my commit message when I merge my PR? -# A. If you're a committer and if you remember to, sure, although this -# will deprive you of review feedback on your actual commit message. -# -# Q. Can I update my commit message after my PR is merged? -# A. No, Geode's branch protection disallows force push on develop. diff --git a/README.md b/README.md index 967acd2c8d24..f4ed990b67b8 100644 --- a/README.md +++ b/README.md @@ -1,246 +1,7 @@
-[![Apache Geode logo](https://geode.apache.org/img/Apache_Geode_logo.png)](http://geode.apache.org) - -[![Build Status](https://concourse.apachegeode-ci.info/api/v1/teams/main/pipelines/apache-develop-main/badge)](https://concourse.apachegeode-ci.info/teams/main/pipelines/apache-develop-main) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) [![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.geode/geode-core/badge.svg)](http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.apache.geode%22) [![homebrew](https://img.shields.io/homebrew/v/apache-geode.svg)](https://formulae.brew.sh/formula/apache-geode) [![Docker Pulls](https://img.shields.io/docker/pulls/apachegeode/geode.svg)](https://hub.docker.com/r/apachegeode/geode/) [![Total alerts](https://img.shields.io/lgtm/alerts/g/apache/geode.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/apache/geode/alerts/) [![Language grade: Java](https://img.shields.io/lgtm/grade/java/g/apache/geode.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/apache/geode/context:java) [![Language grade: JavaScript](https://img.shields.io/lgtm/grade/javascript/g/apache/geode.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/apache/geode/context:javascript) [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/apache/geode.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/apache/geode/context:python) +[![VMware GemFire logo](https://cspmarketplaceprd.s3.us-west-2.amazonaws.com/943137d7-8955-4f0e-b172-7d4abeb251b7/media-files/logo-vmwt4gf-logo-1626460064701-1.png)](https://docs.vmware.com/en/VMware-Tanzu-GemFire/)
- -## Contents -1. [Overview](#overview) -1. [How to Get Apache Geode](#obtaining) -1. [Main Concepts and Components](#concepts) -1. [Location of Directions for Building from Source](#building) -1. [Geode in 5 minutes](#started) -1. [Application Development](#development) -1. [Documentation](https://geode.apache.org/docs/) -1. [Wiki](https://cwiki.apache.org/confluence/display/GEODE/Index) -1. [How to Contribute](https://cwiki.apache.org/confluence/display/GEODE/How+to+Contribute) -1. [Export Control](#export) - -## Overview - -[Apache Geode](http://geode.apache.org/) is -a data management platform that provides real-time, consistent access to -data-intensive applications throughout widely distributed cloud architectures. - -Apache Geode pools memory, CPU, network resources, and optionally local disk -across multiple processes to manage application objects and behavior. It uses -dynamic replication and data partitioning techniques to implement high -availability, improved performance, scalability, and fault tolerance. In -addition to being a distributed data container, Apache Geode is an in-memory -data management system that provides reliable asynchronous event notifications -and guaranteed message delivery. - -Apache Geode is a mature, robust technology originally developed by GemStone -Systems. Commercially available as GemFire™, it was first deployed in the -financial sector as the transactional, low-latency data engine used in Wall -Street trading platforms. Today Apache Geode technology is used by hundreds of -enterprise customers for high-scale business applications that must meet low -latency and 24x7 availability requirements. - -## How to Get Apache Geode - -You can download Apache Geode from the -[website](https://geode.apache.org/releases/), run a Docker -[image](https://hub.docker.com/r/apachegeode/geode/), or install with -[Homebrew](https://formulae.brew.sh/formula/apache-geode) on OSX. Application developers -can load dependencies from [Maven -Central](https://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.apache.geode%22). - -Maven -```xml -<dependencies> - <dependency> - <groupId>org.apache.geode</groupId> - <artifactId>geode-core</artifactId> - <version>$VERSION</version> - </dependency> -</dependencies> -``` - -Gradle -```groovy -dependencies { - compile "org.apache.geode:geode-core:$VERSION" -} -``` - -## Main Concepts and Components - -_Caches_ are an abstraction that describes a node in an Apache Geode distributed -system. - -Within each cache, you define data _regions_. Data regions are analogous to -tables in a relational database and manage data in a distributed fashion as -name/value pairs. A _replicated_ region stores identical copies of the data on -each cache member of a distributed system. A _partitioned_ region spreads the -data among cache members. After the system is configured, client applications -can access the distributed data in regions without knowledge of the underlying -system architecture. You can define listeners to receive notifications when -data has changed, and you can define expiration criteria to delete obsolete -data in a region. - -_Locators_ provide clients with both discovery and server load balancing -services. Clients are configured with locator information, and the locators -maintain a dynamic list of member servers. The locators provide clients with -connection information to a server. - -Apache Geode includes the following features: - -* Combines redundancy, replication, and a "shared nothing" persistence - architecture to deliver fail-safe reliability and performance.
-* Horizontally scalable to thousands of cache members, with multiple cache - topologies to meet different enterprise needs. The cache can be - distributed across multiple computers. -* Asynchronous and synchronous cache update propagation. -* Delta propagation distributes only the difference between old and new - versions of an object (delta) instead of the entire object, resulting in - significant distribution cost savings. -* Reliable asynchronous event notifications and guaranteed message delivery - through optimized, low latency distribution layer. -* Data awareness and real-time business intelligence. If data changes as - you retrieve it, you see the changes immediately. -* Integration with Spring Framework to speed and simplify the development - of scalable, transactional enterprise applications. -* JTA compliant transaction support. -* Cluster-wide configurations that can be persisted and exported to other - clusters. -* Remote cluster management through HTTP. -* REST APIs for REST-enabled application development. -* Rolling upgrades may be possible, but they will be subject to any - limitations imposed by new features. - -## Building this Release from Source - -See [BUILDING.md](./BUILDING.md) for -instructions on how to build the project. - -## Running Tests -See [TESTING.md](./TESTING.md) for -instructions on how to run tests. - -## Geode in 5 minutes - -Geode requires installation of JDK version 1.8. After installing Apache Geode, -start a locator and server: -```console -$ gfsh -gfsh> start locator -gfsh> start server -``` - -Create a region: -```console -gfsh> create region --name=hello --type=REPLICATE -``` - -Write a client application (this example uses a [Gradle](https://gradle.org) -build script): - -_build.gradle_ -```groovy -apply plugin: 'java' -apply plugin: 'application' - -mainClassName = 'HelloWorld' - -repositories { mavenCentral() } -dependencies { - compile 'org.apache.geode:geode-core:1.4.0' - runtime 'org.slf4j:slf4j-log4j12:1.7.24' -} -``` - -_src/main/java/HelloWorld.java_ -```java -import java.util.Map; -import org.apache.geode.cache.Region; -import org.apache.geode.cache.client.*; - -public class HelloWorld { - public static void main(String[] args) throws Exception { - ClientCache cache = new ClientCacheFactory() - .addPoolLocator("localhost", 10334) - .create(); - Region region = cache - .createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY) - .create("hello"); - - region.put("1", "Hello"); - region.put("2", "World"); - - for (Map.Entry entry : region.entrySet()) { - System.out.format("key = %s, value = %s\n", entry.getKey(), entry.getValue()); - } - cache.close(); - } -} -``` - -Build and run the `HelloWorld` example: -```console -$ gradle run -``` - -The application will connect to the running cluster, create a local cache, put -some data in the cache, and print the cached data to the console: -```console -key = 1, value = Hello -key = 2, value = World -``` - -Finally, shutdown the Geode server and locator: -```console -gfsh> shutdown --include-locators=true -``` - -For more information see the [Geode -Examples](https://github.com/apache/geode-examples) repository or the -[documentation](https://geode.apache.org/docs/). 
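The coordinate migration that runs through the BOM and build-script hunks later in this diff can be summarized with a small consumer-side sketch. This is a hypothetical build.gradle fragment, not part of the patch itself; it assumes the rebranded artifacts are published under the com.vmware.gemfire group with unchanged artifact names, as the expected-pom.xml changes below indicate ($VERSION is a placeholder, as in the README example above):

```groovy
// Hypothetical consumer dependency declaration (sketch, not from the patch).
// Before this change, artifacts were published under org.apache.geode:
//   implementation "org.apache.geode:geode-core:$VERSION"
// After this change, the BOM diffs indicate the same artifacts under the
// com.vmware.gemfire group:
dependencies {
    implementation "com.vmware.gemfire:geode-core:$VERSION"
}
```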
- -## Application Development - -Apache Geode applications can be written in these client technologies: - -* Java [client](https://geode.apache.org/docs/guide/18/topologies_and_comm/cs_configuration/chapter_overview.html) - or [peer](https://geode.apache.org/docs/guide/18/topologies_and_comm/p2p_configuration/chapter_overview.html) -* [REST](https://geode.apache.org/docs/guide/18/rest_apps/chapter_overview.html) -* [Memcached](https://cwiki.apache.org/confluence/display/GEODE/Moving+from+memcached+to+gemcached) - -The following libraries are available external to the Apache Geode project: - -* [Spring Data GemFire](https://projects.spring.io/spring-data-gemfire/) -* [Spring Cache](https://docs.spring.io/spring/docs/current/spring-framework-reference/html/cache.html) -* [Python](https://github.com/gemfire/py-gemfire-rest) - -## Export Control - -This distribution includes cryptographic software. -The country in which you currently reside may have restrictions -on the import, possession, use, and/or re-export to another country, -of encryption software. BEFORE using any encryption software, -please check your country's laws, regulations and policies -concerning the import, possession, or use, and re-export of -encryption software, to see if this is permitted. -See <http://www.wassenaar.org/> for more information. - -The U.S. Government Department of Commerce, Bureau of Industry and Security (BIS), -has classified this software as Export Commodity Control Number (ECCN) 5D002.C.1, -which includes information security software using or performing -cryptographic functions with asymmetric algorithms. -The form and manner of this Apache Software Foundation distribution makes -it eligible for export under the License Exception -ENC Technology Software Unrestricted (TSU) exception -(see the BIS Export Administration Regulations, Section 740.13) -for both object code and source code. - -The following provides more details on the included cryptographic software: - -* Apache Geode is designed to be used with - [Java Secure Socket Extension](https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/JSSERefGuide.html) (JSSE) and - [Java Cryptography Extension](https://docs.oracle.com/javase/8/docs/technotes/guides/security/crypto/CryptoSpec.html) (JCE). - The [JCE Unlimited Strength Jurisdiction Policy](https://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) - may need to be installed separately to use keystore passwords with 7 or more characters. -* Apache Geode links to and uses [OpenSSL](https://www.openssl.org/) ciphers. - +VMware GemFire can be obtained from [Tanzu Network](https://network.tanzu.vmware.com/products/pivotal-gemfire/). diff --git a/boms/geode-all-bom/src/test/resources/expected-pom.xml b/boms/geode-all-bom/src/test/resources/expected-pom.xml index 039b86776024..5b143243ac15 100644 --- a/boms/geode-all-bom/src/test/resources/expected-pom.xml +++ b/boms/geode-all-bom/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License.
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -330,7 +330,7 @@ org.apache.shiro shiro-core - 1.9.0 + 1.9.1 org.assertj @@ -415,7 +415,7 @@ org.slf4j slf4j-api - 1.7.32 + 1.7.36 org.springframework.hateoas @@ -733,199 +733,199 @@ 23.0.0 - org.apache.geode + com.vmware.gemfire geode-common ${version} compile - org.apache.geode + com.vmware.gemfire geode-concurrency-test ${version} compile - org.apache.geode + com.vmware.gemfire geode-connectors ${version} compile - org.apache.geode + com.vmware.gemfire geode-core ${version} compile - org.apache.geode + com.vmware.gemfire geode-cq ${version} compile - org.apache.geode + com.vmware.gemfire geode-dunit ${version} compile - org.apache.geode + com.vmware.gemfire geode-gfsh ${version} compile - org.apache.geode + com.vmware.gemfire geode-http-service ${version} compile - org.apache.geode + com.vmware.gemfire geode-jmh ${version} compile - org.apache.geode + com.vmware.gemfire geode-junit ${version} compile - org.apache.geode + com.vmware.gemfire geode-log4j ${version} compile - org.apache.geode + com.vmware.gemfire geode-logging ${version} compile - org.apache.geode + com.vmware.gemfire geode-lucene ${version} compile - org.apache.geode + com.vmware.gemfire geode-management ${version} compile - org.apache.geode + com.vmware.gemfire geode-membership ${version} compile - org.apache.geode + com.vmware.gemfire geode-memcached ${version} compile - org.apache.geode + com.vmware.gemfire geode-old-client-support ${version} compile - org.apache.geode + com.vmware.gemfire geode-pulse ${version} compile - org.apache.geode + com.vmware.gemfire geode-rebalancer ${version} compile - org.apache.geode + com.vmware.gemfire geode-serialization ${version} compile - org.apache.geode + com.vmware.gemfire geode-server-all ${version} compile - org.apache.geode + com.vmware.gemfire geode-tcp-server ${version} compile - org.apache.geode + com.vmware.gemfire geode-unsafe ${version} compile - org.apache.geode + com.vmware.gemfire geode-wan ${version} compile - org.apache.geode + com.vmware.gemfire geode-web ${version} compile - org.apache.geode + com.vmware.gemfire geode-web-api ${version} compile - org.apache.geode + com.vmware.gemfire geode-web-management ${version} compile - org.apache.geode + com.vmware.gemfire geode-modules ${version} compile - org.apache.geode + com.vmware.gemfire geode-modules-tomcat8 ${version} compile - org.apache.geode + com.vmware.gemfire geode-modules-tomcat9 ${version} compile - org.apache.geode + com.vmware.gemfire geode-deployment-legacy ${version} compile - org.apache.geode + com.vmware.gemfire geode-lucene-test ${version} compile - org.apache.geode + com.vmware.gemfire geode-pulse-test ${version} compile diff --git a/boms/geode-client-bom/src/test/resources/expected-pom.xml b/boms/geode-client-bom/src/test/resources/expected-pom.xml index 2fa63c24ee5d..38d7460099c0 100644 --- a/boms/geode-client-bom/src/test/resources/expected-pom.xml +++ b/boms/geode-client-bom/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-client-bom ${version} pom @@ -38,7 +38,7 @@ - org.apache.geode + com.vmware.gemfire geode-core ${version} @@ -53,7 +53,7 @@ - org.apache.geode + com.vmware.gemfire geode-cq ${version} diff --git a/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy b/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy index 2a3ed0143602..a2a429608eb1 100644 --- a/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy +++ b/build-tools/geode-dependency-management/src/main/groovy/org/apache/geode/gradle/plugins/DependencyConstraints.groovy @@ -41,8 +41,8 @@ class DependencyConstraints { deps.put("jgroups.version", "3.6.14.Final") deps.put("log4j.version", "2.17.2") deps.put("micrometer.version", "1.9.1") - deps.put("shiro.version", "1.9.0") - deps.put("slf4j-api.version", "1.7.32") + deps.put("shiro.version", "1.9.1") + deps.put("slf4j-api.version", "1.7.36") deps.put("jboss-modules.version", "1.11.0.Final") deps.put("jackson.version", "2.13.3") deps.put("jackson.databind.version", "2.13.3") @@ -54,8 +54,6 @@ class DependencyConstraints { // in geode-assembly.gradle. Moreover, dependencyManagement does not seem to play nicely when // specifying @zip in a dependency, the manner in which we consume them in custom configurations. // This would possibly be corrected if they were proper source sets. - deps.put("tomcat6.version", "6.0.37") - deps.put("tomcat7.version", "7.0.109") deps.put("tomcat8.version", "8.5.66") deps.put("tomcat9.version", "9.0.62") diff --git a/build-tools/scripts/src/main/groovy/check-pom.gradle b/build-tools/scripts/src/main/groovy/check-pom.gradle index d73492f3a805..ec9a5eadf3b3 100644 --- a/build-tools/scripts/src/main/groovy/check-pom.gradle +++ b/build-tools/scripts/src/main/groovy/check-pom.gradle @@ -36,10 +36,10 @@ tasks.register('checkPom') { // We impose the following rules on our produced poms: // * Versions are to be specified in the block, not the block - // * org.apache.geode versions will be ignored, in favor of this build's version + // * com.vmware.gemfire versions will be ignored, in favor of this build's version // * blocks in produced POMs are as expected (ordering ignored) // * blocks in produced POMs are as expected (ordering ignored) - // * Published groupId = "org.apache.geode" + // * Published groupId = "com.vmware.gemfire" // * Published artifactId = project.artifactName if it exists, else project.name def anyVersionDefinitionNotInDependencyManagement = { pom -> @@ -50,7 +50,7 @@ tasks.register('checkPom') { def ignoreGeodeVersionInExpectedPom = { pom -> pom.dependencyManagement.dependencies.dependency.each { dep -> - if (dep.toString().contains("org.apache.geode")) { + if (dep.toString().contains("com.vmware.gemfire")) { // since the project version is the source of truth, use that for comparison instead of // whatever is stored in the expected pom file dep.get("version")*.setValue([version]) @@ -94,7 +94,7 @@ tasks.register('checkPom') { if (pathologicalArtifactId || pathologicalGroupId || pathologicalVersionedDeps) { def errorSummary = "" errorSummary += pathologicalArtifactId ? "Expected POM header pathologically incorrect. Fix artifactId to match subproject name.\n" : "" - errorSummary += pathologicalGroupId ? "Expected POM header pathologically incorrect. 
Fix groupId to be 'org.apache.geode'.\n" : "" + errorSummary += pathologicalGroupId ? "Expected POM header pathologically incorrect. Fix groupId to be 'com.vmware.gemfire'.\n" : "" errorSummary += pathologicalVersionedDeps ? "Expected POM should not declare dependency versions outside the Spring dependency-management constraints." : "" throw new GradleException(errorSummary) } @@ -158,7 +158,7 @@ tasks.register('updateExpectedPom', Copy) { doLast { def checkGroupIdAndSetVersion = { elem -> - if (elem.groupId.text().contains("org.apache.geode")) { + if (elem.groupId.text().contains("com.vmware.gemfire")) { elem.get("version")*.setValue('${version}') } } diff --git a/build-tools/scripts/src/main/groovy/geode-publish-common.gradle b/build-tools/scripts/src/main/groovy/geode-publish-common.gradle index fc78d2acc7c8..13f43131a27a 100644 --- a/build-tools/scripts/src/main/groovy/geode-publish-common.gradle +++ b/build-tools/scripts/src/main/groovy/geode-publish-common.gradle @@ -66,7 +66,7 @@ publishing { // Spring dependency-management plugin. We remove version specification as injected by // project dependencies, e.g., compile project(':geode-core') asNode().dependencies.dependency.each { dep -> - if (dep.toString().contains("org.apache.geode")) { + if (dep.toString().contains("com.vmware.gemfire")) { dep.remove(dep["version"]) } } diff --git a/build-tools/scripts/src/main/groovy/geode-rat.gradle b/build-tools/scripts/src/main/groovy/geode-rat.gradle index dca43213de10..083d1b869c8e 100644 --- a/build-tools/scripts/src/main/groovy/geode-rat.gradle +++ b/build-tools/scripts/src/main/groovy/geode-rat.gradle @@ -23,6 +23,10 @@ plugins { rat { inputDir = rootDir + substringMatcher("VMware", "VMware", "VMware, Inc.", "vmware-eula") + approvedLicense("VMware") + approvedLicense("Apache License Version 2.0") + excludes = [ // git '.git/**', @@ -48,9 +52,9 @@ rat { '**/go.sum', // Geode examples - 'geode-examples/.idea/**', - 'geode-examples/gradlew*/**', - 'geode-examples/gradle/wrapper/**', + 'gemfire-examples/.idea/**', + 'gemfire-examples/gradlew*/**', + 'gemfire-examples/gradle/wrapper/**', // IDE 'etc/eclipse-java-google-style.xml', @@ -134,27 +138,9 @@ rat { // Public Domain http://meyerweb.com/eric/tools/css/reset/ 'geode-pulse/src/main/webapp/scripts/lib/tooltip.js', - // JSON License - permissive, used for Good, not Evil - 'geode-json/src/main/java/org/json/CDL.java', - 'geode-json/src/main/java/org/json/Cookie.java', - 'geode-json/src/main/java/org/json/CookieList.java', - 'geode-json/src/main/java/org/json/CDL.java', - 'geode-json/src/main/java/org/json/Cookie.java', - 'geode-json/src/main/java/org/json/CookieList.java', - 'geode-json/src/main/java/org/json/HTTP.java', - 'geode-json/src/main/java/org/json/HTTPTokener.java', - 'geode-json/src/main/java/org/json/JSONArray.java', - 'geode-json/src/main/java/org/json/JSONException.java', - 'geode-json/src/main/java/org/json/JSONML.java', - 'geode-json/src/main/java/org/json/JSONObject.java', - 'geode-json/src/main/java/org/json/JSONString.java', - 'geode-json/src/main/java/org/json/JSONStringer.java', - 'geode-json/src/main/java/org/json/JSONTokener.java', - 'geode-json/src/main/java/org/json/JSONWriter.java', - 'geode-json/src/main/java/org/json/XML.java', - 'geode-json/src/main/java/org/json/XMLTokener.java', - // MIT License + 'geode-pulse/src/main/webapp/scripts/lib/jit.js', + 'geode-pulse/src/main/webapp/scripts/lib/split.js', 'geode-pulse/src/main/webapp/scripts/lib/jquery.jqGrid.src.js', 
'geode-pulse/src/main/webapp/scripts/lib/grid.locale-en.js', 'geode-pulse/src/main/webapp/scripts/lib/html5.js', diff --git a/build-tools/scripts/src/main/groovy/geode-test.gradle b/build-tools/scripts/src/main/groovy/geode-test.gradle index 93488986e512..f33ce74535b3 100644 --- a/build-tools/scripts/src/main/groovy/geode-test.gradle +++ b/build-tools/scripts/src/main/groovy/geode-test.gradle @@ -181,83 +181,23 @@ gradle.taskGraph.whenReady({ graph -> jvmArgs += ['-XX:+HeapDumpOnOutOfMemoryError', '-ea'] if (project.hasProperty('testJVMVer') && testJVMVer.toInteger() >= 9) { jvmArgs += [ - "--add-opens=java.base/java.io=ALL-UNNAMED", + // Product: BufferPool uses DirectBuffer + "--add-exports=java.base/sun.nio.ch=ALL-UNNAMED", + // Tests: CertificateBuilder uses numerous types declared here + "--add-exports=java.base/sun.security.x509=ALL-UNNAMED", + // Product: ManagementAgent's custom MBean servers extend types declared here + "--add-exports=java.management/com.sun.jmx.remote.security=ALL-UNNAMED", + + // Product: UnsafeThreadLocal accesses fields and methods of ThreadLocal "--add-opens=java.base/java.lang=ALL-UNNAMED", - "--add-opens=java.base/java.lang.annotation=ALL-UNNAMED", - "--add-opens=java.base/java.lang.module=ALL-UNNAMED", - "--add-opens=java.base/java.lang.ref=ALL-UNNAMED", - "--add-opens=java.base/java.lang.reflect=ALL-UNNAMED", - "--add-opens=java.base/java.math=ALL-UNNAMED", - "--add-opens=java.base/java.net=ALL-UNNAMED", + // Product: AddressableMemoryManager accesses DirectByteBuffer constructor "--add-opens=java.base/java.nio=ALL-UNNAMED", - "--add-opens=java.base/java.nio.channels=ALL-UNNAMED", - "--add-opens=java.base/java.nio.channels.spi=ALL-UNNAMED", - "--add-opens=java.base/java.nio.charset=ALL-UNNAMED", - "--add-opens=java.base/java.nio.file.attribute=ALL-UNNAMED", - "--add-opens=java.base/java.nio.file.spi=ALL-UNNAMED", - "--add-opens=java.base/java.security=ALL-UNNAMED", - "--add-opens=java.base/java.text=ALL-UNNAMED", - "--add-opens=java.base/java.time=ALL-UNNAMED", - "--add-opens=java.base/java.time.chrono=ALL-UNNAMED", - "--add-opens=java.base/java.time.format=ALL-UNNAMED", - "--add-opens=java.base/java.time.temporal=ALL-UNNAMED", - "--add-opens=java.base/java.time.zone=ALL-UNNAMED", + // Tests: EnvironmentVariables rule accesses Collections$UnmodifiableMap.m "--add-opens=java.base/java.util=ALL-UNNAMED", - "--add-opens=java.base/java.util.concurrent=ALL-UNNAMED", - "--add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED", - "--add-opens=java.base/java.util.concurrent.locks=ALL-UNNAMED", - "--add-opens=java.base/java.util.jar=ALL-UNNAMED", - "--add-opens=java.base/java.util.regex=ALL-UNNAMED", - "--add-opens=java.base/java.util.zip=ALL-UNNAMED", - "--add-opens=java.base/javax.net.ssl=ALL-UNNAMED", - "--add-opens=java.base/jdk.internal.loader=ALL-UNNAMED", - "--add-opens=java.base/jdk.internal.misc=ALL-UNNAMED", - "--add-opens=java.base/jdk.internal.module=ALL-UNNAMED", - "--add-opens=java.base/jdk.internal.platform=ALL-UNNAMED", - "--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED", - "--add-opens=java.base/jdk.internal.reflect=ALL-UNNAMED", - "--add-opens=java.base/jdk.internal.util.jar=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.annotation=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.factory=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.reflectiveObjects=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.repository=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.scope=ALL-UNNAMED", - 
"--add-opens=java.base/sun.reflect.generics.tree=ALL-UNNAMED", - "--add-opens=java.base/sun.net.www=ALL-UNNAMED", - "--add-opens=java.base/sun.net.www.protocol.file=ALL-UNNAMED", - "--add-opens=java.base/sun.net.www.protocol.jar=ALL-UNNAMED", - "--add-opens=java.base/sun.nio.ch=ALL-UNNAMED", - "--add-opens=java.base/sun.nio.cs=ALL-UNNAMED", - "--add-opens=java.base/sun.nio.fs=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.factory=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.reflectiveObjects=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.repository=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.scope=ALL-UNNAMED", - "--add-opens=java.base/sun.reflect.generics.tree=ALL-UNNAMED", - "--add-opens=java.base/sun.security.provider=ALL-UNNAMED", + // Tests: SecurityTestUtils resets SSL-related fields "--add-opens=java.base/sun.security.ssl=ALL-UNNAMED", - "--add-opens=java.base/sun.security.util=ALL-UNNAMED", - "--add-opens=java.base/sun.util.calendar=ALL-UNNAMED", - "--add-opens=java.base/sun.util.locale=ALL-UNNAMED", - "--add-opens=java.logging/java.util.logging=ALL-UNNAMED", - "--add-opens=java.management/javax.management=ALL-UNNAMED", - "--add-opens=java.management/javax.management.openmbean=ALL-UNNAMED", - "--add-opens=java.management/sun.management=ALL-UNNAMED", - "--add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED", - "--add-opens=java.xml/jdk.xml.internal=ALL-UNNAMED", - "--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED", - - "--add-exports=java.base/sun.security.x509=ALL-UNNAMED", - "--add-exports=java.management/com.sun.jmx.remote.security=ALL-UNNAMED", + "--add-opens=java.base/javax.net.ssl=ALL-UNNAMED", ] - - if (System.getProperty("os.name").startsWith('Linux')) { - jvmArgs += "--add-opens=java.base/jdk.internal.platform=ALL-UNNAMED" - jvmArgs += "--add-opens=java.base/jdk.internal.platform.cgroupv1=ALL-UNNAMED" - } - if(testJVMVer.toInteger() >= 17) { - jvmArgs += "--add-exports=java.base/jdk.internal.util.random=ALL-UNNAMED" - } } if (project.hasProperty('testJVM') && !testJVM.trim().isEmpty()) { executable = "${testJVM}/bin/java" diff --git a/ci/pipelines/examples/jinja.template.yml b/ci/pipelines/examples/jinja.template.yml index b5e33ce45cf1..f545305462d7 100644 --- a/ci/pipelines/examples/jinja.template.yml +++ b/ci/pipelines/examples/jinja.template.yml @@ -80,20 +80,22 @@ resources: bucket: ((artifact-bucket)) json_key: ((concourse-gcp-key)) versioned_file: semvers/((pipeline-prefix))((geode-build-branch))/passing-build-tokens.json -- name: geode-examples +- name: gemfire-examples icon: github-circle type: git source: - uri: https://github.com/{{repository.fork}}/geode-examples.git + uri: git@github.com:{{repository.fork}}/gemfire-examples.git branch: {{ repository.branch }} + private_key: ((gemfire-ci-private-key)) depth: 10 - name: geode-ci icon: github-circle type: git source: depth: 1 - uri: https://github.com/{{repository.fork}}/geode.git + uri: git@github.com:{{repository.fork}}/geode-support.git branch: {{ repository.branch }} + private_key: ((gemfire-ci-private-key)) paths: - ci/* - name: concourse-metadata-resource @@ -129,7 +131,7 @@ jobs: - get: linux-builder-image-family - get: geode-passing-tokens trigger: true - - get: geode-examples + - get: gemfire-examples trigger: true - get: daily trigger: true @@ -167,7 +169,7 @@ jobs: path: geode-ci/ci/scripts/rsync_code_up.sh inputs: - name: geode-ci - - name: geode-examples + - name: gemfire-examples path: geode - name: instance-data timeout: 5m @@ 
-185,7 +187,7 @@ jobs: SSHKEY_FILE="instance-data/sshkey" SSH_OPTIONS="-i ${SSHKEY_FILE} -o ConnectionAttempts=60 -o StrictHostKeyChecking=no" INSTANCE_IP_ADDRESS="$(cat instance-data/instance-ip-address)" - ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "set -x ; mv geode geode-examples" + ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "set -x ; mv geode gemfire-examples" - task: build image: alpine-tools-image config: @@ -199,7 +201,7 @@ jobs: path: geode-ci/ci/scripts/execute_build_examples.sh inputs: - name: geode-ci - - name: geode-examples + - name: gemfire-examples - name: geode-passing-tokens - name: instance-data outputs: diff --git a/ci/pipelines/geode-build/jinja.template.yml b/ci/pipelines/geode-build/jinja.template.yml index 59dc0fbfce64..3b929f429cab 100644 --- a/ci/pipelines/geode-build/jinja.template.yml +++ b/ci/pipelines/geode-build/jinja.template.yml @@ -23,8 +23,6 @@ {% macro plan_resource_gets(test) %} - get: geode-ci - passed: - - build - in_parallel: - get: geode trigger: true @@ -95,9 +93,6 @@ GRADLE_GLOBAL_ARGS: ((gradle-global-args)) - {{test.name}}-test-{{java_test_version.name}} {%- endfor -%} {%- endfor -%} - {%- for run_var in (benchmarks.flavors) %} -- benchmark-{{ run_var.title }} - {%- endfor -%} {% endmacro %} groups: @@ -183,7 +178,8 @@ resources: source: branch: {{benchmarks.benchmark_branch}} depth: 1 - uri: https://github.com/((geode-fork))/geode-benchmarks.git + private_key: ((gemfire-ci-private-key)) + uri: git@github.com:gemfire/gemfire-benchmarks.git - name: geode-build-version type: semver source: @@ -443,26 +439,24 @@ jobs: args: - -cx - | - pushd geode - GEODE_SHA=$(git rev-parse HEAD) - popd GEODE_SEMVER=$(cat geode-build-version/number) + cd geode + GEODE_SHA=$(git rev-parse HEAD) GS_PATH=gs://((artifact-bucket))/semvers/((pipeline-prefix))((geode-build-branch))/passing-build-tokens.json CURRENT_PASSING_SHA=$(gsutil cat ${GS_PATH} | jq -r .ref) set -e # Check that the incoming GEODE_SHA is a descendent of the currently stored value. 
# Keeps us from winding back the repository in the case of an out-of-order pipeline pass - if [ -n "${CURRENT_PASSING_SHA}" ]; then - cd geode - if git merge-base --is-ancestor ${CURRENT_PASSING_SHA} ${GEODE_SHA}; then - cat > ../geode-passing-tokens/passing-build-tokens.json < ../geode-passing-tokens/passing-build-tokens.json < ../geode-passing-tokens/passing-build-tokens.json fi - in_parallel: - put: geode-passing-tokens @@ -506,6 +500,10 @@ jobs: TAG_POSTFIX: -{{ run_var.title }} TEST_OPTIONS: {{ run_var.options }} PURPOSE: ((pipeline-prefix))geode-benchmarks + GITHUB_CREDENTIALS: ((gemfire-ci-private-key)) + GCP_CREDENTIALS: ((maven-access-service-account)) + ORG_GRADLE_PROJECT_mavenUser: ((commercial-repo-username)) + ORG_GRADLE_PROJECT_mavenPassword: ((commercial-repo-password)) run: path: geode-ci/ci/scripts/run_benchmarks.sh inputs: diff --git a/ci/pipelines/images/jinja.template.yml b/ci/pipelines/images/jinja.template.yml index dec32d3750ce..6a091dc6f70f 100644 --- a/ci/pipelines/images/jinja.template.yml +++ b/ci/pipelines/images/jinja.template.yml @@ -68,10 +68,11 @@ resources: - name: geode-benchmarks-image type: git source: - branch: ((geode-build-branch)) + branch: develop paths: - infrastructure/scripts/aws/image - uri: https://github.com/((geode-fork))/geode-benchmarks.git + uri: git@github.com:gemfire/gemfire-benchmarks.git + private_key: ((gemfire-ci-private-key)) - name: packer-145-image icon: docker diff --git a/ci/pipelines/meta/deploy_meta.sh b/ci/pipelines/meta/deploy_meta.sh index 23b600916179..8b6ec9ad6d99 100755 --- a/ci/pipelines/meta/deploy_meta.sh +++ b/ci/pipelines/meta/deploy_meta.sh @@ -267,7 +267,6 @@ function enableFeature { NAME=$1 driveToGreen $META_PIPELINE set-$NAME-pipeline unpausePipeline ${PIPELINE_PREFIX}$NAME - exposePipeline ${PIPELINE_PREFIX}$NAME } set -e @@ -298,7 +297,6 @@ unpausePipeline ${PIPELINE_PREFIX}main if [[ "$GEODE_FORK" == "${UPSTREAM_FORK}" ]]; then if [[ "${PUBLIC}" == "true" ]]; then - exposePipelines ${PIPELINE_PREFIX}main ${PIPELINE_PREFIX}images enableFeature examples enableFeature pr fi diff --git a/ci/pipelines/meta/jinja.template.yml b/ci/pipelines/meta/jinja.template.yml index 6f1116fb8045..9fcd89a70414 100644 --- a/ci/pipelines/meta/jinja.template.yml +++ b/ci/pipelines/meta/jinja.template.yml @@ -70,7 +70,7 @@ resources: password: ((docker-password)) repository: gcr.io/((gcp-project))/((sanitized-geode-fork))-((sanitized-geode-build-branch))-meta-img {% if repository.fork == repository.upstream_fork %} -- name: geode-examples-pipeline +- name: gemfire-examples-pipeline type: git source: {{ github_access() | indent(4) }} @@ -174,7 +174,7 @@ jobs: serial: true public: ((public-pipelines)) plan: - - get: geode-examples-pipeline + - get: gemfire-examples-pipeline trigger: true - get: meta-mini-image trigger: true @@ -184,7 +184,7 @@ jobs: config: platform: linux inputs: - - name: geode-examples-pipeline + - name: gemfire-examples-pipeline outputs: - name: results params: @@ -203,7 +203,7 @@ jobs: MAVEN_SNAPSHOT_BUCKET: ((maven-snapshot-bucket)) SEMVER_PRERELEASE_TOKEN: ((semver-prerelease-token)) run: - path: geode-examples-pipeline/ci/pipelines/examples/deploy_pipeline.sh + path: gemfire-examples-pipeline/ci/pipelines/examples/deploy_pipeline.sh - set_pipeline: ((pipeline-prefix))examples file: results/generated-pipeline.yml var_files: diff --git a/ci/pipelines/meta/meta.properties b/ci/pipelines/meta/meta.properties index cee250ad61e7..d34895a87621 100644 --- a/ci/pipelines/meta/meta.properties +++ 
b/ci/pipelines/meta/meta.properties @@ -14,15 +14,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -UPSTREAM_FORK=apache -GEODE_FORK=apache -GEODE_REPO_NAME=geode -CONCOURSE_HOST=concourse.apachegeode-ci.info + +# This file lives in gemfire-assembly so that the pipeline building Geode-As-GemFire can remain +# an otherwise-perfect mirror of Apache Geode. +# Maven rebranding in group, version, et al is the result of pipeline variables set below. + +# Repository targets +UPSTREAM_FORK=gemfire +GEODE_FORK=gemfire +GEODE_REPO_NAME=geode-support + +# Concourse things +CONCOURSE_HOST=concourse.gemfire-ci.info CONCOURSE_TEAM=main -GCP_PROJECT=apachegeode-ci -ARTIFACT_BUCKET=files.apachegeode-ci.info -PUBLIC=true -REPOSITORY_PUBLIC=true -GRADLE_GLOBAL_ARGS="" -MAVEN_SNAPSHOT_BUCKET=gcs://maven.apachegeode-ci.info/snapshots/ +PUBLIC=false +REPOSITORY_PUBLIC=false +CONCOURSE_SCHEME=https SEMVER_PRERELEASE_TOKEN=build + +# GCS targets +GCP_PROJECT=gemfire-dev +ARTIFACT_BUCKET=gemfire-test-artifacts +MAVEN_SNAPSHOT_BUCKET=gcs://gemfire-build-resources/maven/${GEODE_FORK} + diff --git a/ci/pipelines/pull-request/deploy_pr_pipeline.sh b/ci/pipelines/pull-request/deploy_pr_pipeline.sh index 3f3e4f8d8b5e..82fc7adef627 100755 --- a/ci/pipelines/pull-request/deploy_pr_pipeline.sh +++ b/ci/pipelines/pull-request/deploy_pr_pipeline.sh @@ -76,7 +76,7 @@ pushd ${SCRIPTDIR} 2>&1 > /dev/null cat > repository.yml < instance-data/cost-data.json { "pipeline": "${PIPELINE_NAME}", diff --git a/ci/scripts/execute_build_examples.sh b/ci/scripts/execute_build_examples.sh index e490c5ecfbf6..44f444b58e18 100755 --- a/ci/scripts/execute_build_examples.sh +++ b/ci/scripts/execute_build_examples.sh @@ -55,4 +55,4 @@ GRADLE_COMMAND="./gradlew \ clean runAll" echo "${GRADLE_COMMAND}" -ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "set -x && mkdir -p tmp && cd geode-examples && ${SET_JAVA_HOME} && ${GRADLE_COMMAND}" +ssh ${SSH_OPTIONS} geode@${INSTANCE_IP_ADDRESS} "set -x && mkdir -p tmp && cd gemfire-examples && ${SET_JAVA_HOME} && ${GRADLE_COMMAND}" diff --git a/ci/scripts/run_benchmarks.sh b/ci/scripts/run_benchmarks.sh index 3314bf37cdd6..c772a131de68 100755 --- a/ci/scripts/run_benchmarks.sh +++ b/ci/scripts/run_benchmarks.sh @@ -31,6 +31,12 @@ SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" source concourse-metadata-resource/concourse_metadata +mkdir -p ${HOME}/.ssh +cat < ${HOME}/.ssh/config +SendEnv ORG_GRADLE_PROJECT_mavenUser +SendEnv ORG_GRADLE_PROJECT_mavenPassword +EOF + CLUSTER_TAG="${BUILD_PIPELINE_NAME}-${BUILD_JOB_NAME}-${BUILD_NAME}-${BUILD_ID}${TAG_POSTFIX}" RESULTS_DIR=$(pwd)/results/benchmarks-${CLUSTER_TAG} @@ -45,14 +51,18 @@ GEODE_REPO=${GEODE_REPO:-$(cd geode && git remote get-url origin)} BENCHMARKS_REPO=${BENCHMARKS_REPO:-$(cd geode-benchmarks && git remote get-url origin)} BASELINE_REPO=${BASELINE_REPO:-${GEODE_REPO}} -pushd geode +pushd geode; GEODE_REPO=rsync:$(pwd); BASELINE_REPO=rsync:$(pwd) GEODE_SHA=$(git rev-parse --verify HEAD) popd input="$(pwd)/results/failedTests" pushd geode-benchmarks/infrastructure/scripts/aws/ -./launch_cluster.sh -t ${CLUSTER_TAG} -c ${CLUSTER_COUNT} ${PURPOSE_OPTION} --ci +set +x +echo "${GCP_CREDENTIALS}" > google-credentials.json +echo "${GITHUB_CREDENTIALS}" > github-credentials.pem +set -x +./launch_cluster.sh -t ${CLUSTER_TAG} -c ${CLUSTER_COUNT} ${PURPOSE_OPTION} --ci --gh $(pwd)/github-credentials.pem --gc 
$(pwd)/google-credentials.json # test retry loop - Check if any tests have failed. If so, overwrite the TEST_OPTIONS with only the # failed tests. Test failures only result in an exit code of 1 when on the last iteration of loop. diff --git a/dev-tools/dependencies/README.md b/dev-tools/dependencies/README.md index 313d13a9a2b8..ca4505be5ef4 100644 --- a/dev-tools/dependencies/README.md +++ b/dev-tools/dependencies/README.md @@ -14,7 +14,6 @@ dev-tools/dependencies/bump.sh -l Step 2: In some cases, maven suggests beta releases, which Geode should not use. Manually search for those dependencies on mavencentral to see if there is a better choice. Special cases: -- tomcat6 (do not upgrade) - tomcat (upgrade to latest patch only for each of 7, 8.5, and 9) Step 3: Create a PR and start bumping dependencies. Push to the PR every few to run PR diff --git a/dev-tools/dependencies/bump.sh b/dev-tools/dependencies/bump.sh index b8931dc443f6..7b1bd4729726 100755 --- a/dev-tools/dependencies/bump.sh +++ b/dev-tools/dependencies/bump.sh @@ -26,7 +26,7 @@ if [ "$2" = "-l" ] ; then find . | grep build/dependencyUpdates/report.txt | xargs rm -f ./gradlew dependencyUpdates -Drevision=release find . | grep build/dependencyUpdates/report.txt | xargs cat \ - | grep ' -> ' | egrep -v '(Gradle|antlr|lucene|JUnitParams|docker-compose-rule|javax.servlet-api|springdoc|derby|selenium|jgroups|jmh|\[6.0.37|commons-collections|jaxb|testcontainers|gradle-tooling-api|slf4j|archunit)' \ + | grep ' -> ' | egrep -v '(Gradle|antlr|lucene|JUnitParams|docker-compose-rule|javax.servlet-api|springdoc|derby|selenium|jgroups|jmh|\[commons-collections|jaxb|testcontainers|gradle-tooling-api|slf4j|archunit)' \ | sort -u | tr -d '][' | sed -e 's/ -> / /' -e 's#.*:#'"$0 $1"' #' echo "cd .. ; geode/dev-tools/release/license_review.sh -v HEAD ; cd $(pwd)" echo "#Also: manually check for newer version of plugins listed in build.gradle (search on https://plugins.gradle.org/)" diff --git a/dev-tools/progress/README.md b/dev-tools/progress/README.md index d9a5d9f3a39e..da91067c8366 100644 --- a/dev-tools/progress/README.md +++ b/dev-tools/progress/README.md @@ -140,13 +140,13 @@ The default format displays commonly useful information about each test: Example: ``` -org.apache.geode.modules.session.catalina.Tomcat7CommitSessionValveTest.recycledResponseObjectDoesNotWrapAlreadyWrappedOutputBuffer +org.apache.geode.modules.session.catalina.Tomcat8CommitSessionValveTest.recycledResponseObjectDoesNotWrapAlreadyWrappedOutputBuffer Iteration: 1 Start: 2021-05-20 22:16:18.699 +0000 End: 2021-05-20 22:16:20.585 +0000 Duration: 1.886s Status: success -org.apache.geode.modules.session.catalina.Tomcat7CommitSessionValveTest.wrappedOutputBufferForwardsToDelegate +org.apache.geode.modules.session.catalina.Tomcat8CommitSessionValveTest.wrappedOutputBufferForwardsToDelegate Iteration: 1 Start: 2021-05-20 22:16:20.585 +0000 End: 2021-05-20 22:16:20.589 +0000 @@ -199,14 +199,14 @@ description: progress -j progress -j | jq # Use jq to pretty-print the JSON - progress -j -c Tomcat7CommitSessionValveTest + progress -j -c Tomcat8CommitSessionValveTest The output (if pretty-printed) looks like this: ```json { - "/path/to/geode/project/extensions/geode-modules-tomcat7/build/test/test-progress.txt": { - "org.apache.geode.modules.session.catalina.Tomcat7CommitSessionValveTest": { + "/path/to/geode/project/extensions/geode-modules-tomcat8/build/test/test-progress.txt": { + "org.apache.geode.modules.session.catalina.Tomcat8CommitSessionValveTest": { 
"recycledResponseObjectDoesNotWrapAlreadyWrappedOutputBuffer": [ { "Iteration": 1, diff --git a/etc/intellij-apache-copyright-notice.xml b/etc/intellij-apache-copyright-notice.xml deleted file mode 100644 index 940ad4fd6ec0..000000000000 --- a/etc/intellij-apache-copyright-notice.xml +++ /dev/null @@ -1,6 +0,0 @@ - - - - \ No newline at end of file diff --git a/etc/intellij-vmware-copyright-notice.xml b/etc/intellij-vmware-copyright-notice.xml new file mode 100644 index 000000000000..41a479b51c2c --- /dev/null +++ b/etc/intellij-vmware-copyright-notice.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/extensions/geode-modules-assembly/build.gradle b/extensions/geode-modules-assembly/build.gradle index 9a21957aa887..618a6ec0518d 100644 --- a/extensions/geode-modules-assembly/build.gradle +++ b/extensions/geode-modules-assembly/build.gradle @@ -20,7 +20,6 @@ plugins { id 'maven-publish' } evaluationDependsOn(':extensions:geode-modules') -evaluationDependsOn(':extensions:geode-modules-tomcat7') evaluationDependsOn(':extensions:geode-modules-tomcat8') evaluationDependsOn(':extensions:geode-modules-tomcat9') evaluationDependsOn(':extensions:geode-modules-session') @@ -50,7 +49,6 @@ def configureTcServerAssembly = { // All client-server files into('geode-cs/lib') { from project(':extensions:geode-modules').tasks.named('jar') - from project(':extensions:geode-modules-tomcat7').tasks.named('jar') from project(':extensions:geode-modules-tomcat8').tasks.named('jar') from project(':extensions:geode-modules-tomcat9').tasks.named('jar') from configurations.slf4jDeps @@ -88,7 +86,6 @@ def configureTcServerAssembly = { // All peer-to-peer files into('geode-p2p/lib') { from project(':extensions:geode-modules').tasks.named('jar') - from project(':extensions:geode-modules-tomcat7').tasks.named('jar') from project(':extensions:geode-modules-tomcat8').tasks.named('jar') from project(':extensions:geode-modules-tomcat9').tasks.named('jar') from configurations.slf4jDeps @@ -173,7 +170,6 @@ tasks.register('distTomcat', Zip) { // All client-server files into('lib') { from project(':extensions:geode-modules').tasks.named('jar') - from project(':extensions:geode-modules-tomcat7').tasks.named('jar') from project(':extensions:geode-modules-tomcat8').tasks.named('jar') from project(':extensions:geode-modules-tomcat9').tasks.named('jar') from configurations.slf4jDeps diff --git a/extensions/geode-modules-assembly/release/tcserver/geode-cs-tomcat-7/context-fragment.xml b/extensions/geode-modules-assembly/release/tcserver/geode-cs-tomcat-7/context-fragment.xml deleted file mode 100644 index b7ad94e521cf..000000000000 --- a/extensions/geode-modules-assembly/release/tcserver/geode-cs-tomcat-7/context-fragment.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - diff --git a/extensions/geode-modules-assembly/release/tcserver/geode-cs-tomcat-8/context-fragment.xml b/extensions/geode-modules-assembly/release/tcserver/geode-cs-tomcat-8/context-fragment.xml index c8ff83d510be..038b5ae53df6 100644 --- a/extensions/geode-modules-assembly/release/tcserver/geode-cs-tomcat-8/context-fragment.xml +++ b/extensions/geode-modules-assembly/release/tcserver/geode-cs-tomcat-8/context-fragment.xml @@ -1,8 +1,6 @@ - - - - - - - - - - - diff --git a/extensions/geode-modules-assembly/release/tcserver/geode-cs/server-fragment.xml b/extensions/geode-modules-assembly/release/tcserver/geode-cs/server-fragment.xml deleted file mode 100644 index 2ffccf636e9b..000000000000 --- 
a/extensions/geode-modules-assembly/release/tcserver/geode-cs/server-fragment.xml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - diff --git a/extensions/geode-modules-assembly/release/tcserver/geode-p2p-tomcat-7/context-fragment.xml b/extensions/geode-modules-assembly/release/tcserver/geode-p2p-tomcat-7/context-fragment.xml deleted file mode 100644 index 252fa8f0320a..000000000000 --- a/extensions/geode-modules-assembly/release/tcserver/geode-p2p-tomcat-7/context-fragment.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - - - - diff --git a/extensions/geode-modules-assembly/release/tcserver/geode-p2p-tomcat-8/context-fragment.xml b/extensions/geode-modules-assembly/release/tcserver/geode-p2p-tomcat-8/context-fragment.xml index 48bfcbb485c2..7db27c565700 100644 --- a/extensions/geode-modules-assembly/release/tcserver/geode-p2p-tomcat-8/context-fragment.xml +++ b/extensions/geode-modules-assembly/release/tcserver/geode-p2p-tomcat-8/context-fragment.xml @@ -1,8 +1,6 @@ - - - - - - - - - - - diff --git a/extensions/geode-modules-assembly/release/tcserver/geode-p2p/server-fragment.xml b/extensions/geode-modules-assembly/release/tcserver/geode-p2p/server-fragment.xml deleted file mode 100644 index 478ee37478b7..000000000000 --- a/extensions/geode-modules-assembly/release/tcserver/geode-p2p/server-fragment.xml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - diff --git a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/GemfireHttpSession.java b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/GemfireHttpSession.java index ab1256e86a06..6eccc494aff2 100644 --- a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/GemfireHttpSession.java +++ b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/GemfireHttpSession.java @@ -15,6 +15,8 @@ package org.apache.geode.modules.session.internal.filter; +import static org.apache.geode.internal.JvmSizeUtils.memoryOverhead; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInput; @@ -38,6 +40,7 @@ import org.apache.geode.Delta; import org.apache.geode.Instantiator; import org.apache.geode.InvalidDeltaException; +import org.apache.geode.internal.size.Sizeable; import org.apache.geode.modules.session.internal.filter.attributes.AbstractSessionAttributes; import org.apache.geode.modules.session.internal.filter.attributes.SessionAttributes; import org.apache.geode.modules.util.ClassLoaderObjectInputStream; @@ -46,7 +49,7 @@ * Class which implements a Gemfire persisted {@code HttpSession} */ @SuppressWarnings("deprecation") -public class GemfireHttpSession implements HttpSession, DataSerializable, Delta { +public class GemfireHttpSession implements HttpSession, DataSerializable, Delta, Sizeable { private static final transient Logger LOG = LoggerFactory.getLogger(GemfireHttpSession.class.getName()); @@ -56,6 +59,8 @@ public class GemfireHttpSession implements HttpSession, DataSerializable, Delta */ private static final long serialVersionUID = 238915238964017823L; + private static final int MEMORY_OVERHEAD = memoryOverhead(GemfireHttpSession.class); + /** * Id for the session */ @@ -451,4 +456,20 @@ String getJvmOwnerId() { return null; } + + private static final int ATOMIC_BOOLEAN_SIZE = memoryOverhead(AtomicBoolean.class); + + @Override + public int getSizeInBytes() { + // The 'manager' and 'context' fields are not sized
// since they reference shared instances. + int attributesSize = 0; + if (attributes != null) { + attributesSize = attributes.getSizeInBytes(); + } + return MEMORY_OVERHEAD + + memoryOverhead(id) + + ATOMIC_BOOLEAN_SIZE // for the 'serialized' field + + attributesSize; + } } diff --git a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractDeltaSessionAttributes.java b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractDeltaSessionAttributes.java index cf355579cb4c..f5a097284500 100644 --- a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractDeltaSessionAttributes.java +++ b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractDeltaSessionAttributes.java @@ -15,6 +15,8 @@ package org.apache.geode.modules.session.internal.filter.attributes; +import static org.apache.geode.internal.JvmSizeUtils.getReferenceSize; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -41,7 +43,7 @@ public abstract class AbstractDeltaSessionAttributes extends AbstractSessionAttr /** * This map holds the updates to attributes */ - protected transient Map deltas = + protected final transient Map deltas = Collections.synchronizedMap(new HashMap<>()); @Override @@ -93,4 +95,28 @@ public void fromDelta(DataInput in) throws IOException, InvalidDeltaException { } jvmOwnerId = in.readUTF(); } + + @Override + public int getSizeInBytes() { + // Field size is not accounted for here since + // that is done in non-abstract subclasses. + return deltasSizeInBytes() + super.getSizeInBytes(); + } + + /** + * Sizing the "deltas" map is a bit tricky. + * Both the key and value of the map refer + * to objects that are also referenced by the + * attributes field in our superclass. + * But each entry's value is wrapped by a DeltaEvent, + * so the size of that class is accounted for here. + * Also, the HashMap itself has overhead per entry + * (HASH_MAP_ENTRY_OVERHEAD), which is accounted for here. + * HashMap does have extra overhead depending on its + * loadFactor, and that is not accounted for here.
+ */ + private int deltasSizeInBytes() { + return deltas.size() * (HASH_MAP_ENTRY_OVERHEAD + + getReferenceSize() + DeltaEvent.MEMORY_OVERHEAD); + } } diff --git a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractSessionAttributes.java b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractSessionAttributes.java index d4d085e76ac1..59aa38260251 100644 --- a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractSessionAttributes.java +++ b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/AbstractSessionAttributes.java @@ -15,6 +15,10 @@ package org.apache.geode.modules.session.internal.filter.attributes; +import static org.apache.geode.internal.JvmSizeUtils.getReferenceSize; +import static org.apache.geode.internal.JvmSizeUtils.memoryOverhead; +import static org.apache.geode.internal.JvmSizeUtils.roundUpSize; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -27,6 +31,7 @@ import org.slf4j.LoggerFactory; import org.apache.geode.DataSerializer; +import org.apache.geode.cache.util.ObjectSizer; import org.apache.geode.internal.util.BlobHelper; import org.apache.geode.modules.session.internal.filter.GemfireHttpSession; @@ -43,7 +48,7 @@ public abstract class AbstractSessionAttributes implements SessionAttributes { /** * Internal attribute store. */ - protected Map attributes = + protected final Map attributes = Collections.synchronizedMap(new HashMap<>()); /** @@ -204,4 +209,36 @@ public void setJvmOwnerId(String jvmId) { public String getJvmOwnerId() { return jvmOwnerId; } + + @Override + public int getSizeInBytes() { + // Field size is not accounted for here since + // that is done in non-abstract subclasses.
+ // jvmOwnerId is not sized since it is a shared object + // session is not sized since it is a shared object + return attributesSizeInBytes(); + } + + private int attributesSizeInBytes() { + int size = 0; + synchronized (attributes) { + for (Map.Entry entry : attributes.entrySet()) { + size += HASH_MAP_ENTRY_OVERHEAD; + size += memoryOverhead(entry.getKey()); + size += ObjectSizer.DEFAULT.sizeof(entry.getValue()); + } + } + return size; + } + + /** + * Memory overhead for a single entry in a HashMap + */ + protected static final int HASH_MAP_ENTRY_OVERHEAD = getReferenceSize() // for ref to HashMap.Node + // the following are for the fields of HashMap.Node, an internal JDK class + + roundUpSize(4 /* for int hash field */ + + getReferenceSize() /* for key field */ + + getReferenceSize() /* for value field */ + + getReferenceSize() /* for next field */); + } diff --git a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaEvent.java b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaEvent.java index 4b0115dd02e2..c85e2dea49bc 100644 --- a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaEvent.java +++ b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaEvent.java @@ -15,6 +15,8 @@ package org.apache.geode.modules.session.internal.filter.attributes; +import static org.apache.geode.internal.JvmSizeUtils.memoryOverhead; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -33,6 +35,7 @@ public class DeltaEvent implements DataSerializable { private static final Logger LOG = LoggerFactory.getLogger(DeltaEvent.class.getName()); + public static final int MEMORY_OVERHEAD = memoryOverhead(DeltaEvent.class); /** * The event is either an update (true) or a remove (false) */ diff --git a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaQueuedSessionAttributes.java b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaQueuedSessionAttributes.java index e588adcc851c..4ad738b82b73 100644 --- a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaQueuedSessionAttributes.java +++ b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaQueuedSessionAttributes.java @@ -15,6 +15,8 @@ package org.apache.geode.modules.session.internal.filter.attributes; +import static org.apache.geode.internal.JvmSizeUtils.memoryOverhead; + import org.apache.geode.DataSerializable; import org.apache.geode.Instantiator; @@ -24,6 +26,8 @@ */ public class DeltaQueuedSessionAttributes extends AbstractDeltaSessionAttributes { + private static final int MEMORY_OVERHEAD = memoryOverhead(DeltaQueuedSessionAttributes.class); + private Trigger trigger = Trigger.SET; private enum Trigger { @@ -82,4 +86,10 @@ public Object removeAttribute(String attr) { deltas.put(attr, new DeltaEvent(false, attr, null)); return obj; } + + @Override + public int getSizeInBytes() { + return MEMORY_OVERHEAD + super.getSizeInBytes(); + } + }
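A quick sanity check of the HASH_MAP_ENTRY_OVERHEAD arithmetic above, as a standalone sketch. The concrete numbers are assumptions for a 64-bit JVM without compressed oops, where getReferenceSize() would return 8 and roundUpSize() would pad to an 8-byte boundary; JvmSizeUtils picks the real values at runtime, and the DeltaEvent size here is a placeholder, so treat this as illustration rather than the product's numbers.

```java
// Sketch only: stand-ins for JvmSizeUtils.getReferenceSize()/roundUpSize(),
// assuming 8-byte references and 8-byte object alignment.
public class HashMapEntryOverheadSketch {
  static final int REFERENCE_SIZE = 8;

  static int roundUpSize(int size) {
    return (size + 7) & ~7; // pad to the next 8-byte multiple
  }

  public static void main(String[] args) {
    // Fields of one HashMap.Node: int hash plus key/value/next references.
    int nodeSize = roundUpSize(4 + 3 * REFERENCE_SIZE); // 28 -> 32
    int entryOverhead = REFERENCE_SIZE + nodeSize; // table slot ref + node = 40

    // deltasSizeInBytes() then charges each entry that overhead plus one
    // reference and the flat size of its DeltaEvent wrapper.
    int deltaEventOverhead = 32; // placeholder for DeltaEvent.MEMORY_OVERHEAD
    System.out.println(5 * (entryOverhead + REFERENCE_SIZE + deltaEventOverhead)); // 400
  }
}
```

diff --git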
a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaSessionAttributes.java b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaSessionAttributes.java index 3b84241f85f5..79b5bfe7bf45 100644 --- a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaSessionAttributes.java +++ b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/DeltaSessionAttributes.java @@ -15,6 +15,8 @@ package org.apache.geode.modules.session.internal.filter.attributes; +import static org.apache.geode.internal.JvmSizeUtils.memoryOverhead; + import org.apache.geode.DataSerializable; import org.apache.geode.Instantiator; @@ -24,6 +26,8 @@ */ public class DeltaSessionAttributes extends AbstractDeltaSessionAttributes { + private static final int MEMORY_OVERHEAD = memoryOverhead(DeltaSessionAttributes.class); + // Register ourselves for de-serialization static { registerInstantiator(); @@ -66,4 +70,10 @@ public Object removeAttribute(String attr) { flush(); return obj; } + + @Override + public int getSizeInBytes() { + return MEMORY_OVERHEAD + super.getSizeInBytes(); + } + } diff --git a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/SessionAttributes.java b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/SessionAttributes.java index e928eda266c1..211a9b072e5a 100644 --- a/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/SessionAttributes.java +++ b/extensions/geode-modules-session-internal/src/main/java/org/apache/geode/modules/session/internal/filter/attributes/SessionAttributes.java @@ -18,6 +18,7 @@ import java.util.Set; import org.apache.geode.DataSerializable; +import org.apache.geode.internal.size.Sizeable; import org.apache.geode.modules.session.internal.filter.GemfireHttpSession; /** @@ -25,7 +26,7 @@ * in the session which needs to be propagated for caching - as such it also includes other * 'attributes' such as maxInactiveInterval and lastAccessedTime */ -public interface SessionAttributes extends DataSerializable { +public interface SessionAttributes extends DataSerializable, Sizeable { /** * Set the session to which these attributes belong. 
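Taken together, the hunks above thread sizing through the whole session-attributes hierarchy: each concrete class charges its own flat overhead once and delegates upward via super.getSizeInBytes(). A minimal sketch of that layering, with simplified names and fixed numbers standing in for the memoryOverhead() calls (the real classes also size their attribute and delta maps as shown earlier):

```java
// Minimal sketch of the getSizeInBytes() layering introduced above.
// Numeric constants are placeholders for JvmSizeUtils.memoryOverhead(...)
// results, which vary by JVM; names are simplified from the real classes.
interface Sizeable {
  int getSizeInBytes();
}

abstract class AbstractAttributes implements Sizeable {
  @Override
  public int getSizeInBytes() {
    // Shared objects (the session, jvmOwnerId) are deliberately not charged.
    return sizeOfAttributeMap();
  }

  abstract int sizeOfAttributeMap();
}

abstract class AbstractDeltaAttributes extends AbstractAttributes {
  @Override
  public int getSizeInBytes() {
    return sizeOfDeltaMap() + super.getSizeInBytes();
  }

  abstract int sizeOfDeltaMap();
}

class DeltaAttributes extends AbstractDeltaAttributes {
  private static final int MEMORY_OVERHEAD = 48; // placeholder flat size

  @Override
  public int getSizeInBytes() {
    // Only the concrete class charges its object header and fields,
    // so the flat size is counted exactly once per instance.
    return MEMORY_OVERHEAD + super.getSizeInBytes();
  }

  @Override
  int sizeOfAttributeMap() {
    return 100; // placeholder for the sized attribute map
  }

  @Override
  int sizeOfDeltaMap() {
    return 40; // placeholder for the sized deltas map
  }
}

class SizingSketch {
  public static void main(String[] args) {
    System.out.println(new DeltaAttributes().getSizeInBytes()); // 48 + 40 + 100 = 188
  }
}
```

The payoff shows up in the new integration test below: once GemfireHttpSession is Sizeable, heap-LRU eviction (HeapLRUController.entrySize) sees attribute changes reflected in the entry size instead of treating every session as the same cost.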
diff --git a/extensions/geode-modules-session/build.gradle b/extensions/geode-modules-session/build.gradle index 36ec77f3ece7..bdda9075a7ec 100644 --- a/extensions/geode-modules-session/build.gradle +++ b/extensions/geode-modules-session/build.gradle @@ -40,7 +40,7 @@ dependencies { integrationTestImplementation(project(':geode-logging')) implementation('javax.servlet:javax.servlet-api') - implementation('org.apache.tomcat:servlet-api:' + DependencyConstraints.get('tomcat6.version')) + implementation('org.apache.tomcat:tomcat-servlet-api:' + DependencyConstraints.get('tomcat8.version')) implementation('org.slf4j:slf4j-api') integrationTestImplementation('com.mockrunner:mockrunner-servlet') { @@ -50,7 +50,7 @@ dependencies { integrationTestImplementation('commons-io:commons-io') integrationTestImplementation('javax.servlet:javax.servlet-api') integrationTestImplementation('junit:junit') - integrationTestImplementation('org.apache.tomcat:jasper:' + DependencyConstraints.get('tomcat6.version')) + integrationTestImplementation('org.apache.tomcat:tomcat-jasper:' + DependencyConstraints.get('tomcat8.version')) integrationTestImplementation('org.assertj:assertj-core') integrationTestImplementation('org.eclipse.jetty:jetty-http:' + DependencyConstraints.get('jetty.version') + ':tests') integrationTestImplementation('org.eclipse.jetty:jetty-server') diff --git a/extensions/geode-modules-session/src/integrationTest/java/org/apache/geode/modules/session/internal/filter/GemfireHttpSessionTest.java b/extensions/geode-modules-session/src/integrationTest/java/org/apache/geode/modules/session/internal/filter/GemfireHttpSessionTest.java new file mode 100644 index 000000000000..357a7504bded --- /dev/null +++ b/extensions/geode-modules-session/src/integrationTest/java/org/apache/geode/modules/session/internal/filter/GemfireHttpSessionTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2022 VMware, Inc. 
+ * https://network.tanzu.vmware.com/legal_documents/vmware_eula + */ +package org.apache.geode.modules.session.internal.filter; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; + +import javax.servlet.ServletContext; + +import org.junit.Test; + +import org.apache.geode.cache.EvictionAction; +import org.apache.geode.cache.EvictionAlgorithm; +import org.apache.geode.internal.cache.eviction.EvictionCounters; +import org.apache.geode.internal.cache.eviction.HeapLRUController; +import org.apache.geode.modules.session.internal.filter.attributes.DeltaQueuedSessionAttributes; +import org.apache.geode.modules.session.internal.filter.attributes.DeltaSessionAttributes; + +public class GemfireHttpSessionTest { + + @Test + public void getSizeInBytesAccountsForId() { + GemfireHttpSession session1 = new GemfireHttpSession("id", mock(ServletContext.class)); + GemfireHttpSession session2 = + new GemfireHttpSession("BIGGGGGGGGGGGGGG id", mock(ServletContext.class)); + int sizeWithSmallId = session1.getSizeInBytes(); + int sizeWithBigId = session2.getSizeInBytes(); + assertThat(sizeWithBigId).isGreaterThan(sizeWithSmallId); + } + + @Test + public void getSizeInBytesAccountsForDeltaSessionAttributes() { + GemfireHttpSession session1 = new GemfireHttpSession("id", mock(ServletContext.class)); + GemfireHttpSession session2 = new GemfireHttpSession("id", mock(ServletContext.class)); + session2.setManager(mock(SessionManager.class)); + DeltaSessionAttributes attributes = new DeltaSessionAttributes(); + attributes.setSession(session2); + session2.setAttributes(attributes); + session2.putValue("attributeName", "attributeValue"); + int sizeWithNoAttributes = session1.getSizeInBytes(); + int sizeWithAttributes = session2.getSizeInBytes(); + assertThat(sizeWithAttributes).isGreaterThan(sizeWithNoAttributes); + } + + @Test + public void getSizeInBytesAccountsForDeltaQueuedSessionAttributes() { + GemfireHttpSession session1 = new GemfireHttpSession("id", mock(ServletContext.class)); + GemfireHttpSession session2 = new GemfireHttpSession("id", mock(ServletContext.class)); + session2.setManager(mock(SessionManager.class)); + DeltaQueuedSessionAttributes attributes = new DeltaQueuedSessionAttributes(); + attributes.setSession(session2); + session2.setAttributes(attributes); + session2.putValue("attributeName", "attributeValue"); + int sizeWithNoAttributes = session1.getSizeInBytes(); + int sizeWithAttributes = session2.getSizeInBytes(); + assertThat(sizeWithAttributes).isGreaterThan(sizeWithNoAttributes); + } + + @Test + public void verifyHeapLRUControllerWillDetectSizeChanges() { + GemfireHttpSession session = new GemfireHttpSession("id", mock(ServletContext.class)); + session.setManager(mock(SessionManager.class)); + DeltaSessionAttributes attributes = new DeltaSessionAttributes(); + attributes.setSession(session); + session.setAttributes(attributes); + HeapLRUController controller = new HeapLRUController(mock(EvictionCounters.class), + EvictionAction.DEFAULT_EVICTION_ACTION, null, + EvictionAlgorithm.LRU_HEAP); + int sizeWithNoAttributes = controller.entrySize("key", session); + session.putValue("attributeName", "attributeValue"); + int sizeWithAttributes = controller.entrySize("key", session); + assertThat(sizeWithAttributes).isGreaterThan(sizeWithNoAttributes); + } +} diff --git a/extensions/geode-modules-test/build.gradle b/extensions/geode-modules-test/build.gradle index 58154fc30466..80a6b997ab36 100644 --- a/extensions/geode-modules-test/build.gradle +++ 
b/extensions/geode-modules-test/build.gradle @@ -32,5 +32,5 @@ dependencies { api(project(':extensions:geode-modules')) compileOnly(platform(project(':boms:geode-all-bom'))) - compileOnly('org.apache.tomcat:catalina-ha:' + DependencyConstraints.get('tomcat6.version')) + compileOnly('org.apache.tomcat:tomcat-catalina-ha:' + DependencyConstraints.get('tomcat8.version')) } diff --git a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/AbstractSessionsTest.java b/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/AbstractSessionsTest.java deleted file mode 100644 index da06fef3faf5..000000000000 --- a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/AbstractSessionsTest.java +++ /dev/null @@ -1,442 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.modules.session; - -import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL; -import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.beans.PropertyChangeEvent; -import java.io.File; -import java.io.IOException; -import java.io.PrintWriter; -import java.nio.file.Paths; - -import javax.servlet.http.HttpSession; - -import com.meterware.httpunit.GetMethodWebRequest; -import com.meterware.httpunit.WebConversation; -import com.meterware.httpunit.WebRequest; -import com.meterware.httpunit.WebResponse; -import org.apache.catalina.core.StandardWrapper; -import org.apache.commons.io.FileUtils; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.Test; -import org.springframework.util.SocketUtils; -import org.xml.sax.SAXException; - -import org.apache.geode.cache.Region; -import org.apache.geode.modules.session.catalina.DeltaSessionManager; -import org.apache.geode.modules.session.catalina.PeerToPeerCacheLifecycleListener; - -public abstract class AbstractSessionsTest { - protected static int port; - private static EmbeddedTomcat server; - private static StandardWrapper servlet; - private static Region region; - protected static DeltaSessionManager sessionManager; - - // Set up the servers we need - protected static void setupServer(final DeltaSessionManager manager) throws Exception { - FileUtils.copyDirectory( - Paths.get("..", "..", "resources", "integrationTest", "tomcat").toFile(), - new File("./tomcat")); - port = SocketUtils.findAvailableTcpPort(); - server = new EmbeddedTomcat(port, "JVM-1"); - - final PeerToPeerCacheLifecycleListener p2pListener = new PeerToPeerCacheLifecycleListener(); - p2pListener.setProperty(MCAST_PORT, "0"); - 
p2pListener.setProperty(LOG_LEVEL, "config"); - server.getEmbedded().addLifecycleListener(p2pListener); - sessionManager = manager; - sessionManager.setEnableCommitValve(true); - server.getRootContext().setManager(sessionManager); - - servlet = server.addServlet("/test/*", "default", CommandServlet.class.getName()); - server.startContainer(); - - /* - * Can only retrieve the region once the container has started up (and the cache has started - * too). - */ - region = sessionManager.getSessionCache().getSessionRegion(); - } - - @AfterClass - public static void teardownClass() { - server.stopContainer(); - } - - /** - * Reset some data - */ - @Before - public void setup() { - sessionManager.setMaxInactiveInterval(30); - region.clear(); - } - - private WebResponse setCallbackAndExecuteGet(final Callback callback) - throws IOException, SAXException { - servlet.getServletContext().setAttribute("callback", callback); - - final WebConversation wc = new WebConversation(); - final WebRequest req = new GetMethodWebRequest(String.format("http://localhost:%d/test", port)); - req.setParameter("cmd", QueryCommand.CALLBACK.name()); - req.setParameter("param", "callback"); - - return wc.getResponse(req); - } - - private WebRequest prepareRequest(final String key, final String value) { - final WebRequest req = new GetMethodWebRequest(String.format("http://localhost:%d/test", port)); - req.setParameter("cmd", QueryCommand.SET.name()); - req.setParameter("param", key); - req.setParameter("value", value); - - return req; - } - - /* - * Check that the basics are working - */ - @Test - public void testSanity() throws Exception { - final WebConversation wc = new WebConversation(); - final WebRequest req = new GetMethodWebRequest(String.format("http://localhost:%d/test", port)); - req.setParameter("cmd", QueryCommand.GET.name()); - req.setParameter("param", "null"); - final WebResponse response = wc.getResponse(req); - - assertEquals("JSESSIONID", response.getNewCookieNames()[0]); - } - - /* - * Test callback functionality. This is here really just as an example. Callbacks are useful to - * implement per test actions which can be defined within the actual test method instead of in a - * separate servlet class. - */ - @Test - public void testCallback() throws Exception { - final String helloWorld = "Hello World"; - final Callback c = (request, response) -> { - final PrintWriter out = response.getWriter(); - out.write(helloWorld); - }; - - final WebResponse response = setCallbackAndExecuteGet(c); - assertEquals(helloWorld, response.getText()); - } - - /* - * Test that calling session.isNew() works for the initial as well as subsequent requests. - */ - @Test - public void testIsNew() throws Exception { - final Callback c = (request, response) -> { - final HttpSession session = request.getSession(); - response.getWriter().write(Boolean.toString(session.isNew())); - }; - servlet.getServletContext().setAttribute("callback", c); - - final WebConversation wc = new WebConversation(); - final WebRequest req = new GetMethodWebRequest(String.format("http://localhost:%d/test", port)); - - req.setParameter("cmd", QueryCommand.CALLBACK.name()); - req.setParameter("param", "callback"); - WebResponse response = wc.getResponse(req); - - assertEquals("true", response.getText()); - response = wc.getResponse(req); - - assertEquals("false", response.getText()); - } - - /* - * Check that our session persists. The values we pass in as query params are used to set - * attributes on the session. 
- */ - @Test - public void testSessionPersists1() throws Exception { - final String key = "value_testSessionPersists1"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = prepareRequest(key, value); - WebResponse response = wc.getResponse(req); - final String sessionId = response.getNewCookieValue("JSESSIONID"); - - assertNotNull("No apparent session cookie", sessionId); - - // The request retains the cookie from the prior response... - req.setParameter("cmd", QueryCommand.GET.name()); - req.setParameter("param", key); - req.removeParameter("value"); - response = wc.getResponse(req); - - assertEquals(value, response.getText()); - } - - /* - * Test that invalidating a session makes it's attributes inaccessible. - */ - @Test - public void testInvalidate() throws Exception { - final String key = "value_testInvalidate"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = prepareRequest(key, value); - wc.getResponse(req); - - // Invalidate the session - req.removeParameter("param"); - req.removeParameter("value"); - req.setParameter("cmd", QueryCommand.INVALIDATE.name()); - wc.getResponse(req); - - // The attribute should not be accessible now... - req.setParameter("cmd", QueryCommand.GET.name()); - req.setParameter("param", key); - final WebResponse response = wc.getResponse(req); - - assertEquals("", response.getText()); - } - - /* - * Test setting the session expiration - */ - @Test - public void testSessionExpiration1() throws Exception { - // TestSessions only live for a second - sessionManager.setMaxInactiveInterval(1); - - final String key = "value_testSessionExpiration1"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = prepareRequest(key, value); - wc.getResponse(req); - - // Sleep a while - Thread.sleep(2000); - - // The attribute should not be accessible now... - req.setParameter("cmd", QueryCommand.GET.name()); - req.setParameter("param", key); - final WebResponse response = wc.getResponse(req); - - assertEquals("", response.getText()); - } - - /** - * Test setting the session expiration via a property change as would happen under normal - * deployment conditions. - */ - @Test - public void testSessionExpiration2() { - // TestSessions only live for a minute - sessionManager.propertyChange(new PropertyChangeEvent(server.getRootContext(), "sessionTimeout", - 30, 1)); - - // Check that the value has been set to 60 seconds - assertEquals(60, sessionManager.getMaxInactiveInterval()); - } - - /* - * Test expiration of a session by the tomcat container, rather than gemfire expiration - */ - @Test - public void testSessionExpirationByContainer() throws Exception { - - final String key = "value_testSessionExpiration1"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = prepareRequest(key, value); - wc.getResponse(req); - - // Set the session timeout of this one session. 
- req.setParameter("cmd", QueryCommand.SET_MAX_INACTIVE.name()); - req.setParameter("value", "1"); - wc.getResponse(req); - - // Wait until the session should expire - Thread.sleep(2000); - - // Do a request, which should cause the session to be expired - req.setParameter("cmd", QueryCommand.GET.name()); - req.setParameter("param", key); - final WebResponse response = wc.getResponse(req); - - assertEquals("", response.getText()); - } - - /* - * Test that removing a session attribute also removes it from the region - */ - @Test - public void testRemoveAttribute() throws Exception { - final String key = "value_testRemoveAttribute"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = prepareRequest(key, value); - WebResponse response = wc.getResponse(req); - final String sessionId = response.getNewCookieValue("JSESSIONID"); - - // Implicitly remove the attribute - req.removeParameter("value"); - wc.getResponse(req); - - // The attribute should not be accessible now... - req.setParameter("cmd", QueryCommand.GET.name()); - req.setParameter("param", key); - response = wc.getResponse(req); - - assertEquals("", response.getText()); - assertNull(region.get(sessionId).getAttribute(key)); - } - - /* - * Test that a session attribute gets set into the region too. - */ - @Test - public void testBasicRegion() throws Exception { - final String key = "value_testBasicRegion"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = prepareRequest(key, value); - final WebResponse response = wc.getResponse(req); - final String sessionId = response.getNewCookieValue("JSESSIONID"); - - assertEquals(value, region.get(sessionId).getAttribute(key)); - } - - /* - * Test that a session attribute gets removed from the region when the session is invalidated. - */ - @Test - public void testRegionInvalidate() throws Exception { - final String key = "value_testRegionInvalidate"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = prepareRequest(key, value); - final WebResponse response = wc.getResponse(req); - final String sessionId = response.getNewCookieValue("JSESSIONID"); - - // Invalidate the session - req.removeParameter("param"); - req.removeParameter("value"); - req.setParameter("cmd", QueryCommand.INVALIDATE.name()); - wc.getResponse(req); - - assertNull("The region should not have an entry for this session", region.get(sessionId)); - } - - /* - * Test that multiple attribute updates, within the same request result in only the latest one - * being effective. 
- */ - @Test - public void testMultipleAttributeUpdates() throws Exception { - final String key = "value_testMultipleAttributeUpdates"; - final Callback c = (request, response) -> { - final HttpSession session = request.getSession(); - for (int i = 0; i < 1000; i++) { - session.setAttribute(key, Integer.toString(i)); - } - }; - - final WebResponse response = setCallbackAndExecuteGet(c); - final String sessionId = response.getNewCookieValue("JSESSIONID"); - assertEquals("999", region.get(sessionId).getAttribute(key)); - } - - /* - * Test for issue #38 CommitSessionValve throws exception on invalidated sessions - */ - @Test - public void testCommitSessionValveInvalidSession() throws Exception { - final Callback c = (request, response) -> { - final HttpSession session = request.getSession(); - session.invalidate(); - response.getWriter().write("done"); - }; - - final WebResponse response = setCallbackAndExecuteGet(c); - assertEquals("done", response.getText()); - } - - /* - * Test for issue #45 Sessions are being created for every request - */ - @Test - public void testExtraSessionsNotCreated() throws Exception { - final Callback c = (request, response) -> { - // Do nothing with sessions - response.getWriter().write("done"); - }; - - final WebResponse response = setCallbackAndExecuteGet(c); - assertEquals("done", response.getText()); - assertEquals("The region should be empty", 0, region.size()); - } - - /* - * Test for issue #46 lastAccessedTime is not updated at the start of the request, but only at the - * end. - */ - @Test - public void testLastAccessedTime() throws Exception { - final Callback c = (request, response) -> { - final HttpSession session = request.getSession(); - // Hack to expose the session to our test context - session.getServletContext().setAttribute("session", session); - session.setAttribute("lastAccessTime", session.getLastAccessedTime()); - try { - Thread.sleep(100); - } catch (final InterruptedException ex) { - // Ignore. - } - session.setAttribute("somethingElse", 1); - request.getSession(); - response.getWriter().write("done"); - }; - servlet.getServletContext().setAttribute("callback", c); - - final WebConversation wc = new WebConversation(); - final WebRequest req = new GetMethodWebRequest(String.format("http://localhost:%d/test", port)); - - // Execute the callback - req.setParameter("cmd", QueryCommand.CALLBACK.name()); - req.setParameter("param", "callback"); - wc.getResponse(req); - - final HttpSession session = (HttpSession) servlet.getServletContext().getAttribute("session"); - final Long lastAccess = (Long) session.getAttribute("lastAccessTime"); - - assertTrue( - "Last access time not set correctly: " + lastAccess + " not <= " - + session.getLastAccessedTime(), - lastAccess <= session.getLastAccessedTime()); - } -} diff --git a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/EmbeddedTomcat.java b/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/EmbeddedTomcat.java deleted file mode 100644 index ec1e0a8360f4..000000000000 --- a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/EmbeddedTomcat.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. 
The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.modules.session; - -import java.io.File; -import java.net.InetAddress; -import java.net.MalformedURLException; - -import org.apache.catalina.Context; -import org.apache.catalina.Engine; -import org.apache.catalina.Host; -import org.apache.catalina.LifecycleException; -import org.apache.catalina.connector.Connector; -import org.apache.catalina.core.StandardEngine; -import org.apache.catalina.core.StandardService; -import org.apache.catalina.core.StandardWrapper; -import org.apache.catalina.loader.WebappLoader; -import org.apache.catalina.realm.MemoryRealm; -import org.apache.catalina.startup.Embedded; -import org.apache.catalina.valves.ValveBase; -import org.apache.juli.logging.Log; -import org.apache.juli.logging.LogFactory; - -import org.apache.geode.modules.session.catalina.JvmRouteBinderValve; - -public class EmbeddedTomcat { - private final Log logger = LogFactory.getLog(getClass()); - private final int port; - private final Embedded container; - private final Context rootContext; - - EmbeddedTomcat(int port, String jvmRoute) throws MalformedURLException { - this.port = port; - - // create server - container = new Embedded(); - - // The directory to create the Tomcat server configuration under. - container.setCatalinaHome("tomcat"); - container.setRealm(new MemoryRealm()); - - // create webapp loader - WebappLoader loader = new WebappLoader(getClass().getClassLoader()); - // The classes directory for the web application being run. - loader.addRepository(new File("target/classes").toURI().toURL().toString()); - - // The web resources directory for the web application being run. - String webappDir = ""; - rootContext = container.createContext("", webappDir); - rootContext.setLoader(loader); - rootContext.setReloadable(true); - - // Otherwise we get NPE when instantiating servlets - rootContext.setIgnoreAnnotations(true); - - // create host - Host localHost = container.createHost("127.0.0.1", new File("").getAbsolutePath()); - localHost.addChild(rootContext); - - localHost.setDeployOnStartup(true); - - // create engine - Engine engine = container.createEngine(); - engine.setName("localEngine"); - engine.addChild(localHost); - engine.setDefaultHost(localHost.getName()); - engine.setJvmRoute(jvmRoute); - engine.setService(new StandardService()); - container.addEngine(engine); - - // create http connector - Connector httpConnector = container.createConnector((InetAddress) null, port, false); - container.addConnector(httpConnector); - container.setAwait(true); - - // Create the JVMRoute valve for session failover - ValveBase valve = new JvmRouteBinderValve(); - ((StandardEngine) engine).addValve(valve); - } - - /** - * Starts the embedded Tomcat server. 
- */
-  void startContainer() throws LifecycleException {
-    // start server
-    container.start();
-
-    // add shutdown hook to stop server
-    Runtime.getRuntime().addShutdownHook(new Thread(this::stopContainer));
-  }
-
-  /**
-   * Stops the embedded Tomcat server.
-   */
-  void stopContainer() {
-    try {
-      if (container != null) {
-        container.stop();
-        logger.info("Stopped container");
-      }
-    } catch (LifecycleException exception) {
-      logger.warn("Cannot stop Tomcat: " + exception.getMessage());
-    }
-  }
-
-  StandardWrapper addServlet(String path, String name, String clazz) {
-    StandardWrapper servlet = (StandardWrapper) rootContext.createWrapper();
-    servlet.setName(name);
-    servlet.setServletClass(clazz);
-    servlet.setLoadOnStartup(1);
-
-    rootContext.addChild(servlet);
-    rootContext.addServletMapping(path, name);
-
-    servlet.setParent(rootContext);
-
-    return servlet;
-  }
-
-  Embedded getEmbedded() {
-    return container;
-  }
-
-  Context getRootContext() {
-    return rootContext;
-  }
-
-  public int getPort() {
-    return port;
-  }
-}
diff --git a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractCommitSessionValveIntegrationTest.java b/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractCommitSessionValveIntegrationTest.java
index 66fcd0800828..5c1e46a3b79d 100644
--- a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractCommitSessionValveIntegrationTest.java
+++ b/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractCommitSessionValveIntegrationTest.java
@@ -64,8 +64,8 @@ protected void parameterizedSetUp(final RegionShortcut regionShortcut) {
     deltaSessionFacade = new DeltaSessionFacade(deltaSession);
 
     // Valve use the context to log messages
-    when(deltaSessionManager.getTheContext()).thenReturn(mock(Context.class));
-    when(deltaSessionManager.getTheContext().getLogger()).thenReturn(mock(Log.class));
+    when(deltaSessionManager.getContext()).thenReturn(mock(Context.class));
+    when(deltaSessionManager.getContext().getLogger()).thenReturn(mock(Log.class));
   }
 
   @Test
diff --git a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractDeltaSessionManagerTest.java b/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractDeltaSessionManagerTest.java
index c28256cbb680..3bffc49172af 100644
--- a/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractDeltaSessionManagerTest.java
+++ b/extensions/geode-modules-test/src/main/java/org/apache/geode/modules/session/catalina/AbstractDeltaSessionManagerTest.java
@@ -15,9 +15,8 @@
 package org.apache.geode.modules.session.catalina;
 
-import static org.apache.geode.modules.util.RegionConfiguration.DEFAULT_MAX_INACTIVE_INTERVAL;
+import static java.util.concurrent.TimeUnit.MINUTES;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -25,7 +24,6 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import java.beans.PropertyChangeEvent;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
@@ -35,7 +33,7 @@
 import org.apache.catalina.Context;
 import org.apache.catalina.Session;
 import org.apache.juli.logging.Log;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.geode.cache.Cache;
 import org.apache.geode.cache.Region;
@@ -53,7 +51,7 @@ public abstract class AbstractDeltaSessionManagerTest
   protected Region<String, HttpSession> operatingRegion;
 
-  public void initTest() {
+  void initTest() {
     sessionCache = mock(AbstractSessionCache.class);
     cache = mock(GemFireCacheImpl.class);
     logger = mock(Log.class);
@@ -64,14 +62,14 @@ public void initTest() {
 
     doReturn(sessionCache).when(manager).getSessionCache();
     doReturn(logger).when(manager).getLogger();
-    doReturn(context).when(manager).getTheContext();
+    doReturn(context).when(manager).getContext();
     doReturn(managerStats).when(manager).getStatistics();
     doReturn(cacheStats).when(sessionCache).getStatistics();
     doReturn(operatingRegion).when(sessionCache).getOperatingRegion();
   }
 
   @Test
-  public void getRegionAttributesIdSetsIdFromSessionCacheWhenAttributesIdIsNull() {
+  void getRegionAttributesIdSetsIdFromSessionCacheWhenAttributesIdIsNull() {
     final String regionAttributesId = "attributesIdFromSessionCache";
 
     doReturn(regionAttributesId).when(sessionCache).getDefaultRegionAttributesId();
@@ -82,7 +80,7 @@ public void getRegionAttributesIdSetsIdFromSessionCacheWhenAttributesIdIsNull()
   }
 
   @Test
-  public void getEnableLocalCacheSetsIdFromSessionCacheWhenEnableLocalCacheIsNull() {
+  void getEnableLocalCacheSetsIdFromSessionCacheWhenEnableLocalCacheIsNull() {
     final boolean isLocalCacheEnabled = true;
 
     doReturn(isLocalCacheEnabled).when(sessionCache).getDefaultEnableLocalCache();
@@ -93,14 +91,14 @@ public void getEnableLocalCacheSetsIdFromSessionCacheWhenEnableLocalCacheIsNull(
   }
 
   @Test
-  public void findSessionsReturnsNullWhenIdIsNull() throws IOException {
+  void findSessionsReturnsNullWhenIdIsNull() throws IOException {
     final Session session = manager.findSession(null);
 
     assertThat(session).isNull();
   }
 
   @Test
-  public void findSessionsReturnsNullAndLogsMessageWhenContextNameIsNotValid() throws IOException {
+  void findSessionsReturnsNullAndLogsMessageWhenContextNameIsNotValid() throws IOException {
     final String sessionId = "sessionId";
     final String contextName = "contextName";
     final String invalidContextName = "invalidContextName";
@@ -117,7 +115,7 @@ public void findSessionsReturnsNullAndLogsMessageWhenContextNameIsNotValid() thr
   }
 
   @Test
-  public void findSessionsReturnsNullWhenIdIsNotFound() throws IOException {
+  void findSessionsReturnsNullWhenIdIsNotFound() throws IOException {
     final String sessionId = "sessionId";
 
     when(sessionCache.getSession(sessionId)).thenReturn(null);
@@ -128,7 +126,7 @@ public void findSessionsReturnsNullWhenIdIsNotFound() throws IOException {
   }
 
   @Test
-  public void findSessionsReturnsProperSessionByIdWhenIdAndContextNameIsValid() throws IOException {
+  void findSessionsReturnsProperSessionByIdWhenIdAndContextNameIsValid() throws IOException {
     final String sessionId = "sessionId";
     final String contextName = "contextName";
 
@@ -143,7 +141,7 @@ public void findSessionsReturnsProperSessionByIdWhenIdAndContextNameIsValid() th
   }
 
   @Test
-  public void removeProperlyDestroysSessionFromSessionCacheWhenSessionIsNotExpired() {
+  void removeProperlyDestroysSessionFromSessionCacheWhenSessionIsNotExpired() {
     final DeltaSession sessionToDestroy = mock(DeltaSession.class);
     final String sessionId = "sessionId";
 
@@ -156,7 +154,7 @@ public void removeProperlyDestroysSessionFromSessionCacheWhenSessionIsNotExpired
   }
 
   @Test
-  public void removeDoesNotDestroySessionFromSessionCacheWhenSessionIsExpired() {
+  void removeDoesNotDestroySessionFromSessionCacheWhenSessionIsExpired() {
     final DeltaSession sessionToDestroy = mock(DeltaSession.class);
     final String sessionId = "sessionId";
 
@@ -169,7 +167,7 @@ public void removeDoesNotDestroySessionFromSessionCacheWhenSessionIsExpired() {
   }
 
   @Test
-  public void addPutsSessionIntoSessionCacheAndIncrementsStats() {
+  void addPutsSessionIntoSessionCacheAndIncrementsStats() {
     final DeltaSession sessionToPut = mock(DeltaSession.class);
 
     manager.add(sessionToPut);
@@ -179,7 +177,7 @@ public void addPutsSessionIntoSessionCacheAndIncrementsStats() {
   }
 
   @Test
-  public void listIdsListsAllPresentIds() {
+  void listIdsListsAllPresentIds() {
     final Set<String> ids = new HashSet<>();
     ids.add("id1");
     ids.add("id2");
@@ -195,7 +193,7 @@ public void listIdsListsAllPresentIds() {
   }
 
   @Test
-  public void successfulUnloadWithClientServerSessionCachePerformsLocalDestroy()
+  void successfulUnloadWithClientServerSessionCachePerformsLocalDestroy()
       throws IOException {
     when(sessionCache.getCache()).thenReturn(cache);
     when(context.getPath()).thenReturn("contextPath");
@@ -207,51 +205,17 @@ public void successfulUnloadWithClientServerSessionCachePerformsLocalDestroy()
   }
 
   @Test
-  public void propertyChangeSetsMaxInactiveIntervalWithCorrectPropertyNameAndValue() {
-    final String propertyName = "sessionTimeout";
-    final PropertyChangeEvent event = mock(PropertyChangeEvent.class);
-    final Context eventContext = mock(Context.class);
-    final Integer newValue = 1;
+  void getMaxInactiveIntervalReturnsNegativeOneWhenSessionTimeoutIsNegativeOne() {
+    when(context.getSessionTimeout()).thenReturn(-1);
 
-    when(event.getSource()).thenReturn(eventContext);
-    when(event.getPropertyName()).thenReturn(propertyName);
-    when(event.getNewValue()).thenReturn(newValue);
-
-    manager.propertyChange(event);
-
-    verify(manager).setMaxInactiveInterval(newValue * 60);
-  }
-
-  @Test
-  public void propertyChangeDoesNotSetMaxInactiveIntervalWithIncorrectPropertyName() {
-    final String propertyName = "wrong name";
-    final PropertyChangeEvent event = mock(PropertyChangeEvent.class);
-    final Context eventContext = mock(Context.class);
-
-    when(event.getSource()).thenReturn(eventContext);
-    when(event.getPropertyName()).thenReturn(propertyName);
-
-    manager.propertyChange(event);
-
-    verify(manager, times(0)).setMaxInactiveInterval(anyInt());
+    assertThat(manager.getMaxInactiveInterval()).isEqualTo(-1);
   }
 
   @Test
-  public void propertyChangeDoesNotSetNewMaxInactiveIntervalWithCorrectPropertyNameAndInvalidPropertyValue() {
-    final String propertyName = "sessionTimeout";
-    final PropertyChangeEvent event = mock(PropertyChangeEvent.class);
-    final Context eventContext = mock(Context.class);
-    final Integer newValue = -2;
-    final Integer oldValue = DEFAULT_MAX_INACTIVE_INTERVAL;
-
-    when(event.getSource()).thenReturn(eventContext);
-    when(event.getPropertyName()).thenReturn(propertyName);
-    when(event.getNewValue()).thenReturn(newValue);
-    when(event.getOldValue()).thenReturn(oldValue);
-
-    manager.propertyChange(event);
+  void getMaxInactiveIntervalReturnsSeconds() {
+    when(context.getSessionTimeout()).thenReturn(20);
 
-    verify(manager).setMaxInactiveInterval(oldValue);
+    assertThat(manager.getMaxInactiveInterval()).isEqualTo(MINUTES.toSeconds(20));
   }
 }
diff --git a/extensions/geode-modules-tomcat7/build.gradle b/extensions/geode-modules-tomcat7/build.gradle
deleted file mode 100644
index e1e75b52a10f..000000000000
--- a/extensions/geode-modules-tomcat7/build.gradle
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.geode.gradle.plugins.DependencyConstraints - -plugins { - id 'standard-subproject-configuration' - id 'warnings' -} - -evaluationDependsOn(":geode-core") - -dependencies { - //main - implementation(platform(project(':boms:geode-all-bom'))) - - api(project(':geode-core')) - api(project(':extensions:geode-modules')) - - compileOnly(platform(project(':boms:geode-all-bom'))) - compileOnly('org.apache.tomcat:tomcat-catalina:' + DependencyConstraints.get('tomcat7.version')) - compileOnly('org.apache.tomcat:tomcat-coyote:' + DependencyConstraints.get('tomcat7.version')) - - - // test - testImplementation(project(':extensions:geode-modules-test')) - testImplementation('junit:junit') - testImplementation('org.assertj:assertj-core') - testImplementation('org.mockito:mockito-core') - testImplementation('org.apache.tomcat:tomcat-catalina:' + DependencyConstraints.get('tomcat7.version')) - testImplementation('org.apache.tomcat:tomcat-coyote:' + DependencyConstraints.get('tomcat7.version')) - - - // integrationTest - integrationTestImplementation(project(':extensions:geode-modules-test')) - integrationTestImplementation(project(':geode-dunit')) - integrationTestImplementation('org.httpunit:httpunit') - integrationTestImplementation('org.apache.tomcat:tomcat-coyote:' + DependencyConstraints.get('tomcat7.version')) - integrationTestImplementation('org.apache.tomcat:tomcat-catalina:' + DependencyConstraints.get('tomcat7.version')) -} - -sonarqube { - skipProject = true -} diff --git a/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/Tomcat7SessionsTest.java b/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/Tomcat7SessionsTest.java deleted file mode 100644 index f37eedd8593b..000000000000 --- a/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/Tomcat7SessionsTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.geode.modules.session; - -import static org.junit.Assert.assertEquals; - -import com.meterware.httpunit.GetMethodWebRequest; -import com.meterware.httpunit.WebConversation; -import com.meterware.httpunit.WebRequest; -import com.meterware.httpunit.WebResponse; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -import org.apache.geode.modules.session.catalina.Tomcat7DeltaSessionManager; -import org.apache.geode.test.junit.categories.HttpSessionTest; - -@Category({HttpSessionTest.class}) -public class Tomcat7SessionsTest extends AbstractSessionsTest { - - // Set up the session manager we need - @BeforeClass - public static void setupClass() throws Exception { - setupServer(new Tomcat7DeltaSessionManager()); - } - - /** - * Test setting the session expiration - */ - @Test - @Override - public void testSessionExpiration1() throws Exception { - // TestSessions only live for a minute - sessionManager.getTheContext().setSessionTimeout(1); - - final String key = "value_testSessionExpiration1"; - final String value = "Foo"; - - final WebConversation wc = new WebConversation(); - final WebRequest req = new GetMethodWebRequest(String.format("http://localhost:%d/test", port)); - - // Set an attribute - req.setParameter("cmd", QueryCommand.SET.name()); - req.setParameter("param", key); - req.setParameter("value", value); - WebResponse response = wc.getResponse(req); - - // Sleep a while - Thread.sleep(65000); - - // The attribute should not be accessible now... - req.setParameter("cmd", QueryCommand.GET.name()); - req.setParameter("param", key); - response = wc.getResponse(req); - - assertEquals("", response.getText()); - } -} diff --git a/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/catalina/CommitSessionValveIntegrationTest.java b/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/catalina/CommitSessionValveIntegrationTest.java deleted file mode 100644 index b64e86219071..000000000000 --- a/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/catalina/CommitSessionValveIntegrationTest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */
-
-package org.apache.geode.modules.session.catalina;
-
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import org.apache.catalina.Context;
-import org.apache.catalina.connector.Connector;
-import org.apache.catalina.connector.Request;
-import org.apache.catalina.connector.Response;
-import org.apache.coyote.OutputBuffer;
-import org.apache.juli.logging.Log;
-import org.junit.Before;
-
-public class CommitSessionValveIntegrationTest
-    extends AbstractCommitSessionValveIntegrationTest<Tomcat7CommitSessionValve> {
-
-  @Before
-  public void setUp() {
-    final Context context = mock(Context.class);
-    doReturn(mock(Log.class)).when(context).getLogger();
-
-    request = mock(Request.class);
-    doReturn(context).when(request).getContext();
-
-    final OutputBuffer outputBuffer = mock(OutputBuffer.class);
-
-    final org.apache.coyote.Response coyoteResponse = new org.apache.coyote.Response();
-    coyoteResponse.setOutputBuffer(outputBuffer);
-
-    response = new Response();
-    response.setConnector(mock(Connector.class));
-    response.setRequest(request);
-    response.setCoyoteResponse(coyoteResponse);
-  }
-
-
-  @Override
-  protected Tomcat7CommitSessionValve createCommitSessionValve() {
-    return new Tomcat7CommitSessionValve();
-  }
-
-}
diff --git a/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/catalina/DeltaSession7Test.java b/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/catalina/DeltaSession7Test.java
deleted file mode 100644
index 7af5a91c3e15..000000000000
--- a/extensions/geode-modules-tomcat7/src/integrationTest/java/org/apache/geode/modules/session/catalina/DeltaSession7Test.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.modules.session.catalina;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-
-
-public class DeltaSession7Test
-    extends AbstractDeltaSessionIntegrationTest<Tomcat7DeltaSessionManager, DeltaSession7> {
-
-  public DeltaSession7Test() {
-    super(mock(Tomcat7DeltaSessionManager.class));
-  }
-
-  @Override
-  public void before() {
-    super.before();
-    when(manager.getContainer()).thenReturn(context);
-  }
-
-  @Override
-  protected DeltaSession7 newSession(Tomcat7DeltaSessionManager manager) {
-    return new DeltaSession7(manager);
-  }
-
-}
diff --git a/extensions/geode-modules-tomcat7/src/integrationTest/resources/tomcat/conf/tomcat-users.xml b/extensions/geode-modules-tomcat7/src/integrationTest/resources/tomcat/conf/tomcat-users.xml
deleted file mode 100644
index 6c9f21730f15..000000000000
--- a/extensions/geode-modules-tomcat7/src/integrationTest/resources/tomcat/conf/tomcat-users.xml
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/extensions/geode-modules-tomcat7/src/integrationTest/resources/tomcat/logs/.gitkeep b/extensions/geode-modules-tomcat7/src/integrationTest/resources/tomcat/logs/.gitkeep
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/extensions/geode-modules-tomcat7/src/integrationTest/resources/tomcat/temp/.gitkeep b/extensions/geode-modules-tomcat7/src/integrationTest/resources/tomcat/temp/.gitkeep
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java b/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java
deleted file mode 100644
index 1371e121e5c8..000000000000
--- a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession7.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.modules.session.catalina;
-
-import org.apache.catalina.Manager;
-
-@SuppressWarnings("serial")
-public class DeltaSession7 extends DeltaSession {
-
-  /**
-   * Construct a new Session associated with no Manager. The
-   * Manager will be assigned later using {@link #setOwner(Object)}.
-   */
-  @SuppressWarnings("unused")
-  public DeltaSession7() {
-    super();
-  }
-
-  /**
-   * Construct a new Session associated with the specified Manager.
- * - * @param manager The manager with which this Session is associated - */ - DeltaSession7(Manager manager) { - super(manager); - } -} diff --git a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionOutputBuffer.java b/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionOutputBuffer.java deleted file mode 100644 index fcf01b2e3e5a..000000000000 --- a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionOutputBuffer.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.geode.modules.session.catalina; - -import java.io.IOException; - -import org.apache.coyote.OutputBuffer; -import org.apache.coyote.Response; -import org.apache.tomcat.util.buf.ByteChunk; - -/** - * Delegating {@link OutputBuffer} that commits sessions on write through. Output data is buffered - * ahead of this object and flushed through this interface when full or explicitly flushed. - */ -class Tomcat7CommitSessionOutputBuffer implements OutputBuffer { - - private final SessionCommitter sessionCommitter; - private final OutputBuffer delegate; - - public Tomcat7CommitSessionOutputBuffer(final SessionCommitter sessionCommitter, - final OutputBuffer delegate) { - this.sessionCommitter = sessionCommitter; - this.delegate = delegate; - } - - @Override - public int doWrite(final ByteChunk chunk, final Response response) throws IOException { - sessionCommitter.commit(); - return delegate.doWrite(chunk, response); - } - - @Override - public long getBytesWritten() { - return delegate.getBytesWritten(); - } - - OutputBuffer getDelegate() { - return delegate; - } -} diff --git a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionValve.java b/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionValve.java deleted file mode 100644 index f6a483973f45..000000000000 --- a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionValve.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.modules.session.catalina;
-
-import java.lang.reflect.Field;
-
-import org.apache.catalina.connector.Request;
-import org.apache.catalina.connector.Response;
-import org.apache.coyote.OutputBuffer;
-
-public class Tomcat7CommitSessionValve
-    extends AbstractCommitSessionValve<Tomcat7CommitSessionValve> {
-
-  private static final Field outputBufferField;
-
-  static {
-    try {
-      outputBufferField = org.apache.coyote.Response.class.getDeclaredField("outputBuffer");
-      outputBufferField.setAccessible(true);
-    } catch (final NoSuchFieldException e) {
-      throw new IllegalStateException(e);
-    }
-  }
-
-  @Override
-  Response wrapResponse(final Response response) {
-    final org.apache.coyote.Response coyoteResponse = response.getCoyoteResponse();
-    final OutputBuffer delegateOutputBuffer = getOutputBuffer(coyoteResponse);
-    if (!(delegateOutputBuffer instanceof Tomcat7CommitSessionOutputBuffer)) {
-      final Request request = response.getRequest();
-      final OutputBuffer sessionCommitOutputBuffer =
-          new Tomcat7CommitSessionOutputBuffer(() -> commitSession(request), delegateOutputBuffer);
-      coyoteResponse.setOutputBuffer(sessionCommitOutputBuffer);
-    }
-    return response;
-  }
-
-  static OutputBuffer getOutputBuffer(final org.apache.coyote.Response coyoteResponse) {
-    try {
-      return (OutputBuffer) outputBufferField.get(coyoteResponse);
-    } catch (final IllegalAccessException e) {
-      throw new IllegalStateException(e);
-    }
-  }
-}
diff --git a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7DeltaSessionManager.java b/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7DeltaSessionManager.java
deleted file mode 100644
index ec2e00db9bfb..000000000000
--- a/extensions/geode-modules-tomcat7/src/main/java/org/apache/geode/modules/session/catalina/Tomcat7DeltaSessionManager.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.geode.modules.session.catalina;
-
-import java.io.IOException;
-
-import org.apache.catalina.LifecycleException;
-import org.apache.catalina.LifecycleListener;
-import org.apache.catalina.LifecycleState;
-import org.apache.catalina.session.StandardSession;
-
-public class Tomcat7DeltaSessionManager extends DeltaSessionManager<Tomcat7CommitSessionValve> {
-
-  /**
-   * The LifecycleSupport for this component.
- */ - @SuppressWarnings("deprecation") - protected org.apache.catalina.util.LifecycleSupport lifecycle = - new org.apache.catalina.util.LifecycleSupport(this); - - /** - * Prepare for the beginning of active use of the public methods of this component. This method - * should be called after configure(), and before any of the public methods of the - * component are utilized. - * - * @throws LifecycleException if this component detects a fatal error that prevents this component - * from being used - */ - @Override - public void startInternal() throws LifecycleException { - startInternalBase(); - if (getLogger().isDebugEnabled()) { - getLogger().debug(this + ": Starting"); - } - if (started.get()) { - return; - } - - lifecycle.fireLifecycleEvent(START_EVENT, null); - - // Register our various valves - registerJvmRouteBinderValve(); - - if (isCommitValveEnabled()) { - registerCommitSessionValve(); - } - - // Initialize the appropriate session cache interface - initializeSessionCache(); - - try { - load(); - } catch (ClassNotFoundException | IOException e) { - throw new LifecycleException("Exception starting manager", e); - } - - // Create the timer and schedule tasks - scheduleTimerTasks(); - - started.set(true); - setLifecycleState(LifecycleState.STARTING); - } - - void setLifecycleState(LifecycleState newState) throws LifecycleException { - setState(newState); - } - - void startInternalBase() throws LifecycleException { - super.startInternal(); - } - - /** - * Gracefully terminate the active use of the public methods of this component. This method should - * be the last one called on a given instance of this component. - * - * @throws LifecycleException if this component detects a fatal error that needs to be reported - */ - @Override - public void stopInternal() throws LifecycleException { - stopInternalBase(); - if (getLogger().isDebugEnabled()) { - getLogger().debug(this + ": Stopping"); - } - - try { - unload(); - } catch (IOException e) { - getLogger().error("Unable to unload sessions", e); - } - - started.set(false); - lifecycle.fireLifecycleEvent(STOP_EVENT, null); - - // StandardManager expires all Sessions here. - // All Sessions are not known by this Manager. - - super.destroyInternal(); - - // Clear any sessions to be touched - getSessionsToTouch().clear(); - - // Cancel the timer - cancelTimer(); - - // Unregister the JVM route valve - unregisterJvmRouteBinderValve(); - - if (isCommitValveEnabled()) { - unregisterCommitSessionValve(); - } - - setLifecycleState(LifecycleState.STOPPING); - } - - void stopInternalBase() throws LifecycleException { - super.stopInternal(); - } - - void destroyInternalBase() throws LifecycleException { - super.destroyInternal(); - } - - /** - * Add a lifecycle event listener to this component. - * - * @param listener The listener to add - */ - @Override - public void addLifecycleListener(LifecycleListener listener) { - lifecycle.addLifecycleListener(listener); - } - - /** - * Get the lifecycle listeners associated with this lifecycle. If this Lifecycle has no listeners - * registered, a zero-length array is returned. - */ - @Override - public LifecycleListener[] findLifecycleListeners() { - return lifecycle.findLifecycleListeners(); - } - - /** - * Remove a lifecycle event listener from this component. 
- *
-   * @param listener The listener to remove
-   */
-  @Override
-  public void removeLifecycleListener(LifecycleListener listener) {
-    lifecycle.removeLifecycleListener(listener);
-  }
-
-  @Override
-  protected StandardSession getNewSession() {
-    return new DeltaSession7(this);
-  }
-
-  @Override
-  protected Tomcat7CommitSessionValve createCommitSessionValve() {
-    return new Tomcat7CommitSessionValve();
-  }
-
-}
diff --git a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/DeltaSession7Test.java b/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/DeltaSession7Test.java
deleted file mode 100644
index dd53c9c99b25..000000000000
--- a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/DeltaSession7Test.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.modules.session.catalina;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-
-import javax.servlet.http.HttpSessionAttributeListener;
-import javax.servlet.http.HttpSessionBindingEvent;
-
-import org.apache.catalina.Context;
-import org.apache.catalina.Manager;
-import org.apache.juli.logging.Log;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-
-import org.apache.geode.internal.util.BlobHelper;
-
-public class DeltaSession7Test extends AbstractDeltaSessionTest<DeltaSession7> {
-  final HttpSessionAttributeListener listener = mock(HttpSessionAttributeListener.class);
-
-  @Before
-  @Override
-  public void setup() {
-    super.setup();
-
-    final Context context = mock(Context.class);
-    when(manager.getContainer()).thenReturn(context);
-    when(context.getApplicationEventListeners()).thenReturn(new Object[] {listener});
-    when(context.getLogger()).thenReturn(mock(Log.class));
-  }
-
-  @Override
-  protected DeltaSession7 newDeltaSession(Manager manager) {
-    return new DeltaSession7(manager);
-  }
-
-  @Test
-  public void serializedAttributesNotLeakedInAttributeReplaceEvent() throws IOException {
-    final DeltaSession7 session = spy(new DeltaSession7(manager));
-    session.setValid(true);
-    final String name = "attribute";
-    final Object value1 = "value1";
-    final byte[] serializedValue1 = BlobHelper.serializeToBlob(value1);
-    // simulates initial deserialized state with serialized attribute values.
-    session.getAttributes().put(name, serializedValue1);
-
-    final Object value2 = "value2";
-    session.setAttribute(name, value2);
-
-    final ArgumentCaptor<HttpSessionBindingEvent> event =
-        ArgumentCaptor.forClass(HttpSessionBindingEvent.class);
-    verify(listener).attributeReplaced(event.capture());
-    verifyNoMoreInteractions(listener);
-    assertThat(event.getValue().getValue()).isEqualTo(value1);
-  }
-
-  @Test
-  public void serializedAttributesNotLeakedInAttributeRemovedEvent() throws IOException {
-    final DeltaSession7 session = spy(new DeltaSession7(manager));
-    session.setValid(true);
-    final String name = "attribute";
-    final Object value1 = "value1";
-    final byte[] serializedValue1 = BlobHelper.serializeToBlob(value1);
-    // simulates initial deserialized state with serialized attribute values.
-    session.getAttributes().put(name, serializedValue1);
-
-    session.removeAttribute(name);
-
-    final ArgumentCaptor<HttpSessionBindingEvent> event =
-        ArgumentCaptor.forClass(HttpSessionBindingEvent.class);
-    verify(listener).attributeRemoved(event.capture());
-    verifyNoMoreInteractions(listener);
-    assertThat(event.getValue().getValue()).isEqualTo(value1);
-  }
-
-  @Test
-  public void serializedAttributesLeakedInAttributeReplaceEventWhenPreferDeserializedFormFalse()
-      throws IOException {
-    setPreferDeserializedFormFalse();
-
-    final DeltaSession7 session = spy(new DeltaSession7(manager));
-    session.setValid(true);
-    final String name = "attribute";
-    final Object value1 = "value1";
-    final byte[] serializedValue1 = BlobHelper.serializeToBlob(value1);
-    // simulates initial deserialized state with serialized attribute values.
-    session.getAttributes().put(name, serializedValue1);
-
-    final Object value2 = "value2";
-    session.setAttribute(name, value2);
-
-    final ArgumentCaptor<HttpSessionBindingEvent> event =
-        ArgumentCaptor.forClass(HttpSessionBindingEvent.class);
-    verify(listener).attributeReplaced(event.capture());
-    verifyNoMoreInteractions(listener);
-    assertThat(event.getValue().getValue()).isInstanceOf(byte[].class);
-  }
-
-  @Test
-  public void serializedAttributesLeakedInAttributeRemovedEventWhenPreferDeserializedFormFalse()
-      throws IOException {
-    setPreferDeserializedFormFalse();
-
-    final DeltaSession7 session = spy(new DeltaSession7(manager));
-    session.setValid(true);
-    final String name = "attribute";
-    final Object value1 = "value1";
-    final byte[] serializedValue1 = BlobHelper.serializeToBlob(value1);
-    // simulates initial deserialized state with serialized attribute values.
-    session.getAttributes().put(name, serializedValue1);
-
-    session.removeAttribute(name);
-
-    final ArgumentCaptor<HttpSessionBindingEvent> event =
-        ArgumentCaptor.forClass(HttpSessionBindingEvent.class);
-    verify(listener).attributeRemoved(event.capture());
-    verifyNoMoreInteractions(listener);
-    assertThat(event.getValue().getValue()).isInstanceOf(byte[].class);
-  }
-
-  @SuppressWarnings("deprecation")
-  protected void setPreferDeserializedFormFalse() {
-    when(manager.getPreferDeserializedForm()).thenReturn(false);
-  }
-
-}
diff --git a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionOutputBufferTest.java b/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionOutputBufferTest.java
deleted file mode 100644
index 20facaf916a2..000000000000
--- a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionOutputBufferTest.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.modules.session.catalina;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-
-import org.apache.coyote.OutputBuffer;
-import org.apache.coyote.Response;
-import org.apache.tomcat.util.buf.ByteChunk;
-import org.junit.Test;
-import org.mockito.InOrder;
-
-public class Tomcat7CommitSessionOutputBufferTest {
-
-  final SessionCommitter sessionCommitter = mock(SessionCommitter.class);
-  final OutputBuffer delegate = mock(OutputBuffer.class);
-
-  final Tomcat7CommitSessionOutputBuffer commitSessionOutputBuffer =
-      new Tomcat7CommitSessionOutputBuffer(sessionCommitter, delegate);
-
-  @Test
-  public void doWrite() throws IOException {
-    final ByteChunk byteChunk = new ByteChunk();
-    final Response response = new Response();
-
-    commitSessionOutputBuffer.doWrite(byteChunk, response);
-
-    final InOrder inOrder = inOrder(sessionCommitter, delegate);
-    inOrder.verify(sessionCommitter).commit();
-    inOrder.verify(delegate).doWrite(byteChunk, response);
-    inOrder.verifyNoMoreInteractions();
-  }
-
-
-  @Test
-  public void getBytesWritten() {
-    when(delegate.getBytesWritten()).thenReturn(42L);
-
-    assertThat(commitSessionOutputBuffer.getBytesWritten()).isEqualTo(42L);
-
-    final InOrder inOrder = inOrder(sessionCommitter, delegate);
-    inOrder.verify(delegate).getBytesWritten();
-    inOrder.verifyNoMoreInteractions();
-  }
-}
diff --git a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionValveTest.java b/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionValveTest.java
deleted file mode 100644
index c9be9b26fded..000000000000
--- a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionValveTest.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.modules.session.catalina;
-
-import static org.apache.geode.modules.session.catalina.Tomcat7CommitSessionValve.getOutputBuffer;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.reset;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import org.apache.catalina.Context;
-import org.apache.catalina.connector.Connector;
-import org.apache.catalina.connector.Request;
-import org.apache.catalina.connector.Response;
-import org.apache.coyote.OutputBuffer;
-import org.apache.tomcat.util.buf.ByteChunk;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.InOrder;
-
-
-public class Tomcat7CommitSessionValveTest {
-
-  private final Tomcat7CommitSessionValve valve = new Tomcat7CommitSessionValve();
-  private final OutputBuffer outputBuffer = mock(OutputBuffer.class);
-  private Response response;
-  private org.apache.coyote.Response coyoteResponse;
-
-  @Before
-  public void before() {
-    final Connector connector = mock(Connector.class);
-
-    final Context context = mock(Context.class);
-
-    final Request request = mock(Request.class);
-    doReturn(context).when(request).getContext();
-
-    coyoteResponse = new org.apache.coyote.Response();
-    coyoteResponse.setOutputBuffer(outputBuffer);
-
-    response = new Response();
-    response.setConnector(connector);
-    response.setRequest(request);
-    response.setCoyoteResponse(coyoteResponse);
-  }
-
-  @Test
-  public void wrappedOutputBufferForwardsToDelegate() throws IOException {
-    wrappedOutputBufferForwardsToDelegate(new byte[] {'a', 'b', 'c'});
-  }
-
-  @Test
-  public void recycledResponseObjectDoesNotWrapAlreadyWrappedOutputBuffer() throws IOException {
-    wrappedOutputBufferForwardsToDelegate(new byte[] {'a', 'b', 'c'});
-    response.recycle();
-    reset(outputBuffer);
-    wrappedOutputBufferForwardsToDelegate(new byte[] {'d', 'e', 'f'});
-  }
-
-  private void wrappedOutputBufferForwardsToDelegate(final byte[] bytes) throws IOException {
-    final OutputStream outputStream =
-        valve.wrapResponse(response).getResponse().getOutputStream();
-    outputStream.write(bytes);
-    outputStream.flush();
-
-    final ArgumentCaptor<ByteChunk> byteChunk = ArgumentCaptor.forClass(ByteChunk.class);
-
-    final InOrder inOrder = inOrder(outputBuffer);
-    inOrder.verify(outputBuffer).doWrite(byteChunk.capture(), any());
-    inOrder.verifyNoMoreInteractions();
-
-    final OutputBuffer wrappedOutputBuffer = getOutputBuffer(coyoteResponse);
-    assertThat(wrappedOutputBuffer).isInstanceOf(Tomcat7CommitSessionOutputBuffer.class);
-    assertThat(((Tomcat7CommitSessionOutputBuffer) wrappedOutputBuffer).getDelegate())
-        .isNotInstanceOf(Tomcat7CommitSessionOutputBuffer.class);
-
-    assertThat(byteChunk.getValue().getBytes()).contains(bytes);
-  }
-}
diff --git a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7DeltaSessionManagerTest.java b/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7DeltaSessionManagerTest.java
deleted file mode 100644
index 2d900bda902d..000000000000
--- a/extensions/geode-modules-tomcat7/src/test/java/org/apache/geode/modules/session/catalina/Tomcat7DeltaSessionManagerTest.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License
- * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
- * or implied. See the License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.geode.modules.session.catalina;
-
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-
-import java.io.IOException;
-
-import org.apache.catalina.Context;
-import org.apache.catalina.LifecycleException;
-import org.apache.catalina.LifecycleState;
-import org.apache.catalina.Pipeline;
-import org.junit.Before;
-import org.junit.Test;
-
-import org.apache.geode.internal.cache.GemFireCacheImpl;
-
-public class Tomcat7DeltaSessionManagerTest
-    extends AbstractDeltaSessionManagerTest<Tomcat7DeltaSessionManager> {
-  private Pipeline pipeline;
-
-  @Before
-  public void setup() {
-    manager = spy(new Tomcat7DeltaSessionManager());
-    initTest();
-    pipeline = mock(Pipeline.class);
-  }
-
-  @Test
-  public void startInternalSucceedsInitialRun()
-      throws LifecycleException, IOException, ClassNotFoundException {
-    doNothing().when(manager).startInternalBase();
-    doReturn(true).when(manager).isCommitValveEnabled();
-    doReturn(cache).when(manager).getAnyCacheInstance();
-    doReturn(true).when((GemFireCacheImpl) cache).isClient();
-    doNothing().when(manager).initSessionCache();
-    doReturn(pipeline).when(manager).getPipeline();
-
-    // Unit testing for load is handled in the parent DeltaSessionManagerJUnitTest class
-    doNothing().when(manager).load();
-
-    doNothing().when(manager)
-        .setLifecycleState(LifecycleState.STARTING);
-
-    assertThat(manager.started).isFalse();
-    manager.startInternal();
-    assertThat(manager.started).isTrue();
-    verify(manager).setLifecycleState(LifecycleState.STARTING);
-  }
-
-  @Test
-  public void startInternalDoesNotReinitializeManagerOnSubsequentCalls()
-      throws LifecycleException, IOException, ClassNotFoundException {
-    doNothing().when(manager).startInternalBase();
-    doReturn(true).when(manager).isCommitValveEnabled();
-    doReturn(cache).when(manager).getAnyCacheInstance();
-    doReturn(true).when((GemFireCacheImpl) cache).isClient();
-    doNothing().when(manager).initSessionCache();
-    doReturn(pipeline).when(manager).getPipeline();
-
-    // Unit testing for load is handled in the parent DeltaSessionManagerJUnitTest class
-    doNothing().when(manager).load();
-
-    doNothing().when(manager)
-        .setLifecycleState(LifecycleState.STARTING);
-
-    assertThat(manager.started).isFalse();
-    manager.startInternal();
-
-    // Verify that various initialization actions were performed
-    assertThat(manager.started).isTrue();
-    verify(manager).initializeSessionCache();
-    verify(manager).setLifecycleState(LifecycleState.STARTING);
-
-    // Rerun startInternal
-    manager.startInternal();
-
-    // Verify that the initialization actions were still only performed one time
-
verify(manager).initializeSessionCache(); - verify(manager).setLifecycleState(LifecycleState.STARTING); - } - - @Test - public void stopInternal() throws LifecycleException, IOException { - doNothing().when(manager).startInternalBase(); - doNothing().when(manager).destroyInternalBase(); - doReturn(true).when(manager).isCommitValveEnabled(); - - // Unit testing for unload is handled in the parent DeltaSessionManagerJUnitTest class - doNothing().when(manager).unload(); - - doNothing().when(manager) - .setLifecycleState(LifecycleState.STOPPING); - - manager.stopInternal(); - - assertThat(manager.started).isFalse(); - verify(manager).setLifecycleState(LifecycleState.STOPPING); - } - - @Test - public void setContainerSetsProperContainerAndMaxInactiveInterval() { - final Context container = mock(Context.class); - final int containerMaxInactiveInterval = 3; - - doReturn(containerMaxInactiveInterval).when(container).getSessionTimeout(); - - manager.setContainer(container); - verify(manager).setMaxInactiveInterval(containerMaxInactiveInterval * 60); - } -} diff --git a/extensions/geode-modules-tomcat8/build.gradle b/extensions/geode-modules-tomcat8/build.gradle index a24651dd4469..2564d754870a 100644 --- a/extensions/geode-modules-tomcat8/build.gradle +++ b/extensions/geode-modules-tomcat8/build.gradle @@ -37,12 +37,17 @@ dependencies { // test + testCompileOnly(platform(project(':boms:geode-all-bom'))) + testCompileOnly('junit:junit') + testImplementation(project(':extensions:geode-modules-test')) - testImplementation('junit:junit') + testImplementation('org.junit.jupiter:junit-jupiter-api') testImplementation('org.assertj:assertj-core') testImplementation('org.mockito:mockito-core') testImplementation('org.apache.tomcat:tomcat-catalina:' + DependencyConstraints.get('tomcat8.version')) + testRuntimeOnly('org.junit.vintage:junit-vintage-engine') + testRuntimeOnly('org.junit.jupiter:junit-jupiter-engine') // integrationTest integrationTestImplementation(project(':extensions:geode-modules-test')) diff --git a/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/TestSessionsTomcat8Base.java b/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/TestSessionsTomcat8Base.java index e7cec09ebf4a..20c851ee2713 100644 --- a/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/TestSessionsTomcat8Base.java +++ b/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/TestSessionsTomcat8Base.java @@ -14,9 +14,9 @@ */ package org.apache.geode.modules.session; +import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; -import java.beans.PropertyChangeEvent; import java.io.PrintWriter; import java.io.Serializable; @@ -34,7 +34,7 @@ import org.apache.geode.cache.Region; import org.apache.geode.logging.internal.log4j.api.LogService; -import org.apache.geode.modules.session.catalina.DeltaSessionManager; +import org.apache.geode.modules.session.catalina.Tomcat8DeltaSessionManager; import org.apache.geode.test.dunit.rules.CacheRule; import org.apache.geode.test.dunit.rules.DistributedRule; @@ -51,7 +51,7 @@ public abstract class TestSessionsTomcat8Base implements Serializable { EmbeddedTomcat8 server; StandardWrapper servlet; Region region; - DeltaSessionManager sessionManager; + Tomcat8DeltaSessionManager sessionManager; public void basicConnectivityCheck() throws Exception { WebConversation wc = new WebConversation(); 
@@ -177,8 +177,8 @@ public void testInvalidate() throws Exception { */ @Test public void testSessionExpiration1() throws Exception { - // TestSessions only live for a second - sessionManager.setMaxInactiveInterval(1); + // TestSessions only live for a minute + sessionManager.getContext().setSessionTimeout(1); String key = "value_testSessionExpiration1"; String value = "Foo"; @@ -193,7 +193,7 @@ public void testSessionExpiration1() throws Exception { wc.getResponse(req); // Sleep a while - Thread.sleep(65000); + SECONDS.sleep(65); // The attribute should not be accessible now... req.setParameter("cmd", QueryCommand.GET.name()); @@ -203,20 +203,6 @@ public void testSessionExpiration1() throws Exception { assertThat(response.getText()).isEmpty(); } - /** - * Test setting the session expiration via a property change as would happen under normal - * deployment conditions. - */ - @Test - public void testSessionExpiration2() { - // TestSessions only live for a minute - sessionManager - .propertyChange(new PropertyChangeEvent(server.getRootContext(), "sessionTimeout", 30, 1)); - - // Check that the value has been set to 60 seconds - assertThat(sessionManager.getMaxInactiveInterval()).isEqualTo(60); - } - /** * Test expiration of a session by the tomcat container, rather than gemfire expiration */ diff --git a/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java b/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java index 9de6885dec38..c833b50f177d 100644 --- a/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java +++ b/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsClientServerDUnitTest.java @@ -31,14 +31,11 @@ import org.apache.geode.cache.client.ClientCacheFactory; import org.apache.geode.internal.AvailablePortHelper; import org.apache.geode.modules.session.catalina.ClientServerCacheLifecycleListener; -import org.apache.geode.modules.session.catalina.DeltaSessionManager; import org.apache.geode.modules.session.catalina.Tomcat8DeltaSessionManager; import org.apache.geode.test.dunit.rules.ClusterStartupRule; import org.apache.geode.test.dunit.rules.MemberVM; import org.apache.geode.test.junit.categories.SessionTest; - - @Category(SessionTest.class) public class Tomcat8SessionsClientServerDUnitTest extends TestSessionsTomcat8Base { @@ -63,7 +60,6 @@ public void setUp() throws Exception { assertThat(port).isGreaterThan(0); server = new EmbeddedTomcat8(port, "JVM-1"); - assertThat(server).isNotNull(); ClientCacheFactory cacheFactory = new ClientCacheFactory(); assertThat(cacheFactory).isNotNull(); @@ -72,17 +68,13 @@ public void setUp() throws Exception { clientCache = cacheFactory.create(); assertThat(clientCache).isNotNull(); - DeltaSessionManager manager = new Tomcat8DeltaSessionManager(); - assertThat(manager).isNotNull(); - ClientServerCacheLifecycleListener listener = new ClientServerCacheLifecycleListener(); - assertThat(listener).isNotNull(); listener.setProperty(MCAST_PORT, "0"); listener.setProperty(LOG_LEVEL, "config"); server.addLifecycleListener(listener); - sessionManager = manager; + sessionManager = new Tomcat8DeltaSessionManager(); sessionManager.setEnableCommitValve(true); server.getRootContext().setManager(sessionManager); @@ -96,7 +88,7 @@ public void setUp() throws Exception { region = 
sessionManager.getSessionCache().getSessionRegion(); assertThat(region).isNotNull(); - sessionManager.getTheContext().setSessionTimeout(30); + sessionManager.getContext().setSessionTimeout(30); await().until(() -> sessionManager.getState() == LifecycleState.STARTED); basicConnectivityCheck(); diff --git a/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsDUnitTest.java b/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsDUnitTest.java index 67db3227c1ed..584463941f97 100644 --- a/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsDUnitTest.java +++ b/extensions/geode-modules-tomcat8/src/distributedTest/java/org/apache/geode/modules/session/Tomcat8SessionsDUnitTest.java @@ -51,7 +51,7 @@ public void setUp() throws Exception { // Can only retrieve the region once the container has started up (& the cache has started too). region = sessionManager.getSessionCache().getSessionRegion(); - sessionManager.getTheContext().setSessionTimeout(30); + sessionManager.getContext().setSessionTimeout(30); region.clear(); basicConnectivityCheck(); } diff --git a/extensions/geode-modules-tomcat8/src/main/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManager.java b/extensions/geode-modules-tomcat8/src/main/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManager.java index 520846403832..36eab0b435c5 100644 --- a/extensions/geode-modules-tomcat8/src/main/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManager.java +++ b/extensions/geode-modules-tomcat8/src/main/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManager.java @@ -17,7 +17,6 @@ import java.io.IOException; -import org.apache.catalina.Context; import org.apache.catalina.LifecycleException; import org.apache.catalina.LifecycleState; import org.apache.catalina.Pipeline; @@ -128,14 +127,9 @@ void destroyInternalBase() throws LifecycleException { super.destroyInternal(); } - @Override - public int getMaxInactiveInterval() { - return getContext().getSessionTimeout(); - } - @Override protected Pipeline getPipeline() { - return getTheContext().getPipeline(); + return getContext().getPipeline(); } @Override @@ -143,16 +137,6 @@ protected Tomcat8CommitSessionValve createCommitSessionValve() { return new Tomcat8CommitSessionValve(); } - @Override - public Context getTheContext() { - return getContext(); - } - - @Override - public void setMaxInactiveInterval(final int interval) { - getContext().setSessionTimeout(interval); - } - @Override protected StandardSession getNewSession() { return new DeltaSession8(this); diff --git a/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManagerTest.java b/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManagerTest.java index 9741af87b474..34ab05a0a0cb 100644 --- a/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManagerTest.java +++ b/extensions/geode-modules-tomcat8/src/test/java/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManagerTest.java @@ -15,7 +15,6 @@ package org.apache.geode.modules.session.catalina; - import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; @@ -28,8 +27,8 @@ import 
org.apache.catalina.LifecycleException; import org.apache.catalina.LifecycleState; import org.apache.catalina.Pipeline; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.geode.internal.cache.GemFireCacheImpl; @@ -37,8 +36,8 @@ public class Tomcat8DeltaSessionManagerTest extends AbstractDeltaSessionManagerTest { private Pipeline pipeline; - @Before - public void setup() { + @BeforeEach + void setup() { manager = spy(new Tomcat8DeltaSessionManager()); initTest(); pipeline = mock(Pipeline.class); @@ -46,7 +45,7 @@ public void setup() { } @Test - public void startInternalSucceedsInitialRun() + void startInternalSucceedsInitialRun() throws LifecycleException, IOException, ClassNotFoundException { doNothing().when(manager).startInternalBase(); doReturn(true).when(manager).isCommitValveEnabled(); @@ -68,7 +67,7 @@ public void startInternalSucceedsInitialRun() } @Test - public void startInternalDoesNotReinitializeManagerOnSubsequentCalls() + void startInternalDoesNotReinitializeManagerOnSubsequentCalls() throws LifecycleException, IOException, ClassNotFoundException { doNothing().when(manager).startInternalBase(); doReturn(true).when(manager).isCommitValveEnabled(); @@ -100,7 +99,7 @@ public void startInternalDoesNotReinitializeManagerOnSubsequentCalls() } @Test - public void stopInternal() throws LifecycleException, IOException { + void stopInternal() throws LifecycleException, IOException { doNothing().when(manager).startInternalBase(); doNothing().when(manager).destroyInternalBase(); doReturn(true).when(manager).isCommitValveEnabled(); diff --git a/extensions/geode-modules-tomcat8/src/test/resources/expected-pom.xml b/extensions/geode-modules-tomcat8/src/test/resources/expected-pom.xml index 5819c519f638..1f5188219495 100644 --- a/extensions/geode-modules-tomcat8/src/test/resources/expected-pom.xml +++ b/extensions/geode-modules-tomcat8/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-modules-tomcat8 ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,12 +47,12 @@ - org.apache.geode + com.vmware.gemfire geode-core compile - org.apache.geode + com.vmware.gemfire geode-modules compile diff --git a/extensions/geode-modules-tomcat9/build.gradle b/extensions/geode-modules-tomcat9/build.gradle index 542ba93137a4..58f04a79566f 100644 --- a/extensions/geode-modules-tomcat9/build.gradle +++ b/extensions/geode-modules-tomcat9/build.gradle @@ -37,12 +37,18 @@ dependencies { // test + testCompileOnly(platform(project(':boms:geode-all-bom'))) + testCompileOnly('junit:junit') + testImplementation(project(':extensions:geode-modules-test')) - testImplementation('junit:junit') + testImplementation('org.junit.jupiter:junit-jupiter-api') testImplementation('org.assertj:assertj-core') testImplementation('org.mockito:mockito-core') testImplementation('org.apache.tomcat:tomcat-catalina:' + DependencyConstraints.get('tomcat9.version')) + testRuntimeOnly('org.junit.vintage:junit-vintage-engine') + testRuntimeOnly('org.junit.jupiter:junit-jupiter-engine') + // integrationTest integrationTestImplementation(project(':extensions:geode-modules-test')) diff --git a/extensions/geode-modules-tomcat9/src/main/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManager.java b/extensions/geode-modules-tomcat9/src/main/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManager.java index e3ce830d60b9..5432c9204a2f 100644 --- a/extensions/geode-modules-tomcat9/src/main/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManager.java +++ b/extensions/geode-modules-tomcat9/src/main/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManager.java @@ -16,7 +16,6 @@ import java.io.IOException; -import org.apache.catalina.Context; import org.apache.catalina.LifecycleException; import org.apache.catalina.LifecycleState; import org.apache.catalina.Pipeline; @@ -127,14 +126,9 @@ void destroyInternalBase() throws LifecycleException { super.destroyInternal(); } - @Override - public int getMaxInactiveInterval() { - return getContext().getSessionTimeout(); - } - @Override protected Pipeline getPipeline() { - return getTheContext().getPipeline(); + return getContext().getPipeline(); } @Override @@ -142,16 +136,6 @@ protected Tomcat9CommitSessionValve createCommitSessionValve() { return new Tomcat9CommitSessionValve(); } - @Override - public Context getTheContext() { - return getContext(); - } - - @Override - public void setMaxInactiveInterval(final int interval) { - getContext().setSessionTimeout(interval); - } - @Override protected StandardSession getNewSession() { return new DeltaSession9(this); diff --git a/extensions/geode-modules-tomcat9/src/test/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManagerTest.java b/extensions/geode-modules-tomcat9/src/test/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManagerTest.java index 4513f781d39a..255146820cf5 100644 --- a/extensions/geode-modules-tomcat9/src/test/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManagerTest.java +++ b/extensions/geode-modules-tomcat9/src/test/java/org/apache/geode/modules/session/catalina/Tomcat9DeltaSessionManagerTest.java @@ -15,7 +15,6 @@ package org.apache.geode.modules.session.catalina; - import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.doNothing; import static 
org.mockito.Mockito.doReturn; @@ -28,8 +27,8 @@ import org.apache.catalina.LifecycleException; import org.apache.catalina.LifecycleState; import org.apache.catalina.Pipeline; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.apache.geode.internal.cache.GemFireCacheImpl; @@ -37,8 +36,8 @@ public class Tomcat9DeltaSessionManagerTest extends AbstractDeltaSessionManagerTest { private Pipeline pipeline; - @Before - public void setup() { + @BeforeEach + void setup() { manager = spy(new Tomcat9DeltaSessionManager()); initTest(); pipeline = mock(Pipeline.class); @@ -46,7 +45,7 @@ public void setup() { } @Test - public void startInternalSucceedsInitialRun() + void startInternalSucceedsInitialRun() throws LifecycleException, IOException, ClassNotFoundException { doNothing().when(manager).startInternalBase(); doReturn(true).when(manager).isCommitValveEnabled(); @@ -68,7 +67,7 @@ public void startInternalSucceedsInitialRun() } @Test - public void startInternalDoesNotReinitializeManagerOnSubsequentCalls() + void startInternalDoesNotReinitializeManagerOnSubsequentCalls() throws LifecycleException, IOException, ClassNotFoundException { doNothing().when(manager).startInternalBase(); doReturn(true).when(manager).isCommitValveEnabled(); @@ -100,7 +99,7 @@ public void startInternalDoesNotReinitializeManagerOnSubsequentCalls() } @Test - public void stopInternal() throws LifecycleException, IOException { + void stopInternal() throws LifecycleException, IOException { doNothing().when(manager).startInternalBase(); doNothing().when(manager).destroyInternalBase(); doReturn(true).when(manager).isCommitValveEnabled(); diff --git a/extensions/geode-modules-tomcat9/src/test/resources/expected-pom.xml b/extensions/geode-modules-tomcat9/src/test/resources/expected-pom.xml index 6187a17ffdb4..320b8c0c8752 100644 --- a/extensions/geode-modules-tomcat9/src/test/resources/expected-pom.xml +++ b/extensions/geode-modules-tomcat9/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-modules-tomcat9 ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,12 +47,12 @@ - org.apache.geode + com.vmware.gemfire geode-core compile - org.apache.geode + com.vmware.gemfire geode-modules compile diff --git a/extensions/geode-modules/build.gradle b/extensions/geode-modules/build.gradle index d32ad3315341..534f31a8f524 100644 --- a/extensions/geode-modules/build.gradle +++ b/extensions/geode-modules/build.gradle @@ -27,39 +27,44 @@ evaluationDependsOn(":geode-core") dependencies { // main implementation(platform(project(':boms:geode-all-bom'))) - api(project(':geode-logging')) implementation(project(':geode-membership')) - api(project(':geode-common')) implementation(project(':geode-serialization')) implementation('org.slf4j:slf4j-api') + implementation('org.apache.commons:commons-lang3') + api(project(':geode-logging')) + api(project(':geode-common')) api(project(':geode-core')) compileOnly(platform(project(':boms:geode-all-bom'))) compileOnly('javax.servlet:javax.servlet-api') - compileOnly('org.apache.tomcat:catalina-ha:' + DependencyConstraints.get('tomcat6.version')) + compileOnly('org.apache.tomcat:tomcat-catalina-ha:' + DependencyConstraints.get('tomcat8.version')) - implementation('org.apache.commons:commons-lang3') // test + testCompileOnly(platform(project(':boms:geode-all-bom'))) + testCompileOnly('junit:junit') + testImplementation('org.apache.bcel:bcel') - testImplementation('junit:junit') - testRuntimeOnly('org.junit.vintage:junit-vintage-engine') + testImplementation('org.junit.jupiter:junit-jupiter-api') testImplementation('org.assertj:assertj-core') testImplementation('org.mockito:mockito-core') - testImplementation('org.apache.tomcat:catalina-ha:' + DependencyConstraints.get('tomcat6.version')) + testImplementation('org.apache.tomcat:tomcat-catalina-ha:' + DependencyConstraints.get('tomcat8.version')) + + testRuntimeOnly('org.junit.vintage:junit-vintage-engine') + testRuntimeOnly('org.junit.jupiter:junit-jupiter-engine') // integrationTest integrationTestImplementation(project(':extensions:geode-modules-test')) integrationTestImplementation(project(':geode-dunit')) integrationTestImplementation('pl.pragmatists:JUnitParams') - integrationTestImplementation('org.apache.tomcat:catalina-ha:' + DependencyConstraints.get('tomcat6.version')) + integrationTestImplementation('org.apache.tomcat:tomcat-catalina-ha:' + DependencyConstraints.get('tomcat8.version')) // distributedTest distributedTestImplementation(project(':geode-dunit')) - distributedTestImplementation('org.apache.tomcat:catalina-ha:' + DependencyConstraints.get('tomcat6.version')) + distributedTestImplementation('org.apache.tomcat:tomcat-catalina-ha:' + DependencyConstraints.get('tomcat8.version')) } sonarqube { diff --git a/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/Tomcat6SessionsTest.java b/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/Tomcat6SessionsTest.java deleted file mode 100644 index 47da3f4c8618..000000000000 --- a/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/Tomcat6SessionsTest.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. 
The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.modules.session; - -import org.junit.BeforeClass; -import org.junit.experimental.categories.Category; - -import org.apache.geode.modules.session.catalina.Tomcat6DeltaSessionManager; -import org.apache.geode.test.junit.categories.SessionTest; - -@Category(SessionTest.class) -@Deprecated -public class Tomcat6SessionsTest extends AbstractSessionsTest { - - @BeforeClass - public static void setupClass() throws Exception { - setupServer(new Tomcat6DeltaSessionManager()); - } -} diff --git a/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/JvmRouteBinderValveIntegrationTest.java b/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/JvmRouteBinderValveIntegrationTest.java index cf338673762e..37e85836ee67 100644 --- a/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/JvmRouteBinderValveIntegrationTest.java +++ b/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/JvmRouteBinderValveIntegrationTest.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -51,6 +52,8 @@ public class JvmRouteBinderValveIntegrationTest extends AbstractSessionValveInte @Before public void setUp() { request = spy(Request.class); + doNothing().when(request).changeSessionId(anyString()); + response = spy(Response.class); testValve = new TestValve(false); @@ -157,8 +160,8 @@ public void invokeShouldCorrectlyHandleSessionFailover(RegionShortcut regionShor parameterizedSetUp(regionShortcut); when(deltaSessionManager.getJvmRoute()).thenReturn("jvmRoute"); when(deltaSessionManager.getContextName()).thenReturn(TEST_CONTEXT); - when(deltaSessionManager.getContainer()).thenReturn(mock(Context.class)); - when(((Context) deltaSessionManager.getContainer()).getApplicationLifecycleListeners()) + when(deltaSessionManager.getContext()).thenReturn(mock(Context.class)); + when(deltaSessionManager.getContext().getApplicationLifecycleListeners()) .thenReturn(new Object[] {}); doCallRealMethod().when(deltaSessionManager).findSession(anyString()); diff --git a/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/internal/AbstractDeltaSessionIntegrationTest.java b/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/internal/AbstractDeltaSessionIntegrationTest.java index 31668d0b42a7..bddc58c0082b 100644 --- a/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/internal/AbstractDeltaSessionIntegrationTest.java +++ 
b/extensions/geode-modules/src/integrationTest/java/org/apache/geode/modules/session/catalina/internal/AbstractDeltaSessionIntegrationTest.java @@ -66,7 +66,7 @@ void mockDeltaSessionManager() { when(deltaSessionManager.getLogger()).thenReturn(mock(Log.class)); when(deltaSessionManager.getRegionName()).thenReturn(REGION_NAME); when(deltaSessionManager.isBackingCacheAvailable()).thenReturn(true); - when(deltaSessionManager.getContainer()).thenReturn(mock(Context.class)); + when(deltaSessionManager.getContext()).thenReturn(mock(Context.class)); when(deltaSessionManager.getSessionCache()).thenReturn(mock(SessionCache.class)); when(deltaSessionManager.getSessionCache().getOperatingRegion()).thenReturn(httpSessionRegion); } diff --git a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java index 9fe63bc6be6e..d3c7c2b08e3a 100644 --- a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java +++ b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSession.java @@ -34,7 +34,6 @@ import javax.servlet.http.HttpSession; import org.apache.catalina.Manager; -import org.apache.catalina.ha.session.SerializablePrincipal; import org.apache.catalina.realm.GenericPrincipal; import org.apache.catalina.security.SecurityUtil; import org.apache.catalina.session.StandardSession; @@ -138,9 +137,8 @@ public Principal getPrincipal() { if (principal == null && serializedPrincipal != null) { final Log logger = deltaSessionManager.getLogger(); - final SerializablePrincipal sp; try { - sp = (SerializablePrincipal) BlobHelper.deserializeBlob(serializedPrincipal); + principal = (GenericPrincipal) BlobHelper.deserializeBlob(serializedPrincipal); } catch (Exception e) { logger.warn(this + ": Serialized principal contains a byte[] that cannot be deserialized due to the following exception", @@ -148,8 +146,6 @@ public Principal getPrincipal() { return null; } - principal = sp.getPrincipal(deltaSessionManager.getTheContext().getRealm()); - if (logger.isDebugEnabled()) { logger.debug(this + ": Deserialized principal: " + principal); } @@ -177,12 +173,11 @@ private byte[] getSerializedPrincipal() { if (serializedPrincipal == null) { if (principal != null && principal instanceof GenericPrincipal) { GenericPrincipal gp = (GenericPrincipal) principal; - SerializablePrincipal sp = SerializablePrincipal.createPrincipal(gp); - serializedPrincipal = serialize(sp); + serializedPrincipal = serialize(gp); if (manager != null) { final Log logger = getDeltaSessionManager().getLogger(); if (logger.isDebugEnabled()) { - logger.debug(this + ": Serialized principal: " + sp); + logger.debug(this + ": Serialized principal: " + gp); } } } diff --git a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSessionManager.java b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSessionManager.java index 99ef7d26c450..674d42e563f9 100644 --- a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSessionManager.java +++ b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/DeltaSessionManager.java @@ -14,8 +14,10 @@ */ package org.apache.geode.modules.session.catalina; -import java.beans.PropertyChangeEvent; -import java.beans.PropertyChangeListener; +import static java.lang.Math.max; +import static 
java.lang.Math.min; +import static java.util.concurrent.TimeUnit.MINUTES; + import java.io.IOException; import java.util.Collections; import java.util.HashSet; @@ -27,8 +29,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.catalina.Container; -import org.apache.catalina.Context; import org.apache.catalina.Lifecycle; import org.apache.catalina.Pipeline; import org.apache.catalina.Session; @@ -43,16 +43,12 @@ import org.apache.geode.internal.cache.GemFireCacheImpl; import org.apache.geode.modules.session.catalina.internal.DeltaSessionStatistics; import org.apache.geode.modules.util.ContextMapper; -import org.apache.geode.modules.util.RegionConfiguration; import org.apache.geode.modules.util.RegionHelper; public abstract class DeltaSessionManager<CommitSessionValveT extends AbstractCommitSessionValve<?>> extends ManagerBase - implements Lifecycle, PropertyChangeListener, SessionManager, DeltaSessionManagerConfiguration { + implements Lifecycle, SessionManager, DeltaSessionManagerConfiguration { - static final String catalinaBaseSystemProperty = "catalina.base"; - static final String javaTempDirSystemProperty = "java.io.tmpdir"; - static final String fileSeparatorSystemProperty = "file.separator"; /** * The number of rejected sessions. */ @@ -149,8 +145,9 @@ public void setRegionName(String regionName) { } @Override - public void setMaxInactiveInterval(final int interval) { - super.setMaxInactiveInterval(interval); + public int getMaxInactiveInterval() { + return (int) max(min(MINUTES.toSeconds(getContext().getSessionTimeout()), Integer.MAX_VALUE), + -1); } @Override @@ -261,9 +258,10 @@ public boolean isBackingCacheAvailable() { @Deprecated @Override public void setPreferDeserializedForm(boolean enable) { - log.warn("Use of deprecated preferDeserializedForm property to be removed in future release."); + getLogger() + .warn("Use of deprecated preferDeserializedForm property to be removed in future release."); if (!enable) { - log.warn( + getLogger().warn( "Use of HttpSessionAttributeListener may result in serialized form in HttpSessionBindingEvent."); } preferDeserializedForm = enable; } @@ -307,33 +305,6 @@ public boolean isClientServer() { return getSessionCache().isClientServer(); } - /** - * This method was taken from StandardManager to set the default maxInactiveInterval based on the - * container (to 30 minutes). - * <p>
- * Set the Container with which this Manager has been associated. If it is a Context (the usual - * case), listen for changes to the session timeout property. - * - * @param container The associated Container - */ - @Override - public void setContainer(Container container) { - // De-register from the old Container (if any) - if ((this.container != null) && (this.container instanceof Context)) { - this.container.removePropertyChangeListener(this); - } - - // Default processing provided by our superclass - super.setContainer(container); - - // Register with the new Container (if any) - if ((this.container != null) && (this.container instanceof Context)) { - // Overwrite the max inactive interval with the context's session timeout. - setMaxInactiveInterval(((Context) this.container).getSessionTimeout() * 60); - this.container.addPropertyChangeListener(this); - } - } - @Override public Session findSession(String id) { if (id == null) { @@ -454,11 +425,6 @@ public int getRejectedSessions() { return rejectedSessions.get(); } - @Override - public void setRejectedSessions(int rejectedSessions) { - this.rejectedSessions.set(rejectedSessions); - } - /** * Returns the number of active sessions * @@ -589,7 +555,7 @@ protected void registerJvmRouteBinderValve() { } Pipeline getPipeline() { - return getContainer().getPipeline(); + return getContext().getPipeline(); } protected void unregisterJvmRouteBinderValve() { @@ -620,58 +586,6 @@ protected void unregisterCommitSessionValve() { } } - // ------------------------------ Lifecycle Methods - - /** - * Process property change events from our associated Context. - *
- * Part of this method implementation was taken from StandardManager. The sessionTimeout can be - * changed in the web.xml which is processed after the context.xml. The context (and the default - * session timeout) would already have been set in this Manager. This is the way to get the new - * session timeout value specified in the web.xml. - * <p> - * The precedence order for setting the session timeout value is: - * <p> - * <ol> - * <li>the max inactive interval is set based on the Manager defined in the context.xml</li> - * <li>the max inactive interval is then overwritten by the value of the Context's session timeout - * when setContainer is called</li> - * <li>the max inactive interval is then overwritten by the value of the session-timeout specified - * in the web.xml (if any)</li> - * </ol>
- * - * @param event The property change event that has occurred - */ - @Override - public void propertyChange(PropertyChangeEvent event) { - - // Validate the source of this event - if (!(event.getSource() instanceof Context)) { - return; - } - - // Process a relevant property change - if (event.getPropertyName().equals("sessionTimeout")) { - try { - int interval = (Integer) event.getNewValue(); - if (interval < RegionConfiguration.DEFAULT_MAX_INACTIVE_INTERVAL) { - getLogger().warn("The configured session timeout of " + interval - + " minutes is invalid. Using the original value of " + event.getOldValue() - + " minutes."); - interval = (Integer) event.getOldValue(); - } - // StandardContext.setSessionTimeout passes -1 if the configured timeout - // is 0; otherwise it passes the value set in web.xml. If the interval - // parameter equals the default, set the max inactive interval to the - // default (no expiration); otherwise set it in seconds. - setMaxInactiveInterval(interval == RegionConfiguration.DEFAULT_MAX_INACTIVE_INTERVAL - ? RegionConfiguration.DEFAULT_MAX_INACTIVE_INTERVAL : interval * 60); - } catch (NumberFormatException e) { - getLogger() - .error(sm.getString("standardManager.sessionTimeout", event.getNewValue().toString())); - } - } - } - /** * Clear the local cache to avoid ClassCastException if container is being reloaded. */ @@ -694,21 +608,12 @@ private void clearLocalCache() { @Override public String toString() { return getClass().getSimpleName() + "[" + "container=" - + getTheContext() + "; regionName=" + regionName + + getContext() + "; regionName=" + regionName + "; regionAttributesId=" + regionAttributesId + "]"; } String getContextName() { - return getTheContext().getName(); + return getContext().getName(); } - public Context getTheContext() { - if (getContainer() instanceof Context) { - return (Context) getContainer(); - } else { - getLogger().error("Unable to unload sessions - container is of type " - + getContainer().getClass().getName() + " instead of StandardContext"); - return null; - } - } } diff --git a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/Tomcat6CommitSessionValve.java b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/Tomcat6CommitSessionValve.java deleted file mode 100644 index adb0c88bc280..000000000000 --- a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/Tomcat6CommitSessionValve.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ - -package org.apache.geode.modules.session.catalina; - -import org.apache.catalina.connector.Response; - -@Deprecated -public final class Tomcat6CommitSessionValve - extends AbstractCommitSessionValve { - - @Override - protected Response wrapResponse(Response response) { - return response; - } -} diff --git a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/Tomcat6DeltaSessionManager.java b/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/Tomcat6DeltaSessionManager.java deleted file mode 100644 index 8eef4316a23e..000000000000 --- a/extensions/geode-modules/src/main/java/org/apache/geode/modules/session/catalina/Tomcat6DeltaSessionManager.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.modules.session.catalina; - -import org.apache.catalina.LifecycleListener; -import org.apache.catalina.util.LifecycleSupport; - -/** - * @deprecated Tomcat 6 has reached its end of life and support for Tomcat 6 will be removed - * from a future Geode release. - */ -@Deprecated -public class Tomcat6DeltaSessionManager extends DeltaSessionManager { - - /** - * The LifecycleSupport for this component. - */ - private final LifecycleSupport lifecycle = new LifecycleSupport(this); - - /** - * Prepare for the beginning of active use of the public methods of this component. This method - * should be called after configure(), and before any of the public methods of the - * component are utilized. - * - */ - @Override - public synchronized void start() { - if (getLogger().isDebugEnabled()) { - getLogger().debug(this + ": Starting"); - } - if (started.get()) { - return; - } - lifecycle.fireLifecycleEvent(START_EVENT, null); - try { - init(); - } catch (Throwable t) { - getLogger().error(t.getMessage(), t); - } - - // Register our various valves - registerJvmRouteBinderValve(); - - if (isCommitValveEnabled()) { - registerCommitSessionValve(); - } - - // Initialize the appropriate session cache interface - initializeSessionCache(); - - // Create the timer and schedule tasks - scheduleTimerTasks(); - - started.set(true); - } - - /** - * Gracefully terminate the active use of the public methods of this component. This method should - * be the last one called on a given instance of this component. - * - */ - @Override - public synchronized void stop() { - if (getLogger().isDebugEnabled()) { - getLogger().debug(this + ": Stopping"); - } - started.set(false); - lifecycle.fireLifecycleEvent(STOP_EVENT, null); - - // StandardManager expires all Sessions here. - // All Sessions are not known by this Manager. 
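// ---------------------------------------------------------------------------------------------
// Editor's aside: a self-contained sketch (not code from this patch) of the unit handling behind
// the new DeltaSessionManager.getMaxInactiveInterval() earlier in this diff. Tomcat's
// Context.getSessionTimeout() is expressed in minutes, while Manager.getMaxInactiveInterval() is
// expressed in seconds, so the value is converted with MINUTES.toSeconds(...), clamped to
// Integer.MAX_VALUE to avoid int overflow, and floored at -1 (Tomcat treats negative timeouts as
// "sessions never expire").
import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.util.concurrent.TimeUnit.MINUTES;

public class SessionTimeoutConversionDemo {
  static int maxInactiveIntervalSeconds(int sessionTimeoutMinutes) {
    return (int) max(min(MINUTES.toSeconds(sessionTimeoutMinutes), Integer.MAX_VALUE), -1);
  }

  public static void main(String[] args) {
    System.out.println(maxInactiveIntervalSeconds(30));                // 1800, the DUnit setup value
    System.out.println(maxInactiveIntervalSeconds(1));                 // 60, as in testSessionExpiration1
    System.out.println(maxInactiveIntervalSeconds(-1));                // -1, sessions never expire
    System.out.println(maxInactiveIntervalSeconds(Integer.MAX_VALUE)); // clamped to Integer.MAX_VALUE
  }
}
// ---------------------------------------------------------------------------------------------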
- - // Require a new random number generator if we are restarted - random = null; - - // Remove from RMI registry - if (initialized) { - destroy(); - } - - // Clear any sessions to be touched - getSessionsToTouch().clear(); - - // Cancel the timer - cancelTimer(); - - // Unregister the JVM route valve - unregisterJvmRouteBinderValve(); - - if (isCommitValveEnabled()) { - unregisterCommitSessionValve(); - } - } - - /** - * Add a lifecycle event listener to this component. - * - * @param listener The listener to add - */ - @Override - public void addLifecycleListener(LifecycleListener listener) { - lifecycle.addLifecycleListener(listener); - } - - /** - * Get the lifecycle listeners associated with this lifecycle. If this Lifecycle has no listeners - * registered, a zero-length array is returned. - */ - @Override - public LifecycleListener[] findLifecycleListeners() { - return lifecycle.findLifecycleListeners(); - } - - /** - * Remove a lifecycle event listener from this component. - * - * @param listener The listener to remove - */ - @Override - public void removeLifecycleListener(LifecycleListener listener) { - lifecycle.removeLifecycleListener(listener); - } - - @Override - protected Tomcat6CommitSessionValve createCommitSessionValve() { - return new Tomcat6CommitSessionValve(); - } -} diff --git a/extensions/geode-modules/src/test/resources/expected-pom.xml b/extensions/geode-modules/src/test/resources/expected-pom.xml index 4cd26469146d..f30f863320d4 100644 --- a/extensions/geode-modules/src/test/resources/expected-pom.xml +++ b/extensions/geode-modules/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-modules ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,27 +47,27 @@ - org.apache.geode + com.vmware.gemfire geode-logging compile - org.apache.geode + com.vmware.gemfire geode-common compile - org.apache.geode + com.vmware.gemfire geode-core compile - org.apache.geode + com.vmware.gemfire geode-membership runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime diff --git a/geode-assembly/build.gradle b/geode-assembly/build.gradle index 8e62fabea912..6dfaba7eb4d7 100755 --- a/geode-assembly/build.gradle +++ b/geode-assembly/build.gradle @@ -53,8 +53,6 @@ configurations { geodeLibdirJarsDeprecated // Configurations used to download and cache web application servers for session module testing - webServerTomcat6 - webServerTomcat7 webServerTomcat8 webServerTomcat9 webServerJetty @@ -95,16 +93,6 @@ artifacts { } repositories { - //This "repository" only exists to download tomcat-6, because the zip for tomcat 6 is - //not in a maven repo. Later versions of tomcat are. 
- ivy { - url 'https://archive.apache.org/' - patternLayout { - artifact '/dist/tomcat/tomcat-6/v6.0.37/bin/[organisation]-[module]-[revision].[ext]' - } - // Infer the metadata from the presence of the artifact - metadataSources { artifact() } - } // For gradle tooling dependencies maven { url 'https://repo.gradle.org/gradle/libs-releases' @@ -158,7 +146,6 @@ dependencies { javadocOnly(project(':geode-server-all')) javadocOnly(project(':extensions:geode-modules')) javadocOnly(project(':extensions:geode-modules-session')) - javadocOnly(project(':extensions:geode-modules-tomcat7')) javadocOnly(project(':extensions:geode-modules-tomcat9')) javadocOnly(project(':extensions:geode-modules-tomcat8')) @@ -279,8 +266,6 @@ dependencies { upgradeTestRuntimeOnly files({ downloadWebServers } ) //Web servers used for session module testing - webServerTomcat6('apache:tomcat:' + DependencyConstraints.get('tomcat6.version') + '@zip') - webServerTomcat7('org.apache.tomcat:tomcat:' + DependencyConstraints.get('tomcat7.version') + '@zip') webServerTomcat8('org.apache.tomcat:tomcat:' + DependencyConstraints.get('tomcat8.version') + '@zip') webServerTomcat9('org.apache.tomcat:tomcat:' + DependencyConstraints.get('tomcat9.version') + '@zip') webServerJetty('org.eclipse.jetty:jetty-distribution:' + DependencyConstraints.get('jetty.version') + '@zip') diff --git a/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/TomcatInstall.java b/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/TomcatInstall.java index bec094c415e5..e8f8fda365dc 100644 --- a/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/TomcatInstall.java +++ b/geode-assembly/geode-assembly-test/src/main/java/org/apache/geode/session/tests/TomcatInstall.java @@ -43,8 +43,6 @@ public class TomcatInstall extends ContainerInstall { * version, and other properties or XML attributes needed to setup tomcat containers within Cargo */ public enum TomcatVersion { - TOMCAT6(6, "tomcat-6.0.37.zip"), - TOMCAT7(7, "tomcat-7.0.109.zip"), TOMCAT8(8, "tomcat-8.5.66.zip"), TOMCAT9(9, "tomcat-9.0.62.zip"); @@ -80,10 +78,6 @@ public String getDownloadURL() { public String jarSkipPropertyName() { switch (this) { - case TOMCAT6: - return null; - case TOMCAT7: - return "tomcat.util.scan.DefaultJarScanner.jarsToSkip"; case TOMCAT8: case TOMCAT9: return "tomcat.util.scan.StandardJarScanFilter.jarsToSkip"; diff --git a/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/JdkEncapsulationTest.java b/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/JdkEncapsulationTest.java index e500f5e625c9..143033a7f6cb 100644 --- a/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/JdkEncapsulationTest.java +++ b/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/JdkEncapsulationTest.java @@ -1,3 +1,4 @@ +// Copyright (c) VMware, Inc. 2022. All rights reserved. /* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. 
See the NOTICE file distributed with this work for additional information regarding @@ -17,6 +18,7 @@ package org.apache.geode.jdk; import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPort; +import static org.apache.geode.jdk.ReflectEncapsulatedJdkObject.OBJECT; import static org.apache.geode.test.util.JarUtils.createJarWithClasses; import static org.assertj.core.api.AssertionsForClassTypes.assertThat; import static org.assertj.core.api.Assumptions.assumeThat; @@ -36,7 +38,7 @@ import org.apache.geode.test.version.JavaVersions; /** - * Test several ways to make normally inaccessible JDK packages accessible on JDK 17. + * Test several ways to make encapsulated types in JDK packages accessible for reflection on JDK 17. */ public class JdkEncapsulationTest { @Rule(order = 0) @@ -46,26 +48,26 @@ public class JdkEncapsulationTest { public final GfshRule gfshRule = new GfshRule(folderRule::getFolder); private String startServer; - private GfshScript traverseEncapsulatedJdkObject; + private GfshScript reflectEncapsulatedJdkObject; @BeforeClass - public static void validOnlyOnJdk17AndLater() { + public static void enableTheseTestsOnlyOnJdk17AndLater() { assumeThat(JavaVersions.current().specificationVersion()) .isGreaterThanOrEqualTo(17); } @Before - public void startLocatorWithObjectTraverserFunction() throws IOException { - Path jarPath = folderRule.getFolder().toPath().resolve("traverse-encapsulated-jdk-object.jar"); - createJarWithClasses(jarPath, TraverseEncapsulatedJdkObject.class); + public void startLocatorAndDeployReflectionFunction() throws IOException { + Path jarPath = folderRule.getFolder().toPath().resolve("reflect-encapsulated-jdk-object.jar"); + createJarWithClasses(jarPath, ReflectEncapsulatedJdkObject.class); int locatorPort = getRandomAvailableTCPPort(); String locators = "localhost[" + locatorPort + "]"; startServer = "start server --name=server --disable-default-server --locators=" + locators; - traverseEncapsulatedJdkObject = GfshScript + reflectEncapsulatedJdkObject = GfshScript .of("connect --locator=" + locators) - .and("execute function --id=" + TraverseEncapsulatedJdkObject.ID); + .and("execute function --id=" + ReflectEncapsulatedJdkObject.ID); GfshScript .of("start locator --port=" + locatorPort) @@ -73,34 +75,36 @@ public void startLocatorWithObjectTraverserFunction() throws IOException { .execute(gfshRule); } - // If this test fails, it means the object we're trying to traverse has no inaccessible fields, - // and so is not useful for the other tests. If it fails, update TraverseInaccessibleJdkObject - // to use a type that actually has inaccessible fields. + /** + * If this test fails, it means the object we're trying to reflect has no inaccessible fields, + * and so is not useful for the other tests. If it fails, update + * {@link ReflectEncapsulatedJdkObject} to use a type that has inaccessible fields. + */ @Test public void cannotMakeEncapsulatedFieldsAccessibleByDefault() { gfshRule.execute(startServer); // No JDK options - String traversalResult = traverseEncapsulatedJdkObject + String reflectionResult = reflectEncapsulatedJdkObject .expectExitCode(1) // Because we did not open any JDK packages. 
.execute(gfshRule) .getOutputText(); - assertThat(traversalResult) - .as("result of traversing %s", TraverseEncapsulatedJdkObject.OBJECT.getClass()) + assertThat(reflectionResult) + .as("result of reflecting %s", OBJECT.getClass()) .contains("Exception: java.lang.reflect.InaccessibleObjectException"); } @Test public void canMakeEncapsulatedFieldsAccessibleInExplicitlyOpenedPackages() { - String objectPackage = TraverseEncapsulatedJdkObject.OBJECT.getClass().getPackage().getName(); - String objectModule = TraverseEncapsulatedJdkObject.MODULE; + String objectPackage = OBJECT.getClass().getPackage().getName(); + String objectModule = ReflectEncapsulatedJdkObject.MODULE; String openThePackageOfTheEncapsulatedJdkObject = String.format(" --J=--add-opens=%s/%s=ALL-UNNAMED", objectModule, objectPackage); gfshRule.execute(startServer + openThePackageOfTheEncapsulatedJdkObject); - traverseEncapsulatedJdkObject + reflectEncapsulatedJdkObject .expectExitCode(0) // Because we opened the encapsulated object's package. .execute(gfshRule); } @@ -117,7 +121,7 @@ public void canMakeEncapsulatedFieldsAccessibleInPackagesOpenedByArgumentFile() gfshRule.execute(startServer + useArgumentFile); - traverseEncapsulatedJdkObject + reflectEncapsulatedJdkObject .expectExitCode(0) // Because the argument file opens all JDK packages. .execute(gfshRule); } diff --git a/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/TraverseEncapsulatedJdkObject.java b/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/ReflectEncapsulatedJdkObject.java similarity index 70% rename from geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/TraverseEncapsulatedJdkObject.java rename to geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/ReflectEncapsulatedJdkObject.java index a2239d3116e8..3f3f4c800bf4 100644 --- a/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/TraverseEncapsulatedJdkObject.java +++ b/geode-assembly/src/acceptanceTest/java/org/apache/geode/jdk/ReflectEncapsulatedJdkObject.java @@ -1,3 +1,4 @@ +// Copyright (c) VMware, Inc. 2022. All rights reserved. /* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding @@ -16,23 +17,19 @@ package org.apache.geode.jdk; +import java.lang.reflect.Field; import java.math.BigDecimal; import org.apache.geode.cache.execute.Function; import org.apache.geode.cache.execute.FunctionContext; -import org.apache.geode.internal.size.ObjectTraverser; -import org.apache.geode.internal.size.ObjectTraverser.Visitor; - -public class TraverseEncapsulatedJdkObject implements Function { - private static final Visitor TRAVERSE_ENTIRE_OBJECT_GRAPH = (parent, object) -> true; - private final ObjectTraverser traverser = new ObjectTraverser(); +public class ReflectEncapsulatedJdkObject implements Function { // OBJECT must have a JDK type with inaccessible fields, defined in a package that Gfsh does // not open by default. static final BigDecimal OBJECT = BigDecimal.ONE; // MODULE must be the module that defines OBJECT's type. 
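// ---------------------------------------------------------------------------------------------
// Editor's aside: a standalone sketch (not part of this patch) of the JDK 17 behavior this
// function probes. Strong encapsulation makes setAccessible(true) throw
// InaccessibleObjectException for fields of java.base types such as BigDecimal unless the
// package is opened, e.g. by starting the JVM with --add-opens=java.base/java.math=ALL-UNNAMED.
import java.lang.reflect.Field;
import java.lang.reflect.InaccessibleObjectException;
import java.math.BigDecimal;

public class AddOpensDemo {
  public static void main(String[] args) {
    for (Field field : BigDecimal.class.getDeclaredFields()) {
      try {
        field.setAccessible(true);
        System.out.println("accessible: " + field.getName());
      } catch (InaccessibleObjectException e) {
        // On JDK 17+ this branch is taken when java.base/java.math is not opened to this module.
        System.out.println("inaccessible: " + field.getName());
      }
    }
  }
}
// ---------------------------------------------------------------------------------------------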
static final String MODULE = "java.base"; - static final String ID = "traverse-big-decimal"; + static final String ID = "reflect-encapsulated-jdk-object"; @Override public String getId() { @@ -41,11 +38,10 @@ public String getId() { @Override public void execute(FunctionContext context) { - try { - traverser.breadthFirstSearch(OBJECT, TRAVERSE_ENTIRE_OBJECT_GRAPH, false); - } catch (IllegalAccessException e) { - context.getResultSender().sendException(e); - return; + for (Field f : OBJECT.getClass().getDeclaredFields()) { + // Throws InaccessibleObjectException on JDK 17 if the field is inaccessible and the declaring + // class's package is not open to Geode. + f.setAccessible(true); } context.getResultSender().lastResult("OK"); } diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6CachingClientServerTest.java b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6CachingClientServerTest.java deleted file mode 100644 index 1c6f9d09c60c..000000000000 --- a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6CachingClientServerTest.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.session.tests; - -import static org.apache.geode.session.tests.ContainerInstall.ConnectionType.CACHING_CLIENT_SERVER; -import static org.apache.geode.session.tests.TomcatInstall.TomcatVersion.TOMCAT6; - -import java.util.function.IntSupplier; - -public class Tomcat6CachingClientServerTest extends TomcatClientServerTest { - @Override - public ContainerInstall getInstall(IntSupplier portSupplier) throws Exception { - return new TomcatInstall(getClass().getSimpleName(), TOMCAT6, CACHING_CLIENT_SERVER, - portSupplier, TomcatInstall.CommitValve.DEFAULT); - } -} diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6ClientServerTest.java b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6ClientServerTest.java deleted file mode 100644 index 75d853d26536..000000000000 --- a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6ClientServerTest.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.session.tests; - -import static org.apache.geode.session.tests.ContainerInstall.ConnectionType.CLIENT_SERVER; -import static org.apache.geode.session.tests.TomcatInstall.TomcatVersion.TOMCAT6; - -import java.util.function.IntSupplier; - -public class Tomcat6ClientServerTest extends TomcatClientServerTest { - @Override - public ContainerInstall getInstall(IntSupplier portSupplier) throws Exception { - return new TomcatInstall(getClass().getSimpleName(), TOMCAT6, CLIENT_SERVER, portSupplier, - TomcatInstall.CommitValve.DEFAULT); - } -} diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6Test.java b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6Test.java deleted file mode 100644 index 50487d0dfaed..000000000000 --- a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat6Test.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.session.tests; - -import static org.apache.geode.session.tests.ContainerInstall.ConnectionType.PEER_TO_PEER; -import static org.apache.geode.session.tests.TomcatInstall.TomcatVersion.TOMCAT6; - -import java.util.function.IntSupplier; - -public class Tomcat6Test extends CargoTestBase { - @Override - public ContainerInstall getInstall(IntSupplier portSupplier) throws Exception { - return new TomcatInstall(getClass().getSimpleName(), TOMCAT6, PEER_TO_PEER, portSupplier, - TomcatInstall.CommitValve.DEFAULT); - } -} diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7CachingClientServerTest.java b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7CachingClientServerTest.java deleted file mode 100644 index 4401bfe616d4..000000000000 --- a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7CachingClientServerTest.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.session.tests; - -import static org.apache.geode.session.tests.ContainerInstall.ConnectionType.CACHING_CLIENT_SERVER; -import static org.apache.geode.session.tests.TomcatInstall.TomcatVersion.TOMCAT7; - -import java.util.function.IntSupplier; - -public class Tomcat7CachingClientServerTest extends TomcatClientServerTest { - @Override - public ContainerInstall getInstall(IntSupplier portSupplier) throws Exception { - return new TomcatInstall(getClass().getSimpleName(), TOMCAT7, CACHING_CLIENT_SERVER, - portSupplier, TomcatInstall.CommitValve.DEFAULT); - } -} diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7ClientServerTest.java b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7ClientServerTest.java deleted file mode 100644 index f2cacf5da62c..000000000000 --- a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7ClientServerTest.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.session.tests; - - -import static org.apache.geode.session.tests.ContainerInstall.ConnectionType.CLIENT_SERVER; -import static org.apache.geode.session.tests.TomcatInstall.TomcatVersion.TOMCAT7; - -import java.util.function.IntSupplier; - -public class Tomcat7ClientServerTest extends TomcatClientServerTest { - @Override - public ContainerInstall getInstall(IntSupplier portSupplier) throws Exception { - return new TomcatInstall(getClass().getSimpleName(), TOMCAT7, CLIENT_SERVER, portSupplier, - TomcatInstall.CommitValve.DEFAULT); - } -} diff --git a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7Test.java b/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7Test.java deleted file mode 100644 index 5e93e1f453af..000000000000 --- a/geode-assembly/src/distributedTest/java/org/apache/geode/session/tests/Tomcat7Test.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.session.tests; - -import static org.apache.geode.session.tests.ContainerInstall.ConnectionType.PEER_TO_PEER; -import static org.apache.geode.session.tests.TomcatInstall.TomcatVersion.TOMCAT7; - -import java.util.function.IntSupplier; - -public class Tomcat7Test extends CargoTestBase { - @Override - public ContainerInstall getInstall(IntSupplier portSupplier) throws Exception { - return new TomcatInstall(getClass().getSimpleName(), TOMCAT7, PEER_TO_PEER, portSupplier, - TomcatInstall.CommitValve.DEFAULT); - } -} diff --git a/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/DevRestSwaggerVerificationIntegrationTest.java b/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/DevRestSwaggerVerificationIntegrationTest.java index e543020fad80..2a2f25982df0 100644 --- a/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/DevRestSwaggerVerificationIntegrationTest.java +++ b/geode-assembly/src/integrationTest/java/org/apache/geode/rest/internal/web/DevRestSwaggerVerificationIntegrationTest.java @@ -20,6 +20,9 @@ import static org.hamcrest.MatcherAssert.assertThat; import com.fasterxml.jackson.databind.JsonNode; +import org.apache.http.Header; +import org.apache.http.HttpResponse; +import org.assertj.core.api.Assertions; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -47,8 +50,11 @@ public class DevRestSwaggerVerificationIntegrationTest { @Test public void isSwaggerRunning() throws Exception { - // Check the UI - assertResponse(client.get("/geode/swagger-ui.html")).hasStatusCode(200); + // Check the UI connects and hides server info + HttpResponse response = client.get("/geode/swagger-ui.html"); + Header server = response.getFirstHeader("server"); + Assertions.assertThat(server).isNull(); + assertResponse(response).hasStatusCode(200); // Check the JSON JsonNode json = diff --git a/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityConfigDefaultProfileTest.java b/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityConfigDefaultProfileTest.java index d121363aa5ac..5a36697f3d02 100644 --- a/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityConfigDefaultProfileTest.java +++ b/geode-assembly/src/integrationTest/java/org/apache/geode/tools/pulse/PulseSecurityConfigDefaultProfileTest.java @@ -16,7 +16,9 @@ package org.apache.geode.tools.pulse; import static org.apache.geode.test.junit.rules.HttpResponseAssert.assertResponse; +import static org.assertj.core.api.Assertions.assertThat; +import org.apache.http.Header; import org.apache.http.HttpResponse; import org.junit.ClassRule; import org.junit.Rule; @@ -45,8 +47,10 @@ public void testLogin() throws Exception { } @Test - public void loginPage() throws Exception { + public void loginPageRevealsNoServerInfoAndResponds() throws Exception { HttpResponse response = client.get("/pulse/login.html"); + Header server = response.getFirstHeader("server"); + assertThat(server).isNull(); 
assertResponse(response).hasStatusCode(200).hasResponseBody().contains(""); } diff --git a/geode-assembly/src/integrationTest/resources/assembly_content.txt b/geode-assembly/src/integrationTest/resources/assembly_content.txt index f19575b92114..cc581ff58bbe 100644 --- a/geode-assembly/src/integrationTest/resources/assembly_content.txt +++ b/geode-assembly/src/integrationTest/resources/assembly_content.txt @@ -825,7 +825,6 @@ javadoc/org/apache/geode/modules/session/catalina/AbstractSessionCache.html javadoc/org/apache/geode/modules/session/catalina/ClientServerCacheLifecycleListener.html javadoc/org/apache/geode/modules/session/catalina/ClientServerSessionCache.html javadoc/org/apache/geode/modules/session/catalina/DeltaSession.html -javadoc/org/apache/geode/modules/session/catalina/DeltaSession7.html javadoc/org/apache/geode/modules/session/catalina/DeltaSession8.html javadoc/org/apache/geode/modules/session/catalina/DeltaSession9.html javadoc/org/apache/geode/modules/session/catalina/DeltaSessionFacade.html @@ -836,10 +835,6 @@ javadoc/org/apache/geode/modules/session/catalina/PeerToPeerCacheLifecycleListen javadoc/org/apache/geode/modules/session/catalina/PeerToPeerSessionCache.html javadoc/org/apache/geode/modules/session/catalina/SessionCache.html javadoc/org/apache/geode/modules/session/catalina/SessionManager.html -javadoc/org/apache/geode/modules/session/catalina/Tomcat6CommitSessionValve.html -javadoc/org/apache/geode/modules/session/catalina/Tomcat6DeltaSessionManager.html -javadoc/org/apache/geode/modules/session/catalina/Tomcat7CommitSessionValve.html -javadoc/org/apache/geode/modules/session/catalina/Tomcat7DeltaSessionManager.html javadoc/org/apache/geode/modules/session/catalina/Tomcat8CommitSessionValve.html javadoc/org/apache/geode/modules/session/catalina/Tomcat8DeltaSessionManager.html javadoc/org/apache/geode/modules/session/catalina/Tomcat9CommitSessionValve.html @@ -1044,16 +1039,16 @@ lib/mx4j-remote-3.0.2.jar lib/mx4j-tools-3.0.1.jar lib/ra.jar lib/rmiio-2.1.2.jar -lib/shiro-cache-1.9.0.jar -lib/shiro-config-core-1.9.0.jar -lib/shiro-config-ogdl-1.9.0.jar -lib/shiro-core-1.9.0.jar -lib/shiro-crypto-cipher-1.9.0.jar -lib/shiro-crypto-core-1.9.0.jar -lib/shiro-crypto-hash-1.9.0.jar -lib/shiro-event-1.9.0.jar -lib/shiro-lang-1.9.0.jar -lib/slf4j-api-1.7.32.jar +lib/shiro-cache-1.9.1.jar +lib/shiro-config-core-1.9.1.jar +lib/shiro-config-ogdl-1.9.1.jar +lib/shiro-core-1.9.1.jar +lib/shiro-crypto-cipher-1.9.1.jar +lib/shiro-crypto-core-1.9.1.jar +lib/shiro-crypto-hash-1.9.1.jar +lib/shiro-event-1.9.1.jar +lib/shiro-lang-1.9.1.jar +lib/slf4j-api-1.7.36.jar lib/snappy-0.4.jar lib/spring-beans-5.3.21.jar lib/spring-context-5.3.21.jar diff --git a/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt b/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt index 62619491b2f8..2d0f655ef741 100644 --- a/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt +++ b/geode-assembly/src/integrationTest/resources/gfsh_dependency_classpath.txt @@ -45,8 +45,8 @@ antlr-2.7.7.jar istack-commons-runtime-4.0.1.jar jaxb-impl-2.3.2.jar commons-validator-1.7.jar -shiro-core-1.9.0.jar -shiro-config-ogdl-1.9.0.jar +shiro-core-1.9.1.jar +shiro-config-ogdl-1.9.1.jar commons-beanutils-1.9.4.jar commons-codec-1.15.jar commons-collections-3.2.2.jar @@ -66,14 +66,14 @@ jna-platform-5.11.0.jar jna-5.11.0.jar snappy-0.4.jar jgroups-3.6.14.Final.jar -shiro-cache-1.9.0.jar -shiro-crypto-hash-1.9.0.jar -shiro-crypto-cipher-1.9.0.jar 
-shiro-config-core-1.9.0.jar -shiro-event-1.9.0.jar -shiro-crypto-core-1.9.0.jar -shiro-lang-1.9.0.jar -slf4j-api-1.7.32.jar +shiro-cache-1.9.1.jar +shiro-crypto-hash-1.9.1.jar +shiro-crypto-cipher-1.9.1.jar +shiro-config-core-1.9.1.jar +shiro-event-1.9.1.jar +shiro-crypto-core-1.9.1.jar +shiro-lang-1.9.1.jar +slf4j-api-1.7.36.jar spring-beans-5.3.21.jar javax.activation-api-1.2.0.jar jline-2.12.jar diff --git a/geode-assembly/src/main/dist/LICENSE b/geode-assembly/src/main/dist/LICENSE index 6744983b6c82..72d998b4fddb 100644 --- a/geode-assembly/src/main/dist/LICENSE +++ b/geode-assembly/src/main/dist/LICENSE @@ -1097,7 +1097,7 @@ Apache Geode bundles the following files under the MIT License: - Normalize.css v2.1.0 (https://necolas.github.io/normalize.css/), Copyright (c) Nicolas Gallagher and Jonathan Neal - Sizzle.js (http://sizzlejs.com/), Copyright (c) 2011, The Dojo Foundation - - SLF4J API v1.7.32 (http://www.slf4j.org), Copyright (c) 2004-2017 QOS.ch + - SLF4J API v1.7.36 (http://www.slf4j.org), Copyright (c) 2004-2022 QOS.ch - Split.js (https://github.com/nathancahill/Split.js), Copyright (c) 2015 Nathan Cahill - TableDnD v0.5 (https://github.com/isocra/TableDnD), Copyright (c) 2012 diff --git a/geode-assembly/src/test/resources/expected-pom.xml b/geode-assembly/src/test/resources/expected-pom.xml index ea904d5aab1b..016454c4a749 100644 --- a/geode-assembly/src/test/resources/expected-pom.xml +++ b/geode-assembly/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire apache-geode ${version} tgz diff --git a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTestBase.java b/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTestBase.java index 4f9646c82506..3d6b7bf6bdf1 100644 --- a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTestBase.java +++ b/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTestBase.java @@ -87,20 +87,17 @@ public static Collection data() { protected File oldBuild; protected File oldModules; - protected TomcatInstall tomcat7079AndOldModules; - protected TomcatInstall tomcat7079AndCurrentModules; protected TomcatInstall tomcat8AndOldModules; protected TomcatInstall tomcat8AndCurrentModules; protected int locatorPort; - protected String classPathTomcat7079; protected String classPathTomcat8; protected String serverDir; protected String locatorDir; protected TomcatSessionBackwardsCompatibilityTestBase(String version) { VersionManager versionManager = VersionManager.getInstance(); - String installLocation = installLocation = versionManager.getInstall(version); + String installLocation = versionManager.getInstall(version); oldBuild = new File(installLocation); oldModules = new File(installLocation + "/tools/Modules/"); } @@ -140,17 +137,6 @@ protected void startLocator(String name, String classPath, int port) throws IOEx @Before public void setup() throws Exception { - tomcat7079AndOldModules = - new TomcatInstall("Tomcat7079AndOldModules", TomcatInstall.TomcatVersion.TOMCAT7, - ContainerInstall.ConnectionType.CLIENT_SERVER, - oldModules.getAbsolutePath(), oldBuild.getAbsolutePath() + "/lib", - portSupplier::getAvailablePort, TomcatInstall.CommitValve.DEFAULT); - - tomcat7079AndCurrentModules = - new TomcatInstall("Tomcat7079AndCurrentModules", 
TomcatInstall.TomcatVersion.TOMCAT7, - ContainerInstall.ConnectionType.CLIENT_SERVER, - portSupplier::getAvailablePort, TomcatInstall.CommitValve.DEFAULT); - tomcat8AndOldModules = new TomcatInstall("Tomcat8AndOldModules", TomcatInstall.TomcatVersion.TOMCAT8, ContainerInstall.ConnectionType.CLIENT_SERVER, @@ -163,17 +149,12 @@ public void setup() throws Exception { ContainerInstall.ConnectionType.CLIENT_SERVER, portSupplier::getAvailablePort, TomcatInstall.CommitValve.DEFAULT); - classPathTomcat7079 = tomcat7079AndCurrentModules.getHome() + "/lib/*" + File.pathSeparator - + tomcat7079AndCurrentModules.getHome() + "/bin/*"; classPathTomcat8 = tomcat8AndCurrentModules.getHome() + "/lib/*" + File.pathSeparator + tomcat8AndCurrentModules.getHome() + "/bin/*"; // Get available port for the locator locatorPort = portSupplier.getAvailablePort(); - tomcat7079AndOldModules.setDefaultLocatorPort(locatorPort); - tomcat7079AndCurrentModules.setDefaultLocatorPort(locatorPort); - tomcat8AndOldModules.setDefaultLocatorPort(locatorPort); tomcat8AndCurrentModules.setDefaultLocatorPort(locatorPort); diff --git a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModuleCanDoPutsTest.java b/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModuleCanDoPutsTest.java deleted file mode 100644 index 87f42951e238..000000000000 --- a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModuleCanDoPutsTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.geode.session.tests; - -import org.junit.Test; -import org.junit.runners.Parameterized; - -import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory; - -@Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class) -public class TomcatSessionBackwardsCompatibilityTomcat7079WithOldModuleCanDoPutsTest - extends TomcatSessionBackwardsCompatibilityTestBase { - - public TomcatSessionBackwardsCompatibilityTomcat7079WithOldModuleCanDoPutsTest(String version) { - super(version); - } - - @Test - public void test() throws Exception { - startClusterWithTomcat(classPathTomcat7079); - manager.addContainer(tomcat7079AndOldModules); - manager.addContainer(tomcat7079AndOldModules); - doPutAndGetSessionOnAllClients(); - } - -} diff --git a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromCurrentModuleTest.java b/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromCurrentModuleTest.java deleted file mode 100644 index 411ab456df2e..000000000000 --- a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromCurrentModuleTest.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.geode.session.tests; - -import org.junit.Test; -import org.junit.runners.Parameterized; - -import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory; - -@Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class) -public class TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromCurrentModuleTest - extends TomcatSessionBackwardsCompatibilityTestBase { - - public TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromCurrentModuleTest( - String version) { - super(version); - } - - @Test - public void test() throws Exception { - startClusterWithTomcat(classPathTomcat7079); - manager.addContainer(tomcat7079AndCurrentModules); - manager.addContainer(tomcat7079AndOldModules); - doPutAndGetSessionOnAllClients(); - } - -} diff --git a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromOldModuleTest.java b/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromOldModuleTest.java deleted file mode 100644 index 09f49375b9eb..000000000000 --- a/geode-assembly/src/upgradeTest/java/org/apache/geode/session/tests/TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromOldModuleTest.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.geode.session.tests; - -import org.junit.Test; -import org.junit.runners.Parameterized; - -import org.apache.geode.test.junit.runners.CategoryWithParameterizedRunnerFactory; - -@Parameterized.UseParametersRunnerFactory(CategoryWithParameterizedRunnerFactory.class) -public class TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromOldModuleTest - extends TomcatSessionBackwardsCompatibilityTestBase { - - public TomcatSessionBackwardsCompatibilityTomcat7079WithOldModulesMixedWithCurrentCanDoPutFromOldModuleTest( - String version) { - super(version); - } - - @Test - public void test() throws Exception { - startClusterWithTomcat(classPathTomcat7079); - manager.addContainer(tomcat7079AndOldModules); - manager.addContainer(tomcat7079AndCurrentModules); - doPutAndGetSessionOnAllClients(); - } - -} diff --git a/geode-common/src/test/resources/expected-pom.xml b/geode-common/src/test/resources/expected-pom.xml index 1c512ff34f95..db951b648b9b 100644 --- a/geode-common/src/test/resources/expected-pom.xml +++ b/geode-common/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-common ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom diff --git a/geode-concurrency-test/src/test/resources/expected-pom.xml b/geode-concurrency-test/src/test/resources/expected-pom.xml index ac266b4fe9c2..e90721dfa262 100644 --- a/geode-concurrency-test/src/test/resources/expected-pom.xml +++ b/geode-concurrency-test/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-concurrency-test ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom diff --git a/geode-connectors/src/test/resources/expected-pom.xml b/geode-connectors/src/test/resources/expected-pom.xml index b76b329617bc..69aa6e072c24 100644 --- a/geode-connectors/src/test/resources/expected-pom.xml +++ b/geode-connectors/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-connectors ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,22 +47,22 @@ - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime - org.apache.geode + com.vmware.gemfire geode-core runtime - org.apache.geode + com.vmware.gemfire geode-gfsh runtime diff --git a/geode-core/src/distributedTest/java/org/apache/geode/distributed/DistributedSystemFindDistributedMembersDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/distributed/DistributedSystemFindDistributedMembersDUnitTest.java new file mode 100644 index 000000000000..09edc0c75d37 --- /dev/null +++ b/geode-core/src/distributedTest/java/org/apache/geode/distributed/DistributedSystemFindDistributedMembersDUnitTest.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.geode.distributed; + +import static java.util.Arrays.asList; +import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS; +import static org.apache.geode.test.dunit.VM.getVM; +import static org.apache.geode.test.dunit.rules.DistributedRule.getLocators; +import static org.assertj.core.api.Assertions.assertThat; + +import java.io.Serializable; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; +import java.util.Properties; +import java.util.Set; + +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import org.apache.geode.distributed.internal.InternalDistributedSystem; +import org.apache.geode.internal.inet.LocalHostUtil; +import org.apache.geode.test.dunit.VM; +import org.apache.geode.test.dunit.rules.DistributedReference; + +@SuppressWarnings({"serial", "deprecation"}) +public class DistributedSystemFindDistributedMembersDUnitTest implements Serializable { + + @Rule + public DistributedReference<InternalDistributedSystem> system = new DistributedReference<>(); + + @Before + public void setUp() { + Properties configProperties = new Properties(); + configProperties.setProperty(LOCATORS, getLocators()); + + for (VM vm : asList(getVM(0), getVM(1), getVM(2))) { + vm.invoke(() -> { + system.set((InternalDistributedSystem) DistributedSystem.connect(configProperties)); + }); + } + } + + @Test + public void findDistributedMembersForLocalHostReturnsManyMembers() throws UnknownHostException { + InetAddress localHost = LocalHostUtil.getLocalHost(); + + List<VM> serverVMs = asList(getVM(0), getVM(1), getVM(2)); + for (VM vm : serverVMs) { + vm.invoke(() -> { + Set<DistributedMember> members = system.get().findDistributedMembers(localHost); + // number of servers plus one locator + assertThat(members).hasSize(serverVMs.size() + 1); + }); + } + } +} diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ClearDuringGiiOplogWithMissingCreateRegressionTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ClearDuringGiiOplogWithMissingCreateRegressionTest.java index f8fad0d6c265..2d376c2f9824 100644 --- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ClearDuringGiiOplogWithMissingCreateRegressionTest.java +++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ClearDuringGiiOplogWithMissingCreateRegressionTest.java @@ -212,7 +212,7 @@ public RegionEntry createEntry(RegionEntryContext r, Object key, Object value) { } @Override - public Class getEntryClass() { + public Class<?> getEntryClass() { return TestableDiskRegionEntry.class; } diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GiiDiskAccessExceptionRegressionTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GiiDiskAccessExceptionRegressionTest.java index 5e8202de24fa..2508d4905874 100644 --- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GiiDiskAccessExceptionRegressionTest.java +++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/GiiDiskAccessExceptionRegressionTest.java @@ -191,7 +191,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va) { } @Override - public Class getEntryClass() { + public Class<?> getEntryClass() { return getClass(); } diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PRCustomPartitioningDistributedTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PRCustomPartitioningDistributedTest.java index
b0bf9b84f27c..5f52dd9b9683 100755 --- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PRCustomPartitioningDistributedTest.java +++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PRCustomPartitioningDistributedTest.java @@ -15,6 +15,7 @@ package org.apache.geode.internal.cache; import static org.apache.geode.test.dunit.VM.getVM; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.assertj.core.api.Assertions.assertThat; import java.io.Serializable; @@ -133,9 +134,9 @@ private void verifyKeys(final String regionName, final List listOfKeys) { Set bucketKeys = partitionedRegion.getBucketKeys(bucketId); for (Object key : bucketKeys) { EntryOperation entryOperation = - new EntryOperationImpl(partitionedRegion, null, key, null, null); + new EntryOperationImpl<>(partitionedRegion, null, key, null, null); PartitionResolver partitionResolver = - partitionedRegion.getPartitionResolver(); + uncheckedCast(partitionedRegion.getPartitionResolver()); Object routingObject = partitionResolver.getRoutingObject(entryOperation); int routingObjectHashCode = routingObject.hashCode() % TOTAL_NUM_BUCKETS; assertThat(routingObjectHashCode).isEqualTo(bucketId); diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionRestartRebalanceDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionRestartRebalanceDUnitTest.java index 8f8a37758b5d..ad6965f6cfca 100644 --- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionRestartRebalanceDUnitTest.java +++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/PartitionedRegionRestartRebalanceDUnitTest.java @@ -18,7 +18,6 @@ import static org.assertj.core.api.Assertions.assertThat; import java.io.Serializable; -import java.util.Map; import java.util.Set; import org.apache.logging.log4j.LogManager; @@ -42,7 +41,7 @@ public class PartitionedRegionRestartRebalanceDUnitTest implements Serializable private static final int TOTAL_NUM_BUCKETS = 12; private static final Logger logger = LogManager.getLogger(); - private String REGION_NAME = getClass().getSimpleName();; + private String REGION_NAME = getClass().getSimpleName(); private VM[] datastores; @Rule @@ -57,9 +56,14 @@ public void setUp() throws Exception { for (int i = 0; i < datastores.length; i++) { datastores[i] = getVM(i); datastores[i].invoke(() -> cacheRule.createCache()); - datastores[i].invoke(() -> createRegion()); } - datastores[0].invoke(() -> feedData()); + } + + private LocalRegion createReplicateRegion() { + RegionFactory rf = cacheRule.getCache().createRegionFactory(); + rf.setDataPolicy(DataPolicy.REPLICATE); + LocalRegion region = (LocalRegion) rf.create(REGION_NAME); + return region; } private void createRegion() { @@ -102,18 +106,17 @@ private void verify() { break; } } - Map map = br.getVersionVector().getMemberToVersion(); - for (Object key : br.getVersionVector().getMemberToVersion().keySet()) { - logger.info(br.getFullPath() + ":" + key + ":" - + br.getVersionVector().getMemberToVersion().get(key)); - } - // The test proved that departedMemberSet is not growing - assertThat(departedMemberSet.size()).isLessThanOrEqualTo(datastores.length); + // The test proved that departedMemberSet is not growing: + assertThat(departedMemberSet.size()).isLessThanOrEqualTo(datastores.length - 1); } } @Test - public void restartAndRebalanceShouldNotIncreaseMemberToVersionMap() { + public void 
restartAndRebalanceShouldNotIncreaseMemberToVersionMap() throws InterruptedException { + for (int i = 0; i < datastores.length; i++) { + datastores[i].invoke(() -> createRegion()); + } + datastores[0].invoke(() -> feedData()); for (int i = 0; i < datastores.length * 10; i++) { datastores[i % datastores.length].invoke(() -> { cacheRule.getCache().close(); @@ -130,4 +133,42 @@ public void restartAndRebalanceShouldNotIncreaseMemberToVersionMap() { }); } } + + @Test + public void departedMembersShouldBeCleanedAfterGIIFinished() { + datastores[0].invoke(() -> { // member 89: :41001 + LocalRegion region = createReplicateRegion(); + region.put("key-0", "value-0"); + region.put("key-1", "value-1"); + }); + datastores[1].invoke(() -> { // member 90: :41002 + LocalRegion region = createReplicateRegion(); + }); + datastores[0].invoke(() -> cacheRule.getCache().close()); + datastores[1].invoke(() -> { + // member 90: since all entries are from member 89; member 89 is departed but will stay in map + LocalRegion region = (LocalRegion) cacheRule.getCache().getRegion(REGION_NAME); + // There are 2 members: member 90 and departed member 89 + assertThat(region.getVersionVector().getMemberToVersion().size()).isEqualTo(2); + assertThat(region.getVersionVector().getDepartedMembersSet().size()).isEqualTo(1); + }); + datastores[0].invoke(() -> { // member 92: :41001 + // member 92: GII entries from member 90. But departed member 89 will stay in map + cacheRule.createCache(); + LocalRegion region = createReplicateRegion(); + // There are 3 members: member 92, member 90, and departed member 89 + assertThat(region.getVersionVector().getMemberToVersion().size()).isEqualTo(3); + assertThat(region.getVersionVector().getDepartedMembersSet().size()).isEqualTo(1); + }); + datastores[1].invoke(() -> cacheRule.getCache().close()); + datastores[1].invoke(() -> { // 94 + // member 94: GII entries from member 92. But departed member 89 will stay in map + cacheRule.createCache(); + LocalRegion region = createReplicateRegion(); + // There are 3 members: member 94, member 92, and departed member 89. Member 90 is removed + // from map + assertThat(region.getVersionVector().getMemberToVersion().size()).isEqualTo(3); + assertThat(region.getVersionVector().getDepartedMembersSet().size()).isEqualTo(1); + }); + } } diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/entries/DiskEntryDunitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/entries/DiskEntryDunitTest.java new file mode 100644 index 000000000000..170da04a5bdd --- /dev/null +++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/entries/DiskEntryDunitTest.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
+ * + */ + +package org.apache.geode.internal.cache.entries; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import org.assertj.core.api.Assertions; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; + +import org.apache.geode.cache.CacheFactory; +import org.apache.geode.cache.DataPolicy; +import org.apache.geode.cache.Region; +import org.apache.geode.cache.client.ClientCache; +import org.apache.geode.cache.client.ClientCacheFactory; +import org.apache.geode.cache.client.ClientRegionShortcut; +import org.apache.geode.distributed.internal.InternalDistributedSystem; +import org.apache.geode.internal.cache.DiskStoreFactoryImpl; +import org.apache.geode.test.awaitility.GeodeAwaitility; +import org.apache.geode.test.dunit.AsyncInvocation; +import org.apache.geode.test.dunit.SerializableConsumerIF; +import org.apache.geode.test.dunit.rules.ClientVM; +import org.apache.geode.test.dunit.rules.ClusterStartupRule; +import org.apache.geode.test.dunit.rules.MemberVM; + +public class DiskEntryDunitTest { + private static final String LOG_PREFIX = "XXX: "; + private static final String REGION_NAME = "data"; + private static final String DISK_STORE_NAME = "data_store"; + @Rule + public final ClusterStartupRule clusterStartupRule = new ClusterStartupRule(7); + + private List servers; + private List clients; + private int locatorPort; + + @Before + public void init() { + MemberVM locator0 = clusterStartupRule.startLocatorVM(0); + locatorPort = locator0.getPort(); + servers = new ArrayList<>(); + clients = new ArrayList<>(); + + Properties serverProp = new Properties(); + serverProp.setProperty("off-heap-memory-size", "500m"); + IntStream.range(1, 5).forEach(serverNum -> servers + .add(clusterStartupRule.startServerVM(serverNum, serverProp, locatorPort))); + servers.forEach(server -> server.invoke(() -> { + DiskStoreFactoryImpl diskStoreFactory = + new DiskStoreFactoryImpl(ClusterStartupRule.getCache()); + diskStoreFactory.create(DISK_STORE_NAME); + ClusterStartupRule.getCache() + .createRegionFactory() + .setDataPolicy(DataPolicy.PERSISTENT_REPLICATE) + .setDiskStoreName(DISK_STORE_NAME) + .setDiskSynchronous(false) + .setOffHeap(true) + .create(REGION_NAME); + })); + System.out.println(LOG_PREFIX + "servers: " + servers); + + int port = locatorPort; + SerializableConsumerIF cacheSetup = cf -> { + cf.addPoolLocator("localhost", port); + // cf.setPoolReadTimeout(READ_TIMEOUT); + }; + + Properties clientProps = new Properties(); + + IntStream.range(5, 7).forEach(clientNum -> { + System.out.println(LOG_PREFIX + "clientNum: " + clientNum); + try { + clients.add(clusterStartupRule.startClientVM(clientNum, clientProps, cacheSetup)); + } catch (Exception e) { + System.out.println(LOG_PREFIX + "bad client: " + e); + } + System.out.println(LOG_PREFIX + "good client: " + clientNum); + }); + + clients.forEach(client -> client.invoke(() -> { + ClientCache clientCache = ClusterStartupRule.getClientCache(); + clientCache.createClientRegionFactory(ClientRegionShortcut.PROXY).create(REGION_NAME); + })); + } + + @Test + public void serverShutdownDoesNotTriggerInternalGemfireError() { + List> asyncInvocations = startPuts(); + + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + // disconnect server + String serverName = servers.get(1).invoke(() -> { + InternalDistributedSystem 
internalDistributedSystem = + ClusterStartupRule.getCache().getInternalDistributedSystem(); + String sName = internalDistributedSystem.getName(); + internalDistributedSystem.getCache().close(); + internalDistributedSystem.disconnect(); + return sName; + }); + + try { + Thread.sleep(4000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + int lPort = locatorPort; + + // re-create the cache on the server + servers.get(1).invoke(() -> { + Properties properties = new Properties(); + properties.put("name", serverName); + properties.put("locators", "localhost[" + lPort + "]"); + new CacheFactory(properties).create(); + }); + + try { + Thread.sleep(8000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + GeodeAwaitility.await() + .until(() -> asyncInvocations.stream().allMatch(AsyncInvocation::isDone)); + + for (AsyncInvocation asyncInvocation : asyncInvocations) { + Assertions.assertThatNoException().isThrownBy(asyncInvocation::get); + } + + System.out.println(LOG_PREFIX + "asyncInvocations complete: " + asyncInvocations); + } + + private List> startPuts() { + System.out.println(LOG_PREFIX + "clients: " + clients); + + return clients.stream().map(client -> client.invokeAsync(() -> { + ClientCache clientCache = ClusterStartupRule.getClientCache(); + int processId = clientCache.getDistributedSystem().getDistributedMember().getProcessId(); + System.out + .println(LOG_PREFIX + " readTimeout: " + clientCache.getDefaultPool().getReadTimeout()); + Region region = clientCache.getRegion(DiskEntryDunitTest.REGION_NAME); + int keyNum = 0; + long run_until = System.currentTimeMillis() + 20000; + + System.out.println(LOG_PREFIX + ": start " + "[" + processId + "]"); + do { + keyNum += 10; + int mapKeyNum = keyNum; + Map putMap = new HashMap<>(); + IntStream.range(0, 9).forEach(num -> { + String mapKey = "_key_" + (mapKeyNum + num); + putMap.put(mapKey, (mapKeyNum + num)); + }); + try { + region.putAll(putMap); + } catch (Throwable unexpected) { + // Report the unexpected exception and stop doing operations. 
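// Rethrowing below ends this AsyncInvocation exceptionally; the test body later
// surfaces the original cause via assertThatNoException().isThrownBy(asyncInvocation::get),
// so any putAll failure during the server bounce fails the test.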
+ System.out.println( + LOG_PREFIX + ": exception keyNum: " + keyNum + " [" + processId + "] " + unexpected); + throw unexpected; + } + } while (System.currentTimeMillis() < run_until); + System.out.println(LOG_PREFIX + ": finished keyNum: " + keyNum + " [" + processId + "]" + + " [region: " + DiskEntryDunitTest.REGION_NAME + "]"); + })).collect(Collectors.toList()); + } +} diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/LocalDataSetIndexingDUnitTest.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/LocalDataSetIndexingDUnitTest.java index 3c13fa1d7c95..373da16f9e4f 100644 --- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/LocalDataSetIndexingDUnitTest.java +++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/execute/LocalDataSetIndexingDUnitTest.java @@ -140,7 +140,7 @@ public void execute(FunctionContext context) { try { RegionFunctionContext rContext = (RegionFunctionContext) context; Region pr1 = rContext.getDataSet(); - LocalDataSet localCust = + LocalDataSet localCust = (LocalDataSet) PartitionRegionHelper.getLocalDataForContext(rContext); Map> colocatedRegions = PartitionRegionHelper.getColocatedRegions(pr1); diff --git a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/functions/LocalDataSetFunction.java b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/functions/LocalDataSetFunction.java index 82226479049b..5e24b1f03dbd 100755 --- a/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/functions/LocalDataSetFunction.java +++ b/geode-core/src/distributedTest/java/org/apache/geode/internal/cache/functions/LocalDataSetFunction.java @@ -20,14 +20,14 @@ import java.util.Set; import org.apache.geode.cache.Region; -import org.apache.geode.cache.execute.FunctionAdapter; +import org.apache.geode.cache.execute.Function; import org.apache.geode.cache.execute.FunctionContext; import org.apache.geode.cache.execute.RegionFunctionContext; import org.apache.geode.cache.partition.PartitionRegionHelper; import org.apache.geode.internal.Assert; import org.apache.geode.internal.cache.LocalDataSet; -public class LocalDataSetFunction extends FunctionAdapter { +public class LocalDataSetFunction implements Function { private final boolean optimizeForWrite; @@ -36,27 +36,30 @@ public LocalDataSetFunction(boolean optimizeForWrite) { } @Override - public void execute(FunctionContext context) { - RegionFunctionContext rContext = (RegionFunctionContext) context; - Region cust = rContext.getDataSet(); - LocalDataSet localCust = (LocalDataSet) PartitionRegionHelper.getLocalDataForContext(rContext); + public void execute(FunctionContext context) { + RegionFunctionContext rContext = (RegionFunctionContext) context; + Region cust = rContext.getDataSet(); + LocalDataSet localCust = + (LocalDataSet) PartitionRegionHelper.getLocalDataForContext(rContext); Map> colocatedRegions = PartitionRegionHelper.getColocatedRegions(cust); Map> localColocatedRegions = PartitionRegionHelper.getLocalColocatedRegions(rContext); Assert.assertTrue(colocatedRegions.size() == 2); - Set custKeySet = cust.keySet(); - Set localCustKeySet = localCust.keySet(); - - Region ord = colocatedRegions.get(SEPARATOR + "OrderPR"); - LocalDataSet localOrd = (LocalDataSet) localColocatedRegions.get(SEPARATOR + "OrderPR"); - Set ordKeySet = ord.keySet(); - Set localOrdKeySet = localOrd.keySet(); - - Region ship = colocatedRegions.get(SEPARATOR + "ShipmentPR"); - LocalDataSet 
localShip = (LocalDataSet) localColocatedRegions.get(SEPARATOR + "ShipmentPR"); - Set shipKeySet = ship.keySet(); - Set localShipKeySet = localShip.keySet(); + Set custKeySet = cust.keySet(); + Set localCustKeySet = localCust.keySet(); + + Region ord = colocatedRegions.get(SEPARATOR + "OrderPR"); + LocalDataSet localOrd = + (LocalDataSet) localColocatedRegions.get(SEPARATOR + "OrderPR"); + Set ordKeySet = ord.keySet(); + Set localOrdKeySet = localOrd.keySet(); + + Region ship = colocatedRegions.get(SEPARATOR + "ShipmentPR"); + LocalDataSet localShip = + (LocalDataSet) localColocatedRegions.get(SEPARATOR + "ShipmentPR"); + Set shipKeySet = ship.keySet(); + Set localShipKeySet = localShip.keySet(); Assert.assertTrue(localCust.getBucketSet().size() == localOrd.getBucketSet().size()); Assert.assertTrue(localCust.getBucketSet().size() == localShip.getBucketSet().size()); diff --git a/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/ExecutionContextIntegrationTest.java b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/ExecutionContextIntegrationTest.java index 974567dd4f73..589bca5cbafb 100644 --- a/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/ExecutionContextIntegrationTest.java +++ b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/ExecutionContextIntegrationTest.java @@ -291,7 +291,7 @@ public void testCurrScopeDpndntItrsBasedOnSingleIndpndntItr() throws Exception { .isEqualTo(rIter.getInternalId()); } - List list1 = context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(indItr); + List list1 = context.getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator(indItr); assertThat(list1.size()) .as("The dependency set returned incorrect result with size =" + list1.size()) .isEqualTo(4); diff --git a/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexManagerIntegrationTest.java b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexManagerIntegrationTest.java index 4cfd145760b6..44207719b7f3 100644 --- a/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexManagerIntegrationTest.java +++ b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexManagerIntegrationTest.java @@ -17,6 +17,7 @@ import static org.apache.geode.cache.Region.SEPARATOR; import static org.apache.geode.cache.RegionShortcut.PARTITION; import static org.apache.geode.test.awaitility.GeodeAwaitility.await; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatCode; @@ -90,7 +91,7 @@ private Object[] getRegionAndIndexMaintenanceTypes() { }; } - private void waitForIndexUpdaterTask(boolean synchronousMaintenance, Region region) { + private void waitForIndexUpdaterTask(boolean synchronousMaintenance, Region region) { if (!synchronousMaintenance) { InternalRegion internalRegion = (InternalRegion) region; await().untilAsserted( @@ -199,8 +200,8 @@ public void indexShouldBeMarkedAsInvalidWhenAddMappingOperationFailsAfterEntryDe if (!PARTITION.equals(regionShortcut)) { ((RangeIndex) index).valueToEntriesMap.clear(); } else { - @SuppressWarnings("unchecked") - List bucketRangeIndexList = ((PartitionedIndex) index).getBucketIndexes(); + List bucketRangeIndexList = + uncheckedCast(((PartitionedIndex) index).getBucketIndexes()); bucketRangeIndexList.forEach(rangeIndex 
-> rangeIndex.valueToEntriesMap.clear()); } } diff --git a/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexUseJUnitTest.java b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexUseJUnitTest.java index e16615169abc..ebbec78d517e 100644 --- a/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexUseJUnitTest.java +++ b/geode-core/src/integrationTest/java/org/apache/geode/cache/query/internal/index/IndexUseJUnitTest.java @@ -1398,14 +1398,14 @@ public void testIndexesRemainInUseAfterARebalance() throws Exception { // Get the first index entry in the PartitionedIndex bucketIndexes and delete the index from it // (to simulate what happens when a bucket is moved) - Map.Entry> firstIndexEntry = index.getFirstBucketIndex(); + Map.Entry, List> firstIndexEntry = index.getFirstBucketIndex(); assertTrue(!firstIndexEntry.getValue().isEmpty()); index.removeFromBucketIndexes(firstIndexEntry.getKey(), firstIndexEntry.getValue().iterator().next()); // Verify the index was removed from the entry and the entry was removed from the bucket indexes assertTrue(firstIndexEntry.getValue().isEmpty()); - Map.Entry> nextFirstIndexEntry = index.getFirstBucketIndex(); + Map.Entry, List> nextFirstIndexEntry = index.getFirstBucketIndex(); assertTrue(!nextFirstIndexEntry.getValue().isEmpty()); // Run query again diff --git a/geode-core/src/integrationTest/java/org/apache/geode/distributed/DistributedSystemIntegrationTest.java b/geode-core/src/integrationTest/java/org/apache/geode/distributed/DistributedSystemIntegrationTest.java index 8075cb5da276..44cc049fc9d6 100755 --- a/geode-core/src/integrationTest/java/org/apache/geode/distributed/DistributedSystemIntegrationTest.java +++ b/geode-core/src/integrationTest/java/org/apache/geode/distributed/DistributedSystemIntegrationTest.java @@ -20,9 +20,13 @@ import java.io.File; import java.io.FileWriter; +import java.net.InetAddress; import java.net.URL; +import java.net.UnknownHostException; import java.util.Properties; +import java.util.Set; +import org.junit.After; import org.junit.Rule; import org.junit.Test; import org.junit.contrib.java.lang.system.RestoreSystemProperties; @@ -30,6 +34,7 @@ import org.junit.rules.TemporaryFolder; import org.junit.rules.TestName; +import org.apache.geode.internal.inet.LocalHostUtil; import org.apache.geode.test.junit.categories.MembershipTest; /** @@ -38,6 +43,8 @@ @Category(MembershipTest.class) public class DistributedSystemIntegrationTest { + private DistributedSystem system; + @Rule public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties(); @@ -47,6 +54,13 @@ public class DistributedSystemIntegrationTest { @Rule public TestName testName = new TestName(); + @After + public void tearDown() { + if (system != null) { + system.disconnect(); + } + } + @Test public void getPropertiesFileShouldUsePathInSystemProperty() throws Exception { File propertiesFile = temporaryFolder.newFile("test.properties"); @@ -92,4 +106,15 @@ public void getSecurityPropertiesFileUrlShouldUsePathInSystemProperty() throws E assertThat(value).isEqualTo(expectedPropertiesURL); } + + @Test + public void findDistributedMembersForLocalHostReturnsOneMember() throws UnknownHostException { + Properties properties = new Properties(); + system = DistributedSystem.connect(properties); + InetAddress localHost = LocalHostUtil.getLocalHost(); + + Set members = system.findDistributedMembers(localHost); + + assertThat(members).hasSize(1); + } } diff 
--git a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/OverflowOplogFlushTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/OverflowOplogFlushTest.java index 4d36c843db4a..3ad4fa9ea59c 100644 --- a/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/OverflowOplogFlushTest.java +++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/cache/OverflowOplogFlushTest.java @@ -15,228 +15,265 @@ package org.apache.geode.internal.cache; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doCallRealMethod; -import static org.mockito.Mockito.spy; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; +import java.nio.channels.FileLock; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.util.concurrent.atomic.AtomicInteger; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TestName; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - /** * Testing recovery from failures writing OverflowOplog entries */ public class OverflowOplogFlushTest extends DiskRegionTestingBase { + @Test + public void testAsyncChannelWriteRetriesOnFailureDuringFlush() throws Exception { + OverflowOplog oplog = getOverflowOplog(); + int numberOfWriteFailures = 1; + doChannelFlushWithFailures(oplog, numberOfWriteFailures); + } - @Rule - public ExpectedException expectedException = ExpectedException.none(); - - // How many times to fake the write failures - private int nFakeChannelWrites = 0; - private OverflowOplog ol = null; - private ByteBuffer bb1 = null; - private ByteBuffer bb2 = null; - private final ByteBuffer[] bbArray = new ByteBuffer[2]; - private FileChannel ch; - private FileChannel spyCh; - - @Rule - public TestName name = new TestName(); - - class FakeChannelWriteBB implements Answer { - - @Override - public Integer answer(InvocationOnMock invocation) throws Throwable { - return fakeWriteBB(ol, bb1); - } + @Test + public void testChannelWriteRetriesOnFailureDuringFlush() throws Exception { + OverflowOplog oplog = getOverflowOplog(); + int numberOfWriteFailures = 1; + doChannelFlushWithFailures(oplog, numberOfWriteFailures); } - private int fakeWriteBB(OverflowOplog ol, ByteBuffer bb) throws IOException { - if (nFakeChannelWrites > 0) { - bb.position(bb.limit()); - --nFakeChannelWrites; - return 0; - } - doCallRealMethod().when(spyCh).write(bb); - return spyCh.write(bb); + @Test + public void testChannelRecoversFromWriteFailureRepeatedRetriesDuringFlush() throws Exception { + OverflowOplog oplog = getOverflowOplog(); + int numberOfWriteFailures = 3; + doChannelFlushWithFailures(oplog, numberOfWriteFailures); } - private void verifyBB(ByteBuffer bb, byte[] src) { - bb.flip(); - for (int i = 0; i < src.length; ++i) { - assertEquals("Channel contents does not match expected at index " + i, src[i], bb.get()); - } + @Test + public void testOplogFlushThrowsIOExceptionWhenNumberOfChannelWriteRetriesExceedsLimit() { + OverflowOplog oplog = getOverflowOplog(); + int numberOfFailures = 6; // exceeds the retry limit in Oplog + assertThatThrownBy(() -> doChannelFlushWithFailures(oplog, 
numberOfFailures)) + .isInstanceOf(IOException.class); } - class FakeChannelWriteArrayBB implements Answer { - @Override - public Long answer(InvocationOnMock invocation) throws Throwable { - System.out.println("### in FakeChannelWriteArrayBB.answer :"); - return fakeWriteArrayBB(bbArray); - } + @Test + public void testOverflowOplogByteArrayFlush() throws Exception { + OverflowOplog oplog = getOverflowOplog(); + doPartialChannelByteArrayFlushForOverflowOpLog(oplog); } - /** - * This method tries to write half of the byte buffer to the channel. - */ - private long fakeWriteArrayBB(ByteBuffer[] bbArray) throws IOException { - nFakeChannelWrites++; - for (ByteBuffer b : bbArray) { - int numFakeWrite = b.limit() / 2; - if (b.position() <= 0) { - b.position(numFakeWrite); - return numFakeWrite; - } else if (b.position() == numFakeWrite) { - b.position(b.limit()); - return b.limit() - numFakeWrite; - } - } - return 0; + private OverflowOplog getOverflowOplog() { + DiskRegionProperties props = new DiskRegionProperties(); + props.setOverFlowCapacity(1); + region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props); + region.put("K1", "v1"); // add two entries to make it overflow + region.put("K2", "v2"); + DiskRegion dr = ((LocalRegion) region).getDiskRegion(); + OverflowOplog oplog = dr.getDiskStore().overflowOplogs.getActiveOverflowOplog(); + assertThat(oplog) + .as("oplog") + .isNotNull(); + return oplog; } private void doChannelFlushWithFailures(OverflowOplog oplog, int numFailures) throws IOException { - nFakeChannelWrites = numFailures; - ol = oplog; - ch = ol.getFileChannel(); - spyCh = spy(ch); - ol.testSetCrfChannel(spyCh); + AtomicInteger numberOfRemainingFailures = new AtomicInteger(numFailures); + FileChannel fileChannelThatFails = new FileChannelWrapper(oplog.getFileChannel()) { + @Override + public int write(ByteBuffer buffer) throws IOException { + if (numberOfRemainingFailures.get() > 0) { + // Force channel.write() failure + buffer.position(buffer.limit()); + numberOfRemainingFailures.getAndDecrement(); + return 0; + } + return delegate.write(buffer); + } + }; + oplog.testSetCrfChannel(fileChannelThatFails); byte[] entry1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}; byte[] entry2 = {100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}; - bb1 = ol.getWriteBuf(); + ByteBuffer oplogWriteBuffer = oplog.getWriteBuf(); try { - // Force channel.write() failures when writing the first entry - doAnswer(new FakeChannelWriteBB()).when(spyCh).write(bb1); - long chStartPos = ol.getFileChannel().position(); - bb1.clear(); - bb1.put(entry1); - ol.flush(); + FileChannel fileChannel = oplog.getFileChannel(); + long chStartPos = fileChannel.position(); + oplogWriteBuffer.clear(); + oplogWriteBuffer.put(entry1); + oplog.flush(); // Write the 2nd entry without forced channel failures - nFakeChannelWrites = 0; - bb1 = ol.getWriteBuf(); - bb1.clear(); - bb1.put(entry2); - ol.flush(); - long chEndPos = ol.getFileChannel().position(); - assertEquals("Change in channel position does not equal the size of the data flushed", - entry1.length + entry2.length, chEndPos - chStartPos); + numberOfRemainingFailures.set(0); + oplogWriteBuffer = oplog.getWriteBuf(); + oplogWriteBuffer.clear(); + oplogWriteBuffer.put(entry2); + oplog.flush(); + long chEndPos = fileChannel.position(); + assertThat(chEndPos - chStartPos) + .as("change in channel position") + .isEqualTo(entry1.length + entry2.length); ByteBuffer dst = 
ByteBuffer.allocateDirect(entry1.length); - ol.getFileChannel().position(chStartPos); - ol.getFileChannel().read(dst); + fileChannel.position(chStartPos); + fileChannel.read(dst); verifyBB(dst, entry1); } finally { region.destroyRegion(); } } - @Test - public void testAsyncChannelWriteRetriesOnFailureDuringFlush() throws Exception { - DiskRegionProperties props = new DiskRegionProperties(); - props.setOverFlowCapacity(1); - region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props); - region.put("K1", "v1"); // add two entries to make it overflow - region.put("K2", "v2"); - DiskRegion dr = ((LocalRegion) region).getDiskRegion(); - OverflowOplog oplog = dr.getDiskStore().overflowOplogs.getActiveOverflowOplog(); - assertNotNull("Unexpected null Oplog for " + dr.getName(), oplog); - doChannelFlushWithFailures(oplog, 1 /* write failure */); - } - - @Test - public void testChannelWriteRetriesOnFailureDuringFlush() throws Exception { - DiskRegionProperties props = new DiskRegionProperties(); - props.setOverFlowCapacity(1); - region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props); - region.put("K1", "v1"); // add two entries to make it overflow - region.put("K2", "v2"); - DiskRegion dr = ((LocalRegion) region).getDiskRegion(); - OverflowOplog oplog = dr.getDiskStore().overflowOplogs.getActiveOverflowOplog(); - assertNotNull("Unexpected null Oplog for " + dr.getName(), oplog); - doChannelFlushWithFailures(oplog, 1 /* write failure */); - } - - @Test - public void testChannelRecoversFromWriteFailureRepeatedRetriesDuringFlush() throws Exception { - DiskRegionProperties props = new DiskRegionProperties(); - props.setOverFlowCapacity(1); - region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props); - region.put("K1", "v1"); // add two entries to make it overflow - region.put("K2", "v2"); - DiskRegion dr = ((LocalRegion) region).getDiskRegion(); - OverflowOplog oplog = dr.getDiskStore().overflowOplogs.getActiveOverflowOplog(); - assertNotNull("Unexpected null Oplog for " + dr.getName(), oplog); - - doChannelFlushWithFailures(oplog, 3 /* write failures */); - } - - @Test - public void testOplogFlushThrowsIOExceptioniWhenNumberOfChannelWriteRetriesExceedsLimit() - throws Exception { - expectedException.expect(IOException.class); - DiskRegionProperties props = new DiskRegionProperties(); - props.setOverFlowCapacity(1); - region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props); - region.put("K1", "v1"); // add two entries to make it overflow - region.put("K2", "v2"); - DiskRegion dr = ((LocalRegion) region).getDiskRegion(); - OverflowOplog oplog = dr.getDiskStore().overflowOplogs.getActiveOverflowOplog(); - assertNotNull("Unexpected null Oplog for " + dr.getName(), oplog); - - doChannelFlushWithFailures(oplog, 6 /* exceeds the retry limit in Oplog */); - } - private void doPartialChannelByteArrayFlushForOverflowOpLog(OverflowOplog oplog) throws IOException { - OverflowOplog ol = oplog; - FileChannel ch = ol.getFileChannel(); - FileChannel spyCh = spy(ch); - ol.testSetCrfChannel(spyCh); + AtomicInteger numberOfFakeWrites = new AtomicInteger(); + FileChannel fileChannelThatFails = new FileChannelWrapper(oplog.getFileChannel()) { + // Pretend to write partial data from each buffer. 
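// Each call advances exactly one buffer by half of its limit: a buffer at
// position 0 moves to limit/2, and a buffer already at limit/2 moves to its
// limit. Two 20-byte buffers therefore take four write() calls to drain,
// which is what the numberOfFakeWrites assertion below verifies.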
+ @Override + public long write(ByteBuffer[] buffers, int offset, int length) { + numberOfFakeWrites.incrementAndGet(); + for (ByteBuffer buffer : buffers) { + int bufferPosition = buffer.position(); + int bufferLimit = buffer.limit(); + int halfOfBufferLimit = bufferLimit / 2; + if (bufferPosition <= 0) { + buffer.position(halfOfBufferLimit); + return halfOfBufferLimit; + } else if (bufferPosition == halfOfBufferLimit) { + buffer.position(bufferLimit); + return bufferLimit - halfOfBufferLimit; + } + } + return 0; + } + }; + oplog.testSetCrfChannel(fileChannelThatFails); byte[] entry1 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}; byte[] entry2 = {100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}; - bbArray[0] = bb1 = ByteBuffer.allocate(entry1.length).put(entry1); - bbArray[1] = bb2 = ByteBuffer.allocate(entry2.length).put(entry2); + ByteBuffer entry1Buffer = ByteBuffer.allocate(entry1.length).put(entry1); + ByteBuffer entry2Buffer = ByteBuffer.allocate(entry2.length).put(entry2); try { - // Set fake channel, that pretends to write partial data. - doAnswer(new FakeChannelWriteArrayBB()).when(spyCh).write(bbArray); - - bb2.flip(); - ol.flush(bb1, bb2); - assertEquals("Incomplete flush calls.", 4, nFakeChannelWrites); + entry2Buffer.flip(); + oplog.flush(entry1Buffer, entry2Buffer); + assertThat(numberOfFakeWrites) + .as("number of incomplete flush calls") + .hasValue(4); } finally { region.destroyRegion(); } } - @Test - public void testOverflowOplogByteArrayFlush() throws Exception { - DiskRegionProperties props = new DiskRegionProperties(); - props.setOverFlowCapacity(1); - region = DiskRegionHelperFactory.getSyncOverFlowOnlyRegion(cache, props); - region.put("K1", "v1"); - region.put("K2", "v2"); + private static void verifyBB(ByteBuffer bb, byte[] src) { + bb.flip(); + for (int i = 0; i < src.length; ++i) { + assertThat(bb.get()) + .as("byte expected at position " + i) + .isEqualTo(src[i]); + } + } - DiskRegion dr = ((LocalRegion) region).getDiskRegion(); - OverflowOplog oplog = dr.getDiskStore().overflowOplogs.getActiveOverflowOplog(); - assertNotNull("Unexpected null Oplog", oplog); + static class FileChannelWrapper extends FileChannel { + protected final FileChannel delegate; - doPartialChannelByteArrayFlushForOverflowOpLog(oplog); + FileChannelWrapper(FileChannel delegate) { + this.delegate = delegate; + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return delegate.read(dst); + } + + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + return delegate.read(dsts, offset, length); + } + + @Override + public int write(ByteBuffer src) throws IOException { + return delegate.write(src); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + return delegate.write(srcs, offset, length); + } + + @Override + public long position() throws IOException { + return delegate.position(); + } + + @Override + public FileChannel position(long newPosition) throws IOException { + return delegate.position(newPosition); + } + + @Override + public long size() throws IOException { + return delegate.size(); + } + + @Override + public FileChannel truncate(long size) throws IOException { + return delegate.truncate(size); + } + + @Override + public void force(boolean metaData) throws IOException { + delegate.force(metaData); + } + + @Override + public long transferTo(long position, long count, WritableByteChannel target)
+ throws IOException { + return delegate.transferTo(position, count, target); + } + + @Override + public long transferFrom(ReadableByteChannel src, long position, long count) + throws IOException { + return delegate.transferFrom(src, position, count); + } + + @Override + public int read(ByteBuffer dst, long position) throws IOException { + return delegate.read(dst, position); + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + return delegate.write(src, position); + } + + @Override + public MappedByteBuffer map(MapMode mode, long position, long size) throws IOException { + return delegate.map(mode, position, size); + } + + @Override + public FileLock lock(long position, long size, boolean shared) throws IOException { + return delegate.lock(position, size, shared); + } + + @Override + public FileLock tryLock(long position, long size, boolean shared) throws IOException { + return delegate.tryLock(position, size, shared); + } + + @Override + protected void implCloseChannel() throws IOException { + delegate.close(); + } } } diff --git a/geode-core/src/integrationTest/java/org/apache/geode/internal/util/ProductVersionUtilTest.java b/geode-core/src/integrationTest/java/org/apache/geode/internal/util/ProductVersionUtilTest.java index 24a37fd83e64..5fa8b2ce5dae 100644 --- a/geode-core/src/integrationTest/java/org/apache/geode/internal/util/ProductVersionUtilTest.java +++ b/geode-core/src/integrationTest/java/org/apache/geode/internal/util/ProductVersionUtilTest.java @@ -45,7 +45,7 @@ public void getComponentVersionsReturnsGeodeVersionAndFakeVersion() { @Test public void appendFullVersionAppendsGeodeVersionAndFakeVersion() throws IOException { assertThat(ProductVersionUtil.appendFullVersion(new StringBuilder())) - .contains("Apache Geode") + .contains("VMware Tanzu GemFire") .contains("Source-Revision") .contains("Build-Id") .contains("Fake Distribution") @@ -55,7 +55,7 @@ public void appendFullVersionAppendsGeodeVersionAndFakeVersion() throws IOExcept @Test public void getFullVersionContainsGeodeVersionAndFakeVersion() { assertThat(ProductVersionUtil.getFullVersion()) - .contains("Apache Geode") + .contains("VMware Tanzu GemFire") .contains("Source-Revision") .contains("Build-Id") .contains("Fake Distribution") diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/excludedClasses.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/excludedClasses.txt index 835662d7a827..cd6336d86f50 100644 --- a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/excludedClasses.txt +++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/excludedClasses.txt @@ -73,7 +73,7 @@ org/apache/geode/security/ResourcePermission org/apache/geode/security/ResourcePermission$Operation org/apache/geode/security/ResourcePermission$Resource org/apache/geode/security/ResourcePermission$Target -org/apache/geode/internal/cache/PartitionedRegion$6 +org/apache/geode/internal/cache/PartitionedRegion$3 org/apache/geode/internal/cache/TXFarSideCMTracker$2 org/apache/geode/internal/cache/TXManagerImpl$1 org/apache/geode/internal/cache/partitioned/FetchEntriesMessage$FetchEntriesResponse$1 diff --git a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt index 84309aac4f83..7402058985f0 100644 --- 
a/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt +++ b/geode-core/src/integrationTest/resources/org/apache/geode/codeAnalysis/sanctionedDataSerializables.txt @@ -791,8 +791,8 @@ fromData,46 toData,43 org/apache/geode/internal/cache/CreateRegionProcessor$CreateRegionReplyMessage,2 -fromData,164 -toData,208 +fromData,166 +toData,205 org/apache/geode/internal/cache/DestroyOperation$DestroyMessage,2 fromData,46 @@ -861,14 +861,14 @@ fromData,26 toData,23 org/apache/geode/internal/cache/DistributedPutAllOperation$EntryVersionsList,2 -fromData,272 -toData,292 +fromData,275 +toData,298 org/apache/geode/internal/cache/DistributedPutAllOperation$PutAllEntryData,1 toData,252 org/apache/geode/internal/cache/DistributedPutAllOperation$PutAllMessage,2 -fromData,214 +fromData,194 toData,188 org/apache/geode/internal/cache/DistributedRegionFunctionStreamingMessage,2 @@ -1287,7 +1287,7 @@ fromData,22 toData,19 org/apache/geode/internal/cache/execute/FunctionRemoteContext,2 -fromData,187 +fromData,193 toData,165 org/apache/geode/internal/cache/ha/HARegionQueue$DispatchedAndCurrentEvents,2 @@ -1535,7 +1535,7 @@ fromData,1 toData,1 org/apache/geode/internal/cache/partitioned/OfflineMemberDetailsImpl,2 -fromData,97 +fromData,103 toData,94 org/apache/geode/internal/cache/partitioned/PRLoad,2 @@ -1588,7 +1588,7 @@ toData,55 org/apache/geode/internal/cache/partitioned/QueryMessage,2 fromData,64 -toData,64 +toData,61 org/apache/geode/internal/cache/partitioned/RegionAdvisor$BucketProfileAndId,2 fromData,61 diff --git a/geode-core/src/main/java/org/apache/geode/DataSerializer.java b/geode-core/src/main/java/org/apache/geode/DataSerializer.java index 5690d0c1f650..66b829c32741 100644 --- a/geode-core/src/main/java/org/apache/geode/DataSerializer.java +++ b/geode-core/src/main/java/org/apache/geode/DataSerializer.java @@ -2078,25 +2078,7 @@ public static byte[][] readArrayOfByteArrays(DataInput in) * @see #readArrayList */ public static void writeArrayList(ArrayList list, DataOutput out) throws IOException { - - InternalDataSerializer.checkOut(out); - - int size; - if (list == null) { - size = -1; - } else { - size = list.size(); - } - InternalDataSerializer.writeArrayLength(size, out); - if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) { - logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing ArrayList with {} elements: {}", size, - list); - } - if (size > 0) { - for (int i = 0; i < size; i++) { - writeObject(list.get(i), out); - } - } + InternalDataSerializer.writeList(list, out); } @@ -3396,7 +3378,7 @@ public void setContext(Object/* ClientProxyMembershipID */ context) { * maps a class to its enum constants. 
*/ @MakeNotStatic - private static final ConcurrentMap<Class<? extends Enum>, Enum[]> knownEnums = + private static final ConcurrentMap<Class<? extends Enum<?>>, Enum<?>[]> knownEnums = new ConcurrentHashMap<>(); /** @@ -3406,7 +3388,7 @@ public void setContext(Object/* ClientProxyMembershipID */ context) { * @return enum constants for the given class */ @SuppressWarnings("unchecked") - private static <E extends Enum> E[] getEnumConstantsForClass(Class<E> clazz) { + private static <E extends Enum<E>> E[] getEnumConstantsForClass(Class<E> clazz) { E[] returnVal = (E[]) knownEnums.get(clazz); if (returnVal == null) { returnVal = clazz.getEnumConstants(); @@ -3429,7 +3411,7 @@ private static <E extends Enum> E[] getEnumConstantsForClass(Class<E> clazz) { * @since GemFire 6.5 * @throws IOException if a problem occurs while writing to out */ - public static void writeEnum(Enum e, DataOutput out) throws IOException { + public static void writeEnum(Enum<?> e, DataOutput out) throws IOException { InternalDataSerializer.checkOut(out); diff --git a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/ParallelAsyncEventQueueImpl.java b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/ParallelAsyncEventQueueImpl.java index 4afb51d8724e..44e98b6f9af3 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/ParallelAsyncEventQueueImpl.java +++ b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/ParallelAsyncEventQueueImpl.java @@ -154,7 +154,6 @@ public String toString() { @Override public void fillInProfile(Profile profile) { - assert profile instanceof GatewaySenderProfile; GatewaySenderProfile pf = (GatewaySenderProfile) profile; pf.Id = getId(); pf.remoteDSId = getRemoteDSId(); @@ -181,8 +180,7 @@ public void fillInProfile(Profile profile) { @Override public void setModifiedEventId(EntryEventImpl clonedEvent) { - int bucketId = -1; - // merged from 42004 + final int bucketId; if (clonedEvent.getRegion() instanceof DistributedRegion) { bucketId = PartitionedRegionHelper.getHashKey(clonedEvent.getKey(), getMaxParallelismForReplicatedRegion()); diff --git a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/SerialAsyncEventQueueImpl.java b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/SerialAsyncEventQueueImpl.java index 1713feff76aa..9995430df600 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/SerialAsyncEventQueueImpl.java +++ b/geode-core/src/main/java/org/apache/geode/cache/asyncqueue/internal/SerialAsyncEventQueueImpl.java @@ -248,9 +248,7 @@ public void setModifiedEventId(EntryEventImpl clonedEvent) { EventID originalEventId = clonedEvent.getEventId(); long originalThreadId = originalEventId.getThreadID(); long newThreadId = originalThreadId; - if (ThreadIdentifier.isWanTypeThreadID(newThreadId)) { - // This thread id has already been converted. Do nothing.
- } else { + if (!ThreadIdentifier.isWanTypeThreadID(newThreadId)) { newThreadId = ThreadIdentifier.createFakeThreadIDForParallelGSPrimaryBucket(0, originalThreadId, getEventIdIndex()); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java index 4ab8ff51712f..4b4df5514be1 100755 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java +++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientMetadataService.java @@ -14,8 +14,6 @@ */ package org.apache.geode.cache.client.internal; -import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; - import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -98,11 +96,13 @@ public ClientMetadataService(Cache cache) { private final Map> colocatedPRAdvisors = new ConcurrentHashMap<>(); - private PartitionResolver getResolver(Region r, Object key, Object callbackArgument) { + @SuppressWarnings("unchecked") + private PartitionResolver getResolver(Region r, Object key, + Object callbackArgument) { // First choice is one associated with the region final String regionFullPath = r.getFullPath(); ClientPartitionAdvisor advisor = getClientPartitionAdvisor(regionFullPath); - PartitionResolver result = null; + PartitionResolver result = null; if (advisor != null) { result = advisor.getPartitionResolver(); } @@ -112,35 +112,36 @@ private PartitionResolver getResolver(Region r, Object key, Object callbackArgum } // Second is the key - if (key != null && key instanceof PartitionResolver) { - return (PartitionResolver) key; + if (key instanceof PartitionResolver) { + return (PartitionResolver) key; } // Third is the callback argument - if (callbackArgument != null && callbackArgument instanceof PartitionResolver) { - return (PartitionResolver) callbackArgument; + if (callbackArgument instanceof PartitionResolver) { + return (PartitionResolver) callbackArgument; } // There is no resolver. 
return null; } - public ServerLocation getBucketServerLocation(Region region, Operation operation, Object key, - Object value, Object callbackArg) { + public ServerLocation getBucketServerLocation(Region region, Operation operation, + K key, + V value, Object callbackArg) { ClientPartitionAdvisor prAdvisor = getClientPartitionAdvisor(region.getFullPath()); if (prAdvisor == null) { return null; } int totalNumberOfBuckets = prAdvisor.getTotalNumBuckets(); - final PartitionResolver resolver = getResolver(region, key, callbackArg); + final PartitionResolver resolver = getResolver(region, key, callbackArg); Object resolveKey; - EntryOperation entryOp = null; + EntryOperation entryOp = null; if (resolver == null) { // client has not registered PartitionResolver // Assuming even PR at server side is not using PartitionResolver resolveKey = key; } else { - entryOp = new EntryOperationImpl(region, operation, key, value, callbackArg); + entryOp = new EntryOperationImpl<>(region, operation, key, value, callbackArg); resolveKey = resolver.getRoutingObject(entryOp); if (resolveKey == null) { throw new IllegalStateException( @@ -149,18 +150,14 @@ public ServerLocation getBucketServerLocation(Region region, Operation operation } int bucketId; if (resolver instanceof FixedPartitionResolver) { - if (entryOp == null) { - entryOp = new EntryOperationImpl(region, Operation.FUNCTION_EXECUTION, key, null, null); - } - String partition = ((FixedPartitionResolver) resolver).getPartitionName(entryOp, + String partition = ((FixedPartitionResolver) resolver).getPartitionName(entryOp, prAdvisor.getFixedPartitionNames()); if (partition == null) { - Object[] prms = new Object[] {region.getName(), resolver}; throw new IllegalStateException( String.format("For region %s, partition resolver %s returned partition name null", - prms)); + region.getName(), resolver)); } else { - bucketId = prAdvisor.assignFixedBucketId(region, partition, resolveKey); + bucketId = prAdvisor.assignFixedBucketId(partition, resolveKey); if (bucketId == -1) { return null; } @@ -179,7 +176,7 @@ public ServerLocation getBucketServerLocation(Region region, Operation operation return location; } - private ServerLocation getServerLocation(Region region, Operation operation, int bucketId) { + private ServerLocation getServerLocation(Region region, Operation operation, int bucketId) { final String regionFullPath = region.getFullPath(); ClientPartitionAdvisor prAdvisor = getClientPartitionAdvisor(regionFullPath); if (prAdvisor == null) { @@ -198,23 +195,23 @@ private ServerLocation getServerLocation(Region region, Operation operation, int } } - public Map getServerToFilterMap(final Collection routingKeys, - final Region region, boolean primaryMembersNeeded) { + public Map> getServerToFilterMap(final Collection routingKeys, + final Region region, boolean primaryMembersNeeded) { return getServerToFilterMap(routingKeys, region, primaryMembersNeeded, false); } - public Map getServerToFilterMap(final Collection routingKeys, - final Region region, boolean primaryMembersNeeded, boolean bucketsAsFilter) { + public Map> getServerToFilterMap(final Collection routingKeys, + final Region region, boolean primaryMembersNeeded, boolean bucketsAsFilter) { final String regionFullPath = region.getFullPath(); ClientPartitionAdvisor prAdvisor = getClientPartitionAdvisor(regionFullPath); if (prAdvisor == null || prAdvisor.adviseRandomServerLocation() == null) { scheduleGetPRMetaData((InternalRegion) region, false); return null; } - Map bucketToKeysMap = + Map> 
bucketToKeysMap = groupByBucketOnClientSide(region, prAdvisor, routingKeys, bucketsAsFilter); - Map serverToKeysMap = new HashMap<>(); + Map> serverToKeysMap = new HashMap<>(); Map> serverToBuckets = groupByServerToBuckets(prAdvisor, bucketToKeysMap.keySet(), primaryMembersNeeded, region); @@ -227,15 +224,15 @@ public Map getServerToFilterMap(final Collection routingKey return null; } - for (Map.Entry entry : serverToBuckets.entrySet()) { - ServerLocation server = (ServerLocation) entry.getKey(); - Set buckets = uncheckedCast(entry.getValue()); + for (Map.Entry> entry : serverToBuckets.entrySet()) { + ServerLocation server = entry.getKey(); + Set buckets = entry.getValue(); for (Integer bucket : buckets) { // use LinkedHashSet to maintain the order of keys // the keys will be iterated several times - Set keys = serverToKeysMap.get(server); + Set keys = serverToKeysMap.get(server); if (keys == null) { - keys = new LinkedHashSet(); + keys = new LinkedHashSet<>(); } keys.addAll(bucketToKeysMap.get(bucket)); serverToKeysMap.put(server, keys); @@ -248,7 +245,7 @@ public Map getServerToFilterMap(final Collection routingKey return serverToKeysMap; } - public Map> groupByServerToAllBuckets(Region region, + public Map> groupByServerToAllBuckets(Region region, boolean primaryOnly) { final String regionFullPath = region.getFullPath(); ClientPartitionAdvisor prAdvisor = getClientPartitionAdvisor(regionFullPath); @@ -270,7 +267,7 @@ public Map> groupByServerToAllBuckets(Region region */ private Map> groupByServerToBuckets( ClientPartitionAdvisor prAdvisor, Set bucketSet, boolean primaryOnly, - Region region) { + Region region) { if (primaryOnly) { Map> serverToBucketsMap = new HashMap<>(); for (Integer bucketId : bucketSet) { @@ -284,12 +281,7 @@ private Map> groupByServerToBuckets( prAdvisor.adviseServerLocations(bucketId)); return null; } - Set buckets = serverToBucketsMap.get(server); - if (buckets == null) { - buckets = new HashSet<>(); // faster if this was an ArrayList - serverToBucketsMap.put(server, buckets); - } - buckets.add(bucketId); + serverToBucketsMap.computeIfAbsent(server, k -> new HashSet<>()).add(bucketId); } if (logger.isDebugEnabled()) { @@ -347,7 +339,7 @@ private Map> pruneNodes(ClientPartitionAdvisor prAd logger.debug("ClientMetadataService: The server to buckets map is : {}", serverToBucketsMap); } - ServerLocation randomFirstServer = null; + final ServerLocation randomFirstServer; if (serverToBucketsMap.isEmpty()) { return null; } else { @@ -424,19 +416,15 @@ private ServerLocation findNextServer(Set return null; } - private Map groupByBucketOnClientSide(Region region, - ClientPartitionAdvisor prAdvisor, Collection routingKeys, boolean bucketsAsFilter) { + private Map> groupByBucketOnClientSide(Region region, + ClientPartitionAdvisor prAdvisor, Collection routingKeys, boolean bucketsAsFilter) { - Map bucketToKeysMap = new HashMap<>(); + Map> bucketToKeysMap = new HashMap<>(); int totalNumberOfBuckets = prAdvisor.getTotalNumBuckets(); - for (final Object key : routingKeys) { + for (final K key : routingKeys) { int bucketId = bucketsAsFilter ? 
(Integer) key : extractBucketID(region, prAdvisor, totalNumberOfBuckets, key); - Set bucketKeys = bucketToKeysMap.get(bucketId); - if (bucketKeys == null) { - bucketKeys = new HashSet(); // faster if this was an ArrayList - bucketToKeysMap.put(bucketId, bucketKeys); - } + Set bucketKeys = bucketToKeysMap.computeIfAbsent(bucketId, k -> new HashSet<>()); bucketKeys.add(key); } if (logger.isDebugEnabled()) { @@ -445,18 +433,17 @@ private Map groupByBucketOnClientSide(Region region, return bucketToKeysMap; } - private int extractBucketID(Region region, ClientPartitionAdvisor prAdvisor, - int totalNumberOfBuckets, Object key) { - int bucketId = -1; - final PartitionResolver resolver = getResolver(region, key, null); - Object resolveKey; - EntryOperation entryOp = null; + private int extractBucketID(Region region, ClientPartitionAdvisor prAdvisor, + int totalNumberOfBuckets, K key) { + final PartitionResolver resolver = getResolver(region, key, null); + final Object resolveKey; + EntryOperation entryOp = null; if (resolver == null) { // client has not registered PartitionResolver // Assuming even PR at server side is not using PartitionResolver resolveKey = key; } else { - entryOp = new EntryOperationImpl(region, Operation.FUNCTION_EXECUTION, key, null, null); + entryOp = new EntryOperationImpl<>(region, Operation.FUNCTION_EXECUTION, key, null, null); resolveKey = resolver.getRoutingObject(entryOp); if (resolveKey == null) { throw new IllegalStateException( @@ -464,20 +451,17 @@ private int extractBucketID(Region region, ClientPartitionAdvisor prAdvisor, } } + final int bucketId; if (resolver instanceof FixedPartitionResolver) { - if (entryOp == null) { - entryOp = new EntryOperationImpl(region, Operation.FUNCTION_EXECUTION, key, null, null); - } - String partition = ((FixedPartitionResolver) resolver).getPartitionName(entryOp, + String partition = ((FixedPartitionResolver) resolver).getPartitionName(entryOp, prAdvisor.getFixedPartitionNames()); if (partition == null) { - Object[] prms = new Object[] {region.getName(), resolver}; throw new IllegalStateException( String.format("For region %s, partition resolver %s returned partition name null", - prms)); + region.getName(), resolver)); } else { - bucketId = prAdvisor.assignFixedBucketId(region, partition, resolveKey); - // This bucketid can be -1 in some circumstances where we don't have information about + bucketId = prAdvisor.assignFixedBucketId(partition, resolveKey); + // This bucketId can be -1 in some circumstances where we don't have information about // all the partition on the server. 
// Do proactive scheduling of metadata fetch if (bucketId == -1) { @@ -517,7 +501,7 @@ public void scheduleGetPRMetaData(final InternalRegion region, final boolean isR refreshTaskCount++; totalRefreshTaskCount++; } - Runnable fetchTask = () -> { + SingleHopClientExecutor.submitTask(() -> { try { getClientPRMetadata(region); } catch (VirtualMachineError e) { @@ -533,16 +517,15 @@ public void scheduleGetPRMetaData(final InternalRegion region, final boolean isR refreshTaskCount--; } } - }; - SingleHopClientExecutor.submitTask(fetchTask); + }); } } public void getClientPRMetadata(InternalRegion region) { final String regionFullPath = region.getFullPath(); - ClientPartitionAdvisor advisor = null; + ClientPartitionAdvisor advisor; InternalPool pool = region.getServerProxy().getPool(); - // Acquires lock only if it is free, else a request to fetch meta data is in + // Acquires lock only if it is free, else a request to fetch metadata is in // progress, so just return if (region.getClientMetaDataLock().tryLock()) { try { @@ -571,7 +554,6 @@ public void getClientPRMetadata(InternalRegion region) { InternalRegion leaderRegion = (InternalRegion) region.getCache().getRegion(colocatedWith); if (colocatedAdvisor == null) { scheduleGetPRMetaData(leaderRegion, true); - return; } else { isMetadataRefreshed_TEST_ONLY = true; GetClientPRMetaDataOp.execute(pool, colocatedWith, this); @@ -625,7 +607,7 @@ public void scheduleGetPRMetaData(final InternalRegion region, final boolean isR refreshTaskCount++; totalRefreshTaskCount++; } - Runnable fetchTask = () -> { + SingleHopClientExecutor.submitTask(() -> { try { getClientPRMetadata(region); } catch (VirtualMachineError e) { @@ -642,8 +624,7 @@ public void scheduleGetPRMetaData(final InternalRegion region, final boolean isR refreshTaskCount--; } } - }; - SingleHopClientExecutor.submitTask(fetchTask); + }); } } @@ -666,7 +647,7 @@ public void removeBucketServerLocation(ServerLocation serverLocation) { } } - public byte getMetaDataVersion(Region region, Operation operation, Object key, Object value, + public byte getMetaDataVersion(Region region, Operation operation, K key, V value, Object callbackArg) { ClientPartitionAdvisor prAdvisor = getClientPartitionAdvisor(region.getFullPath()); if (prAdvisor == null) { @@ -675,15 +656,15 @@ public byte getMetaDataVersion(Region region, Operation operation, Object key, O int totalNumberOfBuckets = prAdvisor.getTotalNumBuckets(); - final PartitionResolver resolver = getResolver(region, key, callbackArg); + final PartitionResolver resolver = getResolver(region, key, callbackArg); Object resolveKey; - EntryOperation entryOp = null; + EntryOperation entryOp = null; if (resolver == null) { // client has not registered PartitionResolver // Assuming even PR at server side is not using PartitionResolver resolveKey = key; } else { - entryOp = new EntryOperationImpl(region, operation, key, value, callbackArg); + entryOp = new EntryOperationImpl<>(region, operation, key, value, callbackArg); resolveKey = resolver.getRoutingObject(entryOp); if (resolveKey == null) { throw new IllegalStateException( @@ -693,18 +674,14 @@ public byte getMetaDataVersion(Region region, Operation operation, Object key, O int bucketId; if (resolver instanceof FixedPartitionResolver) { - if (entryOp == null) { - entryOp = new EntryOperationImpl(region, Operation.FUNCTION_EXECUTION, key, null, null); - } - String partition = ((FixedPartitionResolver) resolver).getPartitionName(entryOp, + String partition = ((FixedPartitionResolver) 
resolver).getPartitionName(entryOp, prAdvisor.getFixedPartitionNames()); if (partition == null) { - Object[] prms = new Object[] {region.getName(), resolver}; throw new IllegalStateException( String.format("For region %s, partition resolver %s returned partition name null", - prms)); + region.getName(), resolver)); } else { - bucketId = prAdvisor.assignFixedBucketId(region, partition, resolveKey); + bucketId = prAdvisor.assignFixedBucketId(partition, resolveKey); } } else { bucketId = PartitionedRegionHelper.getHashKey(resolveKey, totalNumberOfBuckets); @@ -718,7 +695,7 @@ public byte getMetaDataVersion(Region region, Operation operation, Object key, O return bsl.getVersion(); } - private ServerLocation getPrimaryServerLocation(Region region, int bucketId) { + private ServerLocation getPrimaryServerLocation(Region region, int bucketId) { final String regionFullPath = region.getFullPath(); ClientPartitionAdvisor prAdvisor = getClientPartitionAdvisor(regionFullPath); if (prAdvisor == null) { @@ -770,13 +747,11 @@ public ClientPartitionAdvisor getClientPartitionAdvisor(String regionFullPath) { if (cache.isClosed()) { return null; } - ClientPartitionAdvisor prAdvisor = null; try { - prAdvisor = clientPRAdvisors.get(regionFullPath); - } catch (Exception npe) { - return null; + return clientPRAdvisors.get(regionFullPath); + } catch (Exception ignored) { } - return prAdvisor; + return null; } public Set getColocatedClientPartitionAdvisor(String regionFullPath) { @@ -790,13 +765,11 @@ private Set getAllRegionFullPaths() { if (cache.isClosed()) { return null; } - Set keys = null; try { - keys = clientPRAdvisors.keySet(); - } catch (Exception npe) { - return null; + return clientPRAdvisors.keySet(); + } catch (Exception ignored) { } - return keys; + return null; } public void close() { diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientPartitionAdvisor.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientPartitionAdvisor.java index f6fa02685e77..ce24c2a8306d 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientPartitionAdvisor.java +++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ClientPartitionAdvisor.java @@ -29,7 +29,6 @@ import org.apache.geode.annotations.VisibleForTesting; import org.apache.geode.cache.FixedPartitionAttributes; import org.apache.geode.cache.PartitionResolver; -import org.apache.geode.cache.Region; import org.apache.geode.distributed.internal.ServerLocation; import org.apache.geode.internal.cache.BucketServerLocation66; import org.apache.geode.internal.cache.FixedPartitionAttributesImpl; @@ -37,11 +36,9 @@ import org.apache.geode.logging.internal.log4j.api.LogService; /** - * Stores the information such as partition attributes and meta data details - * + * Stores the information such as partition attributes and metadata details * * @since GemFire 6.5 - * */ public class ClientPartitionAdvisor { @@ -56,7 +53,7 @@ public class ClientPartitionAdvisor { private final String colocatedWith; - private PartitionResolver partitionResolver = null; + private PartitionResolver partitionResolver = null; private Map> fixedPAMap = null; @@ -64,7 +61,6 @@ public class ClientPartitionAdvisor { private final Random random = new Random(); - @SuppressWarnings("unchecked") public ClientPartitionAdvisor(int totalNumBuckets, String colocatedWith, String partitionResolverName, Set fpaSet) { @@ -72,7 +68,7 @@ public ClientPartitionAdvisor(int totalNumBuckets, String colocatedWith, 
this.colocatedWith = colocatedWith; try { if (partitionResolverName != null) { - partitionResolver = (PartitionResolver) ClassPathLoader.getLatest() + partitionResolver = (PartitionResolver) ClassPathLoader.getLatest() .forName(partitionResolverName).newInstance(); } } catch (Exception e) { @@ -88,7 +84,7 @@ public ClientPartitionAdvisor(int totalNumBuckets, String colocatedWith, fixedPAMap = new ConcurrentHashMap<>(); int totalFPABuckets = 0; for (FixedPartitionAttributes fpa : fpaSet) { - List attrList = new ArrayList(); + List attrList = new ArrayList<>(); totalFPABuckets += fpa.getNumBuckets(); attrList.add(fpa.getNumBuckets()); attrList.add(((FixedPartitionAttributesImpl) fpa).getStartingBucketID()); @@ -100,7 +96,7 @@ public ClientPartitionAdvisor(int totalNumBuckets, String colocatedWith, } } - public ServerLocation adviseServerLocation(int bucketId) { + public BucketServerLocation66 adviseServerLocation(int bucketId) { if (bucketServerLocationsMap.containsKey(bucketId)) { List locations = bucketServerLocationsMap.get(bucketId); List locationsCopy = new ArrayList<>(locations); @@ -136,9 +132,7 @@ public ServerLocation adviseRandomServerLocation() { public List adviseServerLocations(int bucketId) { if (bucketServerLocationsMap.containsKey(bucketId)) { - List locationsCopy = - new ArrayList<>(bucketServerLocationsMap.get(bucketId)); - return locationsCopy; + return new ArrayList<>(bucketServerLocationsMap.get(bucketId)); } return null; } @@ -241,21 +235,21 @@ public String getColocatedWith() { * * @return PartitionResolver for the PartitionedRegion */ - public PartitionResolver getPartitionResolver() { - return partitionResolver; + @SuppressWarnings("unchecked") + public PartitionResolver getPartitionResolver() { + return (PartitionResolver) partitionResolver; } public Set getFixedPartitionNames() { return fixedPAMap.keySet(); } - public int assignFixedBucketId(Region region, String partition, Object resolveKey) { + public int assignFixedBucketId(String partition, Object resolveKey) { if (fixedPAMap.containsKey(partition)) { List attList = fixedPAMap.get(partition); int hc = resolveKey.hashCode(); int bucketId = Math.abs(hc % (attList.get(0))); - int partitionBucketID = bucketId + attList.get(1); - return partitionBucketID; + return bucketId + attList.get(1); } else { // We don't know as we might not have got the all FPAttributes // from the FPR, So don't throw the exception but send the request diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionNoAckOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionNoAckOp.java index af92e34fda4e..9e1e5bce0d23 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionNoAckOp.java +++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionNoAckOp.java @@ -123,7 +123,7 @@ public ExecuteRegionFunctionNoAckOpImpl(String region, Function function, MemberMappedArgument memberMappedArg = serverRegionExecutor.getMemberMappedArgument(); getMessage().addBytesPart(new byte[] {functionState}); getMessage().addStringPart(region, true); - if (serverRegionExecutor.isFnSerializationReqd()) { + if (serverRegionExecutor.isFunctionSerializationRequired()) { getMessage().addStringOrObjPart(function); } else { getMessage().addStringOrObjPart(function.getId()); diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionOp.java 
b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionOp.java index a330a01ebab8..15d7056a9d36 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionOp.java +++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionOp.java @@ -18,13 +18,14 @@ import static org.apache.geode.internal.cache.execute.AbstractExecution.DEFAULT_CLIENT_FUNCTION_TIMEOUT; import static org.apache.geode.internal.cache.tier.MessageType.EXECUTE_REGION_FUNCTION; import static org.apache.geode.internal.cache.tier.MessageType.EXECUTE_REGION_FUNCTION_ERROR; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Set; -import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.apache.geode.InternalGemFireError; @@ -50,7 +51,6 @@ import org.apache.geode.internal.cache.tier.sockets.Message; import org.apache.geode.internal.cache.tier.sockets.Part; import org.apache.geode.internal.serialization.KnownVersion; -import org.apache.geode.logging.internal.log4j.api.LogService; /** * Does a Execution of function on server region.
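For context, ExecuteRegionFunctionOp is the internal op behind the public client-side function execution API. A minimal sketch of the calling pattern that reaches this code path; the region name, function id, key, and locator address below are hypothetical:

    // Sketch only: public-API call that is ultimately routed through
    // ExecuteRegionFunctionOp (or its single-hop variant). Names are hypothetical.
    import java.util.Collections;
    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.client.ClientCache;
    import org.apache.geode.cache.client.ClientCacheFactory;
    import org.apache.geode.cache.client.ClientRegionShortcut;
    import org.apache.geode.cache.execute.FunctionService;
    import org.apache.geode.cache.execute.ResultCollector;

    public class FunctionCallSketch {
      public static void main(String[] args) {
        ClientCache cache = new ClientCacheFactory()
            .addPoolLocator("localhost", 10334)
            .create();
        Region<String, String> region = cache
            .<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
            .create("orders");
        // withFilter supplies the routing keys; with current bucket metadata this
        // takes the single-hop path discussed in the surrounding changes.
        ResultCollector<?, ?> rc = FunctionService.onRegion(region)
            .withFilter(Collections.singleton("key1"))
            .setArguments("some-args")
            .execute("myFunction"); // id of a Function registered on the servers
        System.out.println(rc.getResult());
        cache.close();
      }
    }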
@@ -60,8 +60,6 @@ */ public class ExecuteRegionFunctionOp { - private static final Logger logger = LogService.getLogger(); - private static final int MAX_RETRY_INITIAL_VALUE = -1; private ExecuteRegionFunctionOp() { @@ -73,7 +71,7 @@ private ExecuteRegionFunctionOp() { * the server. */ static void execute(ExecutablePool pool, - ResultCollector resultCollector, + ResultCollector resultCollector, int retryAttempts, boolean isHA, ExecuteRegionFunctionOpImpl op, boolean isReexecute, Set failedNodes) { @@ -135,16 +133,16 @@ private static Set ensureMutability(final Set failedNodes) { static class ExecuteRegionFunctionOpImpl extends AbstractOpWithTimeout { // To collect the results from the server - private final ResultCollector resultCollector; + private final ResultCollector resultCollector; // To get the instance of the Function Statistics we need the function name or instance - private Function function; + private Function function; private byte isReExecute = 0; private final String regionName; - private final ServerRegionFunctionExecutor executor; + private final ServerRegionFunctionExecutor executor; private final byte hasResult; @@ -164,15 +162,15 @@ private static int getMessagePartCount(int filterSize, int removedNodesSize) { return PART_COUNT + filterSize + removedNodesSize; } - private void fillMessage(String region, Function function, String functionId, - ServerRegionFunctionExecutor serverRegionExecutor, + private void fillMessage(String region, Function function, String functionId, + ServerRegionFunctionExecutor serverRegionExecutor, Set removedNodes, byte functionState, byte flags) { - Set routingObjects = serverRegionExecutor.getFilter(); + Set routingObjects = serverRegionExecutor.getFilter(); Object args = serverRegionExecutor.getArguments(); MemberMappedArgument memberMappedArg = serverRegionExecutor.getMemberMappedArgument(); addBytes(functionState); getMessage().addStringPart(region, true); - if (function != null && serverRegionExecutor.isFnSerializationReqd()) { + if (function != null && serverRegionExecutor.isFunctionSerializationRequired()) { getMessage().addStringOrObjPart(function); } else { getMessage().addStringOrObjPart(functionId); @@ -187,13 +185,13 @@ private void fillMessage(String region, Function function, String functionId, getMessage().addStringOrObjPart(key); } getMessage().addIntPart(removedNodes.size()); - for (Object nodes : removedNodes) { + for (String nodes : removedNodes) { getMessage().addStringOrObjPart(nodes); } } - ExecuteRegionFunctionOpImpl(String region, Function function, - ServerRegionFunctionExecutor serverRegionExecutor, ResultCollector rc, + ExecuteRegionFunctionOpImpl(String region, Function function, + ServerRegionFunctionExecutor serverRegionExecutor, ResultCollector rc, final int timeoutMs) { super(EXECUTE_REGION_FUNCTION, getMessagePartCount(serverRegionExecutor.getFilter().size(), 0), timeoutMs); @@ -205,7 +203,7 @@ private void fillMessage(String region, Function function, String functionId, fillMessage(region, function, function.getId(), serverRegionExecutor, failedNodes, functionState, flags); - resultCollector = rc; + resultCollector = uncheckedCast(rc); regionName = region; this.function = function; functionId = function.getId(); @@ -230,7 +228,8 @@ private void fillMessage(String region, Function function, String functionId, } ExecuteRegionFunctionOpImpl(String region, String functionId, - ServerRegionFunctionExecutor serverRegionExecutor, ResultCollector rc, byte hasResult, + ServerRegionFunctionExecutor serverRegionExecutor, 
ResultCollector rc, + byte hasResult, boolean isHA, boolean optimizeForWrite, boolean calculateFnState, final int timeoutMs) { super(EXECUTE_REGION_FUNCTION, @@ -249,7 +248,7 @@ private void fillMessage(String region, Function function, String functionId, fillMessage(region, null, functionId, serverRegionExecutor, failedNodes, functionState, flags); - resultCollector = rc; + resultCollector = uncheckedCast(rc); regionName = region; this.functionId = functionId; executor = serverRegionExecutor; @@ -321,7 +320,7 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { Object resultResponse = executeFunctionResponseMsg.getPart(0).getObject(); Object result; if (resultResponse instanceof ArrayList) { - result = ((ArrayList) resultResponse).get(0); + result = ((ArrayList) resultResponse).get(0); } else { result = resultResponse; } @@ -333,7 +332,7 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { if (ex instanceof InternalFunctionException) { Throwable cause = ex.getCause(); DistributedMember memberID = - (DistributedMember) ((ArrayList) resultResponse).get(1); + (DistributedMember) ((List) resultResponse).get(1); resultCollector.addResult(memberID, cause); FunctionStatsManager .getFunctionStats(functionId, executor.getRegion().getSystem()) @@ -373,7 +372,7 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { } if (resultResponse instanceof ArrayList) { DistributedMember memberID = - (DistributedMember) ((ArrayList) resultResponse).get(1); + (DistributedMember) ((ArrayList) resultResponse).get(1); failedNodes = ensureMutability(failedNodes); failedNodes.add(memberID.getId()); } @@ -388,8 +387,7 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { functionException.addException(t); } } else { - DistributedMember memberID = - (DistributedMember) ((ArrayList) resultResponse).get(1); + DistributedMember memberID = (DistributedMember) ((List) resultResponse).get(1); resultCollector.addResult(memberID, result); FunctionStatsManager .getFunctionStats(functionId, executor.getRegion().getSystem()) diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java index 86461a86b366..dea294f84e1c 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java +++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOp.java @@ -18,6 +18,7 @@ import static org.apache.geode.internal.cache.execute.AbstractExecution.DEFAULT_CLIENT_FUNCTION_TIMEOUT; import static org.apache.geode.internal.cache.tier.MessageType.EXECUTE_REGION_FUNCTION_ERROR; import static org.apache.geode.internal.cache.tier.MessageType.EXECUTE_REGION_FUNCTION_SINGLE_HOP; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import java.util.ArrayList; import java.util.HashSet; @@ -39,7 +40,6 @@ import org.apache.geode.cache.execute.ResultCollector; import org.apache.geode.distributed.DistributedMember; import org.apache.geode.distributed.internal.ServerLocation; -import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.LocalRegion; import org.apache.geode.internal.cache.execute.AbstractExecution; import org.apache.geode.internal.cache.execute.BucketMovedException; @@ -65,18 +65,16 @@ public class 
ExecuteRegionFunctionSingleHopOp { private ExecuteRegionFunctionSingleHopOp() {} - public static void execute(ExecutablePool pool, Region region, - ServerRegionFunctionExecutor serverRegionExecutor, - ResultCollector resultCollector, - Map serverToFilterMap, + public static void execute(ExecutablePool pool, Region region, + ServerRegionFunctionExecutor serverRegionExecutor, + ResultCollector resultCollector, + Map> serverToFilterMap, boolean isHA, - final java.util.function.Function regionFunctionSingleHopOpFunction, + final java.util.function.Function, AbstractOp> regionFunctionSingleHopOpFunction, final Supplier executeRegionFunctionOpSupplier) { Set failedNodes = new HashSet<>(); - ClientMetadataService cms = ((InternalCache) region.getCache()).getClientMetadataService(); - final boolean isDebugEnabled = logger.isDebugEnabled(); if (isDebugEnabled) { logger.debug("ExecuteRegionFunctionSingleHopOp#execute : The serverToFilterMap is : {}", @@ -85,7 +83,7 @@ public static void execute(ExecutablePool pool, Region region, List callableTasks = constructAndGetExecuteFunctionTasks( serverRegionExecutor, serverToFilterMap, (PoolImpl) pool, - cms, regionFunctionSingleHopOpFunction); + regionFunctionSingleHopOpFunction); final int retryAttempts = SingleHopClientExecutor.submitAllHA(callableTasks, (LocalRegion) region, isHA, @@ -115,11 +113,10 @@ public static void execute(ExecutablePool pool, Region region, private static List constructAndGetExecuteFunctionTasks( - ServerRegionFunctionExecutor serverRegionExecutor, - final Map serverToFilterMap, + ServerRegionFunctionExecutor serverRegionExecutor, + final Map> serverToFilterMap, final PoolImpl pool, - ClientMetadataService cms, - final java.util.function.Function opFactory) { + final java.util.function.Function, AbstractOp> opFactory) { final List tasks = new ArrayList<>(); ArrayList servers = new ArrayList<>(serverToFilterMap.keySet()); @@ -127,8 +124,9 @@ private static List constructAndGetExecuteFunctionTa logger.debug("Constructing tasks for the servers {}", servers); } for (ServerLocation server : servers) { - ServerRegionFunctionExecutor executor = (ServerRegionFunctionExecutor) serverRegionExecutor - .withFilter(serverToFilterMap.get(server)); + ServerRegionFunctionExecutor executor = + (ServerRegionFunctionExecutor) serverRegionExecutor + .withFilter(serverToFilterMap.get(server)); AbstractOp op = opFactory.apply(executor); @@ -142,13 +140,13 @@ private static List constructAndGetExecuteFunctionTa static class ExecuteRegionFunctionSingleHopOpImpl extends AbstractOpWithTimeout { - private final ResultCollector resultCollector; + private final ResultCollector resultCollector; private final String functionId; private final String regionName; - private final ServerRegionFunctionExecutor executor; + private final ServerRegionFunctionExecutor executor; private final byte hasResult; @@ -158,8 +156,8 @@ static class ExecuteRegionFunctionSingleHopOpImpl extends AbstractOpWithTimeout private final boolean optimizeForWrite; - ExecuteRegionFunctionSingleHopOpImpl(String region, Function function, - ServerRegionFunctionExecutor serverRegionExecutor, ResultCollector rc, byte hasResult, + ExecuteRegionFunctionSingleHopOpImpl(String region, Function function, + ServerRegionFunctionExecutor serverRegionExecutor, ResultCollector rc, Set removedNodes, boolean allBuckets, final int timeoutMs) { // What is this 8 that is getting added to filter and removednode sizes? 
// It should have been used as a constant and documented @@ -169,12 +167,12 @@ static class ExecuteRegionFunctionSingleHopOpImpl extends AbstractOpWithTimeout optimizeForWrite = function.optimizeForWrite(); byte functionState = AbstractExecution.getFunctionState(function.isHA(), function.hasResult(), function.optimizeForWrite()); - Set routingObjects = serverRegionExecutor.getFilter(); + Set routingObjects = serverRegionExecutor.getFilter(); Object args = serverRegionExecutor.getArguments(); MemberMappedArgument memberMappedArg = serverRegionExecutor.getMemberMappedArgument(); addBytes(functionState); getMessage().addStringPart(region, true); - if (serverRegionExecutor.isFnSerializationReqd()) { + if (serverRegionExecutor.isFunctionSerializationRequired()) { getMessage().addStringOrObjPart(function); } else { getMessage().addStringOrObjPart(function.getId()); @@ -191,20 +189,21 @@ static class ExecuteRegionFunctionSingleHopOpImpl extends AbstractOpWithTimeout } } getMessage().addIntPart(removedNodes.size()); - for (Object nodes : removedNodes) { + for (String nodes : removedNodes) { getMessage().addStringOrObjPart(nodes); } - resultCollector = rc; + resultCollector = uncheckedCast(rc); regionName = region; functionId = function.getId(); executor = serverRegionExecutor; - this.hasResult = functionState; + hasResult = functionState; failedNodes = removedNodes; } ExecuteRegionFunctionSingleHopOpImpl(String region, String functionId, - ServerRegionFunctionExecutor serverRegionExecutor, ResultCollector rc, byte hasResult, + ServerRegionFunctionExecutor serverRegionExecutor, ResultCollector rc, + byte hasResult, Set removedNodes, boolean allBuckets, boolean isHA, boolean optimizeForWrite, final int timeoutMs) { // What is this 8 that is getting added to filter and removednode sizes? 
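The constructors above collapse isHA, hasResult, and optimizeForWrite into the single functionState byte via AbstractExecution.getFunctionState. A standalone illustration of that style of flag packing; the bit layout below is an assumption, not Geode's actual encoding:

    // Illustration only: packing three execution flags into one byte.
    // AbstractExecution.getFunctionState may use a different bit layout.
    final class FunctionStateSketch {
      static byte pack(boolean isHA, boolean hasResult, boolean optimizeForWrite) {
        byte state = 0;
        if (isHA) {
          state |= 0b001;
        }
        if (hasResult) {
          state |= 0b010;
        }
        if (optimizeForWrite) {
          state |= 0b100;
        }
        return state;
      }

      static boolean hasResult(byte state) {
        return (state & 0b010) != 0;
      }
    }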
@@ -213,7 +212,7 @@ static class ExecuteRegionFunctionSingleHopOpImpl extends AbstractOpWithTimeout 8 + serverRegionExecutor.getFilter().size() + removedNodes.size(), timeoutMs); this.isHA = isHA; this.optimizeForWrite = optimizeForWrite; - Set routingObjects = serverRegionExecutor.getFilter(); + Set routingObjects = serverRegionExecutor.getFilter(); Object args = serverRegionExecutor.getArguments(); byte functionState = AbstractExecution.getFunctionState(isHA, hasResult == (byte) 1, optimizeForWrite); @@ -237,7 +236,7 @@ static class ExecuteRegionFunctionSingleHopOpImpl extends AbstractOpWithTimeout getMessage().addStringOrObjPart(nodes); } - resultCollector = rc; + resultCollector = uncheckedCast(rc); regionName = region; this.functionId = functionId; executor = serverRegionExecutor; @@ -274,7 +273,7 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { Object resultResponse = executeFunctionResponseMsg.getPart(0).getObject(); Object result; if (resultResponse instanceof ArrayList) { - result = ((List) resultResponse).get(0); + result = ((List) resultResponse).get(0); } else { result = resultResponse; } @@ -287,7 +286,8 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { } if (ex instanceof InternalFunctionException) { Throwable cause = ex.getCause(); - DistributedMember memberID = (DistributedMember) ((List) resultResponse).get(1); + DistributedMember memberID = + (DistributedMember) ((List) resultResponse).get(1); resultCollector.addResult(memberID, cause); FunctionStatsManager .getFunctionStats(functionId, executor.getRegion().getSystem()) @@ -312,7 +312,8 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { new InternalFunctionInvocationTargetException( ((CacheClosedException) result).getMessage()); if (resultResponse instanceof ArrayList) { - DistributedMember memberID = (DistributedMember) ((List) resultResponse).get(1); + DistributedMember memberID = + (DistributedMember) ((List) resultResponse).get(1); failedNodes.add(memberID.getId()); } exception = new FunctionException(fite); @@ -320,7 +321,7 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { String s = "While performing a remote " + getOpName(); exception = new ServerOperationException(s, (Throwable) result); } else { - DistributedMember memberID = (DistributedMember) ((List) resultResponse).get(1); + DistributedMember memberID = (DistributedMember) ((List) resultResponse).get(1); resultCollector.addResult(memberID, result); FunctionStatsManager .getFunctionStats(functionId, executor.getRegion().getSystem()) @@ -383,8 +384,9 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { return null; } - ResultCollector getResultCollector() { - return resultCollector; + @SuppressWarnings("unchecked") + ResultCollector getResultCollector() { + return (ResultCollector) resultCollector; } String getFunctionId() { @@ -395,7 +397,7 @@ String getRegionName() { return regionName; } - ServerRegionFunctionExecutor getExecutor() { + ServerRegionFunctionExecutor getExecutor() { return executor; } diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/GetClientPRMetaDataOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/GetClientPRMetaDataOp.java index 165216798c32..75094c4da14e 100755 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/GetClientPRMetaDataOp.java +++ 
b/geode-core/src/main/java/org/apache/geode/cache/client/internal/GetClientPRMetaDataOp.java @@ -58,9 +58,9 @@ public static void execute(ExecutablePool pool, String regionFullPath, static class GetClientPRMetaDataOpImpl extends AbstractOp { - String regionFullPath = null; + private final String regionFullPath; - ClientMetadataService cms = null; + private final ClientMetadataService cms; public GetClientPRMetaDataOpImpl(String regionFullPath, ClientMetadataService cms) { super(GET_CLIENT_PR_METADATA, 1); @@ -137,8 +137,8 @@ protected Object processResponse(final @NotNull Message msg) throws Exception { String s = "While performing GetClientPRMetaDataOp " + ((Throwable) obj).getMessage(); throw new ServerOperationException(s, (Throwable) obj); default: - throw new InternalGemFireError(String.format("Unknown message type %s", - msg.getMessageType())); + throw new InternalGemFireError( + String.format("Unknown message type %s", msg.getMessageType())); } } diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/PutAllOp.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/PutAllOp.java index 8639cf19c10b..90f87a6c629a 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/PutAllOp.java +++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/PutAllOp.java @@ -67,9 +67,10 @@ public class PutAllOp { * @param eventId the event id for this putAll * @param skipCallbacks true if no callbacks will be invoked */ - public static VersionedObjectList execute(ExecutablePool pool, Region region, Map map, + public static VersionedObjectList execute(ExecutablePool pool, Region region, + Map map, EventID eventId, boolean skipCallbacks, boolean isRetry, Object callbackArg) { - PutAllOpImpl op = new PutAllOpImpl(region, map, eventId, + PutAllOpImpl op = new PutAllOpImpl<>(region, map, eventId, ((Pool) pool).getPRSingleHopEnabled(), skipCallbacks, callbackArg); op.initMessagePart(); if (isRetry) { @@ -87,24 +88,25 @@ public static VersionedObjectList execute(ExecutablePool pool, Region region, Ma * @param map the Map of keys and values to put * @param eventId the event id for this putAll */ - public static VersionedObjectList execute(ExecutablePool pool, Region region, - Map map, EventID eventId, boolean skipCallbacks, int retryAttempts, + public static VersionedObjectList execute(ExecutablePool pool, Region region, + Map map, EventID eventId, boolean skipCallbacks, int retryAttempts, Object callbackArg) { ClientMetadataService clientMetadataService = ((RegionMapOwner) region).getCache().getClientMetadataService(); - Map serverToFilterMap = + Map> serverToFilterMap = clientMetadataService.getServerToFilterMap(map.keySet(), region, true); if (serverToFilterMap == null || serverToFilterMap.isEmpty()) { - AbstractOp op = new PutAllOpImpl(region, map, eventId, + AbstractOp op = new PutAllOpImpl<>(region, map, eventId, ((Pool) pool).getPRSingleHopEnabled(), skipCallbacks, callbackArg); op.initMessagePart(); return (VersionedObjectList) pool.execute(op); } - List callableTasks = constructAndGetPutAllTasks(region, map, eventId, skipCallbacks, - serverToFilterMap, (InternalPool) pool, callbackArg); + List callableTasks = + constructAndGetPutAllTasks(region, map, eventId, skipCallbacks, + serverToFilterMap, (InternalPool) pool, callbackArg); final boolean isDebugEnabled = logger.isDebugEnabled(); if (isDebugEnabled) { @@ -149,12 +151,12 @@ public static VersionedObjectList execute(ExecutablePool pool, Region succeedKeySet = new LinkedHashSet<>(); 
Set serverSet = serverToFilterMap.keySet(); for (ServerLocation server : serverSet) { if (!failedServers.containsKey(server)) { @@ -180,9 +182,9 @@ public static VersionedObjectList execute(ExecutablePool pool, Region newMap = new LinkedHashMap<>(); + Set keySet = serverToFilterMap.get(failedServer); + for (K key : keySet) { newMap.put(key, map.get(key)); } @@ -221,9 +223,10 @@ private PutAllOp() { // no instances allowed } - private static List constructAndGetPutAllTasks(Region region, final Map map, + private static List constructAndGetPutAllTasks( + Region region, final Map map, final EventID eventId, - boolean skipCallbacks, final Map serverToFilterMap, + boolean skipCallbacks, final Map> serverToFilterMap, final InternalPool pool, Object callbackArg) { final List tasks = new ArrayList<>(); List servers = new ArrayList<>(serverToFilterMap.keySet()); @@ -232,14 +235,14 @@ private static List constructAndGetPutAllTasks(Region region, final Map map, logger.debug("Constructing tasks for the servers {}", servers); } for (ServerLocation server : servers) { - Set filterSet = serverToFilterMap.get(server); - Map newKeysValuesMap = new LinkedHashMap(); + Set filterSet = serverToFilterMap.get(server); + Map newKeysValuesMap = new LinkedHashMap<>(); // iterator 1: for single hop, both iterator filterSet and newKeysValuesMap - for (Object key : filterSet) { + for (K key : filterSet) { newKeysValuesMap.put(key, map.get(key)); } AbstractOp putAllOp = - new PutAllOpImpl(region, newKeysValuesMap, eventId, true, skipCallbacks, callbackArg); + new PutAllOpImpl<>(region, newKeysValuesMap, eventId, true, skipCallbacks, callbackArg); SingleHopOperationCallable task = new SingleHopOperationCallable(new ServerLocation(server.getHostName(), server.getPort()), @@ -249,20 +252,23 @@ private static List constructAndGetPutAllTasks(Region region, final Map map, return tasks; } - private static class PutAllOpImpl extends AbstractOp { + private static class PutAllOpImpl extends AbstractOp { - private boolean prSingleHopEnabled = false; + private final boolean prSingleHopEnabled; - private LocalRegion region = null; + private final LocalRegion region; + + private final Map map; - private Map map = null; private final Object callbackArg; - private ArrayList keys = null; + + private ArrayList keys = null; /** * @throws org.apache.geode.SerializationException if serialization fails */ - public PutAllOpImpl(Region region, Map map, EventID eventId, boolean prSingleHopEnabled, + public PutAllOpImpl(Region region, Map map, EventID eventId, + boolean prSingleHopEnabled, boolean skipCallbacks, Object callbackArg) { super(callbackArg != null ? MessageType.PUT_ALL_WITH_CALLBACK : MessageType.PUTALL, (callbackArg != null ? 
6 : 5) + (map.size() * 2)); @@ -290,9 +296,8 @@ protected void initMessagePart() { if (callbackArg != null) { getMessage().addObjPart(callbackArg); } - keys = new ArrayList(size); - for (final Object o : map.entrySet()) { - Map.Entry mapEntry = (Map.Entry) o; + keys = new ArrayList<>(size); + for (final Map.Entry mapEntry : map.entrySet()) { Object key = mapEntry.getKey(); keys.add(key); getMessage().addStringOrObjPart(key); @@ -342,13 +347,15 @@ protected Object processResponse(final @NotNull Message msg, final @NotNull Conn for (int partNo = 0; partNo < numParts; partNo++) { Part part = cm.getPart(partNo); try { - Object o = part.getObject(); + final Object o = part.getObject(); if (isDebugEnabled) { logger.debug("part({}) contained {}", partNo, o); } if (o == null) { // no response is an okay response - } else if (o instanceof byte[]) { + continue; + } + if (o instanceof byte[]) { if (prSingleHopEnabled) { byte[] bytesReceived = part.getSerializedForm(); if (bytesReceived[0] != ClientMetadataService.INITIAL_VERSION) { // nw hop diff --git a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ServerRegionProxy.java b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ServerRegionProxy.java index 5b57d77649f9..3e0cf8fadace 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/client/internal/ServerRegionProxy.java +++ b/geode-core/src/main/java/org/apache/geode/cache/client/internal/ServerRegionProxy.java @@ -124,10 +124,9 @@ public int size() { } /** - * Do not call this method if the value is Delta instance. Exclicitly passing + * Do not call this method if the value is Delta instance. Explicitly passing * Operation.CREATE to the PutOp.execute() method as the caller of this * method does not put Delta instances as value. 
-   * */
  public Object putForMetaRegion(Object key, Object value, byte[] deltaBytes,
      EntryEventImpl event, Object callbackArg) {
@@ -603,7 +602,7 @@ public VersionedObjectList putAll(Map map, EventID eventId, bool
       return PutAllOp.execute(pool, uncheckedCast(region), map, eventId, skipCallbacks,
           pool.getRetryAttempts(), callbackArg);
     } else {
-      return PutAllOp.execute(pool, region, map, eventId, skipCallbacks, false,
+      return PutAllOp.execute(pool, uncheckedCast(region), map, eventId, skipCallbacks, false,
           callbackArg);
     }
   }
@@ -671,7 +670,7 @@ public String getRegionName() {
   }
 
   public void executeFunction(Function function,
-      ServerRegionFunctionExecutor serverRegionExecutor,
+      ServerRegionFunctionExecutor serverRegionExecutor,
       ResultCollector resultCollector, byte hasResult, final int timeoutMs) {
@@ -704,10 +703,10 @@ public void executeFunction(Function function,
       } else {
-        final java.util.function.Function regionFunctionSingleHopOpFunction =
+        final java.util.function.Function, AbstractOp> regionFunctionSingleHopOpFunction =
            executor -> new ExecuteRegionFunctionSingleHopOp.ExecuteRegionFunctionSingleHopOpImpl(
                region.getFullPath(), function, executor, resultCollector,
-               hasResult, emptySet(), true, timeoutMs);
+               emptySet(), true, timeoutMs);
 
         ExecuteRegionFunctionSingleHopOp.execute(pool, region,
             serverRegionExecutor, resultCollector, serverToBuckets, function.isHA(),
@@ -715,7 +714,8 @@ public void executeFunction(Function function,
       }
     } else {
       boolean isBucketFilter = serverRegionExecutor.getExecuteOnBucketSetFlag();
-      Map serverToFilterMap =
+      @SuppressWarnings("unchecked")
+      Map> serverToFilterMap =
           cms.getServerToFilterMap(serverRegionExecutor.getFilter(), region,
               function.optimizeForWrite(), isBucketFilter);
@@ -730,10 +730,10 @@ public void executeFunction(Function function,
       } else {
-        final java.util.function.Function regionFunctionSingleHopOpFunction =
+        final java.util.function.Function, AbstractOp> regionFunctionSingleHopOpFunction =
            executor -> new ExecuteRegionFunctionSingleHopOp.ExecuteRegionFunctionSingleHopOpImpl(
                region.getFullPath(), function, executor, resultCollector,
-               hasResult, emptySet(), isBucketFilter, timeoutMs);
+               emptySet(), isBucketFilter, timeoutMs);
 
         ExecuteRegionFunctionSingleHopOp.execute(pool, region,
             serverRegionExecutor, resultCollector, serverToFilterMap,
@@ -759,7 +759,7 @@ public void executeFunction(Function function,
 
   public void executeFunction(String functionId,
-      ServerRegionFunctionExecutor serverRegionExecutor,
+      ServerRegionFunctionExecutor serverRegionExecutor,
       ResultCollector resultCollector, byte hasResult, boolean isHA,
       boolean optimizeForWrite, final int timeoutMs) {
@@ -791,7 +791,7 @@ public void executeFunction(String functionId,
         cms.scheduleGetPRMetaData(region, false);
       } else {
-        final java.util.function.Function regionFunctionSingleHopOpFunction =
+        final java.util.function.Function, AbstractOp> regionFunctionSingleHopOpFunction =
            executor1 -> new ExecuteRegionFunctionSingleHopOp.ExecuteRegionFunctionSingleHopOpImpl(
                region.getFullPath(), functionId, executor1, resultCollector,
                hasResult, emptySet(), true, isHA, optimizeForWrite, timeoutMs);
@@ -803,7 +803,8 @@ public void executeFunction(String functionId,
     } else {
       boolean isBucketsAsFilter = serverRegionExecutor.getExecuteOnBucketSetFlag();
-      Map serverToFilterMap = cms.getServerToFilterMap(
+      @SuppressWarnings("unchecked")
+      Map> serverToFilterMap = cms.getServerToFilterMap(
          serverRegionExecutor.getFilter(), region, optimizeForWrite, isBucketsAsFilter);
       if (serverToFilterMap == null || serverToFilterMap.isEmpty()) {
@@ -815,7 +816,7 @@ public void executeFunction(String functionId,
         cms.scheduleGetPRMetaData(region, false);
       } else {
-        final java.util.function.Function regionFunctionSingleHopOpFunction =
+        final java.util.function.Function, AbstractOp> regionFunctionSingleHopOpFunction =
            executor -> new ExecuteRegionFunctionSingleHopOp.ExecuteRegionFunctionSingleHopOpImpl(
                region.getFullPath(), functionId, executor, resultCollector,
                hasResult, emptySet(), isBucketsAsFilter, isHA, optimizeForWrite, timeoutMs);
@@ -844,7 +845,7 @@ public void executeFunction(String functionId,
 
   public void executeFunctionNoAck(String rgnName, Function function,
-      ServerRegionFunctionExecutor serverRegionExecutor,
+      ServerRegionFunctionExecutor serverRegionExecutor,
       byte hasResult) {
     recordTXOperation(ServerRegionOperation.EXECUTE_FUNCTION, null, 3, function,
         serverRegionExecutor, hasResult);
@@ -853,7 +854,7 @@ public void executeFunctionNoAck(String rgnName, Function function,
   }
 
   public void executeFunctionNoAck(String rgnName, String functionId,
-      ServerRegionFunctionExecutor serverRegionExecutor,
+      ServerRegionFunctionExecutor serverRegionExecutor,
       byte hasResult, boolean isHA, boolean optimizeForWrite) {
     recordTXOperation(ServerRegionOperation.EXECUTE_FUNCTION, null, 4, functionId,
diff --git a/geode-core/src/main/java/org/apache/geode/cache/execute/RegionFunctionContext.java b/geode-core/src/main/java/org/apache/geode/cache/execute/RegionFunctionContext.java
index ee608333b75c..68cdbfd946e5 100755
--- a/geode-core/src/main/java/org/apache/geode/cache/execute/RegionFunctionContext.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/execute/RegionFunctionContext.java
@@ -37,7 +37,7 @@
  * @see FunctionContext
  * @see PartitionRegionHelper
  */
-public interface RegionFunctionContext extends FunctionContext {
+public interface RegionFunctionContext extends FunctionContext {
 
   /**
    * Returns subset of keys (filter) provided by the invoking thread (aka routing objects). The set
diff --git a/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionListener.java b/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionListener.java
index fd4070b9a8fb..3cdde370b8d4 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionListener.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionListener.java
@@ -24,7 +24,7 @@
  * created/deleted or any bucket in a partitioned region becomes primary.
  *
  * It is highly recommended that implementations of this listener should be quick and not try to
- * manipulate regions and data because the the callbacks are invoked while holding locks that may
+ * manipulate regions and data because the callbacks are invoked while holding locks that may
 * block region operations.
 *
 *
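The javadoc fix above restates an important contract: PartitionListener callbacks run while Geode holds bucket/region locks. A minimal sketch of a conforming listener follows; the class name and queue hand-off are illustrative, not part of this patch, and the callback set shown matches the public interface (some Geode versions add a default afterSecondary).

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionListener;

// Sketch only: defers all real work off the callback thread, since these
// callbacks are invoked while locks that can block region operations are held.
public class AuditingPartitionListener implements PartitionListener {
  private final BlockingQueue<String> events = new LinkedBlockingQueue<>();

  @Override
  public void afterPrimary(int bucketId) {
    events.offer("primary:" + bucketId); // O(1); no region or data access here
  }

  @Override
  public void afterRegionCreate(Region<?, ?> region) {
    events.offer("create:" + region.getFullPath());
  }

  @Override
  public void afterBucketCreated(int bucketId, Iterable<?> keys) {
    events.offer("bucketCreated:" + bucketId);
  }

  @Override
  public void afterBucketRemoved(int bucketId, Iterable<?> keys) {
    events.offer("bucketRemoved:" + bucketId);
  }
}
```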
diff --git a/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionRegionHelper.java b/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionRegionHelper.java
index b5bed4593d95..48cdc5cc0fc7 100644
--- a/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionRegionHelper.java
+++ b/geode-core/src/main/java/org/apache/geode/cache/partition/PartitionRegionHelper.java
@@ -14,10 +14,13 @@
  */
 package org.apache.geode.cache.partition;
 
+import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast;
+
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.SortedSet;
 import java.util.TreeSet;
 
 import org.apache.geode.cache.Cache;
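The new static import above is what lets the raw-typed results later in this file be funneled through a single helper instead of scattered casts. For readers unfamiliar with it, `UncheckedUtils.uncheckedCast` is essentially a one-line generic cast; the sketch below shows the idea (the real internal class may differ in detail).

```java
// Approximation of org.apache.geode.util.internal.UncheckedUtils, shown only
// to explain the uncheckedCast(...) calls introduced by this patch.
public final class UncheckedUtils {
  private UncheckedUtils() {}

  @SuppressWarnings("unchecked")
  public static <T> T uncheckedCast(Object object) {
    // Concentrates the one unavoidable unchecked cast in a single place,
    // instead of scattering raw casts and @SuppressWarnings across callers.
    return (T) object;
  }
}
```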
@@ -41,9 +44,11 @@
 import org.apache.geode.internal.cache.PartitionedRegionHelper;
 import org.apache.geode.internal.cache.control.RebalanceResultsImpl;
 import org.apache.geode.internal.cache.execute.InternalRegionFunctionContext;
+import org.apache.geode.internal.cache.partitioned.InternalPRInfo;
 import org.apache.geode.internal.cache.partitioned.PartitionedRegionRebalanceOp;
 import org.apache.geode.internal.cache.partitioned.rebalance.ExplicitMoveDirector;
 import org.apache.geode.internal.cache.partitioned.rebalance.PercentageMoveDirector;
+import org.apache.geode.management.runtime.RebalanceResult;
 
 /**
  * Utility methods for handling partitioned Regions, for example during execution of {@link Function
@@ -88,7 +93,7 @@ private PartitionRegionHelper() {
    * @since GemFire 6.0
    */
   public static Map> getColocatedRegions(final Region r) {
-    Map ret;
+    Map ret;
     if (isPartitionedRegion(r)) {
       final PartitionedRegion pr = (PartitionedRegion) r;
       ret = ColocationHelper.getAllColocationRegions(pr);
@@ -96,8 +101,8 @@ private PartitionRegionHelper() {
         ret = Collections.emptyMap();
       }
     } else if (r instanceof LocalDataSet) {
-      LocalDataSet lds = (LocalDataSet) r;
-      InternalRegionFunctionContext fc = lds.getFunctionContext();
+      LocalDataSet lds = (LocalDataSet) r;
+      InternalRegionFunctionContext fc = lds.getFunctionContext();
       if (fc != null) {
         ret = ColocationHelper.getAllColocatedLocalDataSets(lds.getProxy(), fc);
         if (ret.isEmpty()) {
@@ -112,7 +117,7 @@ private PartitionRegionHelper() {
           String.format("Region %s is not a Partitioned Region",
               r.getFullPath()));
     }
-    return Collections.unmodifiableMap(ret);
+    return uncheckedCast(Collections.unmodifiableMap(ret));
   }
 
   /**
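For context, a hedged example of calling getColocatedRegions as changed above; the region name "orders" is made up, and the map parameterization assumes the post-change signature returns Map<String, Region<?, ?>>.

```java
import java.util.Map;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionRegionHelper;

public final class ColocationDump {
  public static void dump(Cache cache) {
    Region<String, Object> orders = cache.getRegion("orders"); // illustrative name
    Map<String, Region<?, ?>> colocated = PartitionRegionHelper.getColocatedRegions(orders);
    // The returned map is unmodifiable; iterate it, don't mutate it.
    colocated.forEach((path, region) ->
        System.out.println(path + " is colocated with /orders"));
  }
}
```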
@@ -124,8 +129,7 @@ private PartitionRegionHelper() {
    */
   public static boolean isPartitionedRegion(final Region r) {
     if (r == null) {
-      throw new IllegalArgumentException(
-          "Argument 'Region' is null");
+      throw new IllegalArgumentException("Argument 'Region' is null");
     }
     return r instanceof PartitionedRegion;
   }
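A small illustration of the guard this method enables (a hypothetical helper, not in the patch): callers that require a partitioned region can fail fast with a clear message, while a null region is rejected by Geode itself with IllegalArgumentException as shown above.

```java
import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionRegionHelper;

// Hypothetical convenience guard built on isPartitionedRegion.
public final class Preconditions {
  static void requirePartitioned(Region<?, ?> region) {
    if (!PartitionRegionHelper.isPartitionedRegion(region)) {
      throw new IllegalStateException(region.getFullPath() + " is not a partitioned region");
    }
  }
}
```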
@@ -154,9 +158,9 @@ private static PartitionedRegion isPartitionedCheck(final Region r) {
    * @since GemFire 6.0
    */
   public static Set getPartitionRegionInfo(final Cache cache) {
-    Set prDetailsSet = new TreeSet<>();
+    SortedSet prDetailsSet = new TreeSet<>();
     fillInPartitionedRegionInfo((InternalCache) cache, prDetailsSet, false);
-    return prDetailsSet;
+    return uncheckedCast(prDetailsSet);
   }
 
   /**
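The hunk above tightens the internals of getPartitionRegionInfo while keeping its public Set-of-PartitionRegionInfo contract. A typical (hypothetical) monitoring use looks like this; the accessors used are public PartitionRegionInfo API.

```java
import java.util.Set;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.partition.PartitionRegionHelper;
import org.apache.geode.cache.partition.PartitionRegionInfo;

public final class RedundancyReport {
  public static void print(Cache cache) {
    Set<PartitionRegionInfo> infos = PartitionRegionHelper.getPartitionRegionInfo(cache);
    for (PartitionRegionInfo info : infos) {
      // One line per partitioned region in this cache.
      System.out.println(info.getRegionPath()
          + " actualRedundantCopies=" + info.getActualRedundantCopies()
          + " lowRedundancyBuckets=" + info.getLowRedundancyBucketCount());
    }
  }
}
```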
@@ -179,7 +183,8 @@ public static PartitionRegionInfo getPartitionRegionInfo(final Region regi
     return null;
   }
 
-  private static void fillInPartitionedRegionInfo(final InternalCache cache, final Set prDetailsSet,
+  private static void fillInPartitionedRegionInfo(final InternalCache cache,
+      final SortedSet prDetailsSet,
       final boolean internal) {
     // TODO: optimize by fetching all PR details from each member at once
     Set partitionedRegions = cache.getPartitionedRegions();
@@ -187,7 +192,7 @@ private static void fillInPartitionedRegionInfo(final InternalCache cache, final
       return;
     }
     for (PartitionedRegion partitionedRegion : partitionedRegions) {
-      PartitionRegionInfo prDetails = partitionedRegion.getRedundancyProvider()
+      InternalPRInfo prDetails = partitionedRegion.getRedundancyProvider()
           .buildPartitionedRegionInfo(internal, cache.getInternalResourceManager().getLoadProbe());
       if (prDetails != null) {
         prDetailsSet.add(prDetails);
@@ -204,7 +209,7 @@ private static void fillInPartitionedRegionInfo(final InternalCache cache, final
    *
    * This method will block until all buckets are assigned.
    *
-   * @param region The region which should have it's buckets assigned.
+   * @param region The region which should have its buckets assigned.
    * @throws IllegalStateException if the provided region is something other than a
    *         {@linkplain DataPolicy#PARTITION partitioned Region}
    * @since GemFire 6.0
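Since the javadoc above notes that this call blocks until every bucket is assigned, it is usually issued once at startup, before the first puts; a sketch (the region name is illustrative):

```java
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.partition.PartitionRegionHelper;

public final class BucketWarmup {
  public static void warmUp(Cache cache) {
    Region<String, Object> orders = cache.getRegion("orders"); // illustrative
    // Blocks until all buckets of the partitioned region have been assigned.
    PartitionRegionHelper.assignBucketsToPartitions(orders);
  }
}
```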
@@ -329,7 +334,7 @@ private static  Set getAllForKey(final Region
    * Region}, return a map of {@linkplain PartitionAttributesFactory#setColocatedWith(String)
    * colocated Regions} with read access limited to the context of the function.
    * 

- * Writes using these Region have no constraints and behave the same as a partitioned Region. + * Writes using these regions have no constraints and behave the same as a partitioned Region. *

* If there are no colocated regions, return an empty map. * @@ -339,12 +344,11 @@ private static Set getAllForKey(final Region * @return an unmodifiable map of {@linkplain Region#getFullPath() region name} to {@link Region} * @since GemFire 6.0 */ - public static Map> getLocalColocatedRegions(final RegionFunctionContext c) { - final Region r = c.getDataSet(); + public static Map> getLocalColocatedRegions( + final RegionFunctionContext c) { + final Region r = c.getDataSet(); isPartitionedCheck(r); - final InternalRegionFunctionContext rfci = (InternalRegionFunctionContext) c; - Map ret = rfci.getColocatedLocalDataSets(); - return ret; + return ((InternalRegionFunctionContext) c).getColocatedLocalDataSets(); } /** @@ -365,11 +369,10 @@ private static Set getAllForKey(final Region * @return a Region for efficient reads * @since GemFire 6.0 */ - public static Region getLocalDataForContext(final RegionFunctionContext c) { - final Region r = c.getDataSet(); + public static Region getLocalDataForContext(final RegionFunctionContext c) { + final Region r = c.getDataSet(); isPartitionedCheck(r); - InternalRegionFunctionContext rfci = (InternalRegionFunctionContext) c; - return rfci.getLocalDataSet(r); + return ((InternalRegionFunctionContext) c).getLocalDataSet(r); } /** @@ -393,7 +396,7 @@ public static Region getLocalData(final Region r) { } else { buckets = Collections.emptySet(); } - return new LocalDataSet(pr, buckets); + return new LocalDataSet<>(pr, buckets); } else if (r instanceof LocalDataSet) { return r; } else { @@ -425,7 +428,7 @@ public static Region getLocalPrimaryData(final Region r) { } else { buckets = Collections.emptySet(); } - return new LocalDataSet(pr, buckets); + return new LocalDataSet<>(pr, buckets); } else if (r instanceof LocalDataSet) { return r; } else { @@ -492,7 +495,7 @@ public static void moveBucketByKey(Region region, DistributedMember so * It may not be possible to move data to the destination member, if the destination member has no * available space, no bucket smaller than the given percentage exists, or if moving data would * violate redundancy constraints. If data cannot be moved, this method will return a - * RebalanceResult object with 0 total bucket transfers. + * {@link RebalanceResult} object with 0 total bucket transfers. *

* This method allows direct control of what data to move. To automatically balance buckets, see * {@link ResourceManager#createRebalanceFactory()} @@ -505,9 +508,9 @@ public static void moveBucketByKey(Region region, DistributedMember so * @param percentage the maximum amount of data to move, as a percentage from 0 to 100. * * @throws IllegalStateException if the source or destination are not valid members of the system. - * @throws IllegalArgumentException if the percentage is not between 0 to 100. + * @throws IllegalArgumentException if the percentage is not between 0 and 100. * - * @return A RebalanceResult object that contains information about what what data was actually + * @return A {@link RebalanceResult} object that contains information about what data was actually * moved. * * @since GemFire 7.1 diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java index 0740a80e6f6e..a74ee69dd84a 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractCompiledValue.java @@ -276,7 +276,7 @@ public void generateCanonicalizedExpression(StringBuilder clauseBuffer, Executio } @Override - public void getRegionsInQuery(Set regionsInQuery, Object[] parameters) { + public void getRegionsInQuery(Set regionsInQuery, Object[] parameters) { for (final Object o : getChildren()) { CompiledValue v = (CompiledValue) o; if (v == null) { diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractGroupOrRangeJunction.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractGroupOrRangeJunction.java index 31060b0f629e..788030c0b71b 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractGroupOrRangeJunction.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AbstractGroupOrRangeJunction.java @@ -373,7 +373,7 @@ private SelectResults auxIterateEvaluate(CompiledValue operand, ExecutionContext return intermediateResults; } List currentIters = (completeExpansion) ? context.getCurrentIterators() - : QueryUtils.getDependentItrChainForIndpndntItrs(indpndntItr, context); + : QueryUtils.getDependentIteratorChainForIndependentIterators(indpndntItr, context); SelectResults resultSet = null; RuntimeIterator[] rIters = new RuntimeIterator[currentIters.size()]; currentIters.toArray(rIters); diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AllGroupJunction.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AllGroupJunction.java index fe34838ef4d3..98b4796fdfab 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AllGroupJunction.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AllGroupJunction.java @@ -137,9 +137,9 @@ private SelectResults evaluateAndJunction(ExecutionContext context) } else { results[j] = filterResults; grpItrs = (gj instanceof CompositeGroupJunction) - ? QueryUtils.getDependentItrChainForIndpndntItrs( + ? 
QueryUtils.getDependentIteratorChainForIndependentIterators( ((CompositeGroupJunction) gj).getIndependentIteratorsOfCJ(), context) - : context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr( + : context.getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator( ((AbstractGroupOrRangeJunction) gj).getIndependentIteratorForGroup()[0]); itrsForResultFields[j] = new RuntimeIterator[grpItrs.size()]; Iterator grpItr = grpItrs.iterator(); @@ -208,9 +208,9 @@ private SelectResults evaluateOrJunction(ExecutionContext context) throws Functi gj = (CompiledValue) junctionItr.next(); grpResults[0] = ((Filter) gj).filterEvaluate(context, null); grpItrs = (gj instanceof CompositeGroupJunction) - ? QueryUtils.getDependentItrChainForIndpndntItrs( + ? QueryUtils.getDependentIteratorChainForIndependentIterators( ((CompositeGroupJunction) gj).getIndependentIteratorsOfCJ(), context) - : context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr( + : context.getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator( ((AbstractGroupOrRangeJunction) gj).getIndependentIteratorForGroup()[0]); itrsForResultFields[0] = new RuntimeIterator[grpItrs.size()]; Iterator grpItr = grpItrs.iterator(); diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java index 2e282273b761..1897ff48908e 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/AttributeDescriptor.java @@ -304,7 +304,7 @@ private boolean isFieldAlreadySearchedAndNotFound(String className, String field } private void updateClassToMethodsMap(String className, String field) { - Map> map = DefaultQuery.getPdxClasstoMethodsmap(); + Map> map = DefaultQuery.getPdxClassToMethodsMap(); Set fields = map.get(className); if (fields == null) { fields = new HashSet<>(); @@ -315,7 +315,7 @@ private void updateClassToMethodsMap(String className, String field) { } private boolean isMethodAlreadySearchedAndNotFound(String className, String field) { - Set fields = DefaultQuery.getPdxClasstoMethodsmap().get(className); + Set fields = DefaultQuery.getPdxClassToMethodsMap().get(className); if (fields != null) { return fields.contains(field); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java index e12e8cff359e..44138dd91b57 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledRegion.java @@ -51,35 +51,35 @@ public String getRegionPath() { @Override public Object evaluate(ExecutionContext context) throws RegionNotFoundException { - Region rgn; + Region region; Cache cache = context.getCache(); // do PR bucketRegion substitution here for expressions that evaluate to a Region. PartitionedRegion pr = context.getPartitionedRegion(); if (pr != null && pr.getFullPath().equals(regionPath)) { - rgn = context.getBucketRegion(); + region = context.getBucketRegion(); } else if (pr != null) { // Asif : This is a very tricky solution to allow equijoin queries on PartitionedRegion // locally // We have possibly got a situation of equijoin. it may be across PRs. 
so use the context's // bucket region - // to get ID and then retrieve the this region's bucket region + // to get ID and then retrieve this region's bucket region BucketRegion br = context.getBucketRegion(); int bucketID = br.getId(); // Is current region a partitioned region - rgn = cache.getRegion(regionPath); - if (rgn.getAttributes().getDataPolicy().withPartitioning()) { + region = cache.getRegion(regionPath); + if (region.getAttributes().getDataPolicy().withPartitioning()) { // convert it into bucket region. - PartitionedRegion prLocal = (PartitionedRegion) rgn; - rgn = prLocal.getDataStore().getLocalBucketById(bucketID); + PartitionedRegion prLocal = (PartitionedRegion) region; + region = prLocal.getDataStore().getLocalBucketById(bucketID); } } else { - rgn = cache.getRegion(regionPath); + region = cache.getRegion(regionPath); } - if (rgn == null) { + if (region == null) { // if we couldn't find the region because the cache is closed, throw // a CacheClosedException if (cache.isClosed()) { @@ -90,9 +90,9 @@ public Object evaluate(ExecutionContext context) throws RegionNotFoundException } if (context.isCqQueryContext()) { - return new QRegion(rgn, true, context); + return new QRegion(region, true, context); } else { - return new QRegion(rgn, false, context); + return new QRegion(region, false, context); } } @@ -105,7 +105,7 @@ public void generateCanonicalizedExpression(StringBuilder clauseBuffer, Executio } @Override - public void getRegionsInQuery(Set regionsInQuery, Object[] parameters) { + public void getRegionsInQuery(Set regionsInQuery, Object[] parameters) { regionsInQuery.add(regionPath); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledValue.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledValue.java index ce292bb198d5..84df78a239ca 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledValue.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompiledValue.java @@ -136,7 +136,7 @@ void generateCanonicalizedExpression(StringBuilder clauseBuffer, ExecutionContex * object (CompiledRegion). The default implementation is provided in the AbstractCompiledValue & * overridden in the CompiledSelect as it can contain multiple iterators */ - void getRegionsInQuery(Set regionNames, Object[] parameters); + void getRegionsInQuery(Set regionNames, Object[] parameters); /** Get the CompiledValues that this owns */ List getChildren(); diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompositeGroupJunction.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompositeGroupJunction.java index 2b9f4e7f9b19..17d765ceffe1 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompositeGroupJunction.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/CompositeGroupJunction.java @@ -224,7 +224,8 @@ private SelectResults evaluateAndJunction(ExecutionContext context) if (intermediateResults.isEmpty()) { StructType structType = QueryUtils.createStructTypeForRuntimeIterators( completeExpansion ? 
context.getCurrentIterators() - : QueryUtils.getDependentItrChainForIndpndntItrs(indpndnts, context)); + : QueryUtils.getDependentIteratorChainForIndependentIterators(indpndnts, + context)); return QueryUtils.createStructCollection(context, structType); } } @@ -261,7 +262,8 @@ private SelectResults evaluateAndJunction(ExecutionContext context) if (completeExpansion) { finalList = context.getCurrentIterators(); } else { - finalList = QueryUtils.getDependentItrChainForIndpndntItrs(indpndnts, context); + finalList = + QueryUtils.getDependentIteratorChainForIndependentIterators(indpndnts, context); } List expansionList = new LinkedList(finalList); RuntimeIterator[][] itrsForResultFields = new RuntimeIterator[len][]; @@ -294,7 +296,7 @@ private SelectResults evaluateAndJunction(ExecutionContext context) return empty; } else { results[j] = filterResults; - grpItrs = context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr( + grpItrs = context.getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator( gj.getIndependentIteratorForGroup()[0]); itrsForResultFields[j] = new RuntimeIterator[grpItrs.size()]; Iterator grpItr = grpItrs.iterator(); @@ -396,7 +398,8 @@ private SelectResults evaluateOrJunction(ExecutionContext context) throws Functi if (completeExpansion) { finalList = context.getCurrentIterators(); } else { - finalList = QueryUtils.getDependentItrChainForIndpndntItrs(indpndnts, context); + finalList = + QueryUtils.getDependentIteratorChainForIndependentIterators(indpndnts, context); } RuntimeIterator[][] itrsForResultFields = new RuntimeIterator[1][]; @@ -408,7 +411,7 @@ private SelectResults evaluateOrJunction(ExecutionContext context) throws Functi List expansionList = new LinkedList(finalList); gj = (AbstractGroupOrRangeJunction) junctionItr.next(); grpResults[0] = ((Filter) gj).filterEvaluate(context, null); - grpItrs = context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr( + grpItrs = context.getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator( gj.getIndependentIteratorForGroup()[0]); itrsForResultFields[0] = new RuntimeIterator[grpItrs.size()]; Iterator grpItr = grpItrs.iterator(); diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java index 11bda03cb499..6ffd1d6a7574 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/DefaultQuery.java @@ -14,6 +14,8 @@ */ package org.apache.geode.cache.query.internal; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -51,7 +53,6 @@ import org.apache.geode.cache.query.TypeMismatchException; import org.apache.geode.cache.query.internal.cq.InternalCqQuery; import org.apache.geode.internal.NanoTimer; -import org.apache.geode.internal.cache.BucketRegion; import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.LocalDataSet; import org.apache.geode.internal.cache.PartitionedRegion; @@ -146,11 +147,7 @@ static Map> getPdxClasstofieldsmap() { private static final ThreadLocal>> pdxClassToMethodsMap = ThreadLocal.withInitial(HashMap::new); - public static void setPdxClasstoMethodsmap(Map> map) { - pdxClassToMethodsMap.set(map); - } - - public static Map> getPdxClasstoMethodsmap() { + public static Map> getPdxClassToMethodsMap() { return 
pdxClassToMethodsMap.get(); } @@ -242,11 +239,11 @@ public Object execute(Object[] params) throws FunctionDomainException, TypeMisma // For local queries returning pdx objects wrap the resultset with // ResultsCollectionPdxDeserializerWrapper // which deserializes these pdx objects. - if (needsPDXDeserializationWrapper(true /* is query on PR */) + if (needsPDXDeserializationWrapper( /* is query on PR */) && result instanceof SelectResults) { // we use copy on read false here because the copying has already taken effect earlier in // the PartitionedRegionQueryEvaluator - result = new ResultsCollectionPdxDeserializerWrapper((SelectResults) result, false); + result = new ResultsCollectionPdxDeserializerWrapper((SelectResults) result, false); } return result; } @@ -284,14 +281,14 @@ public Object execute(Object[] params) throws FunctionDomainException, TypeMisma // For local queries returning pdx objects wrap the resultset with // ResultsCollectionPdxDeserializerWrapper // which deserializes these pdx objects. - if (needsPDXDeserializationWrapper(false /* is query on PR */) + if (needsPDXDeserializationWrapper( /* is query on PR */) && result instanceof SelectResults) { - result = new ResultsCollectionPdxDeserializerWrapper((SelectResults) result, + result = new ResultsCollectionPdxDeserializerWrapper((SelectResults) result, needsCopyOnReadWrapper); } else if (!isRemoteQuery() && cache.getCopyOnRead() && result instanceof SelectResults) { if (needsCopyOnReadWrapper) { - result = new ResultsCollectionCopyOnReadWrapper((SelectResults) result); + result = new ResultsCollectionCopyOnReadWrapper((SelectResults) result); } } return result; @@ -310,7 +307,7 @@ public Object execute(Object[] params) throws FunctionDomainException, TypeMisma * For Order by queries ,since they are already ordered by the comparator && it takes care of * conversion, we do not have to wrap it in a wrapper */ - private boolean needsPDXDeserializationWrapper(boolean isQueryOnPR) { + private boolean needsPDXDeserializationWrapper() { return !isRemoteQuery() && !cache.getPdxReadSerialized(); } @@ -381,17 +378,16 @@ QueryExecutor checkQueryOnPR(Object[] parameters) // 3) PR reference can only be in the first FROM clause List prs = new ArrayList<>(); - for (final Object o : getRegionsInQuery(parameters)) { - String regionPath = (String) o; - Region rgn = cache.getRegion(regionPath); - if (rgn == null) { + for (final String regionPath : getRegionsInQuery(parameters)) { + Region region = cache.getRegion(regionPath); + if (region == null) { cache.getCancelCriterion().checkCancelInProgress(null); throw new RegionNotFoundException( String.format("Region not found: %s", regionPath)); } - if (rgn instanceof QueryExecutor) { - ((PartitionedRegion) rgn).checkPROffline(); - prs.add((QueryExecutor) rgn); + if (region instanceof QueryExecutor) { + ((PartitionedRegion) region).checkPROffline(); + prs.add((QueryExecutor) region); } } if (prs.size() == 1) { @@ -441,7 +437,7 @@ QueryExecutor checkQueryOnPR(Object[] parameters) } // make sure the where clause references no regions - Set regions = new HashSet(); + Set regions = new HashSet<>(); CompiledValue whereClause = select.getWhereClause(); if (whereClause != null) { whereClause.getRegionsInQuery(regions, parameters); @@ -450,10 +446,10 @@ QueryExecutor checkQueryOnPR(Object[] parameters) "The WHERE clause cannot refer to a region when querying on a Partitioned Region"); } } - List fromClause = select.getIterators(); + List fromClause = select.getIterators(); // the first iterator in the 
FROM clause must be just a reference to the Partitioned Region - Iterator fromClauseIterator = fromClause.iterator(); + Iterator fromClauseIterator = fromClause.iterator(); CompiledIteratorDef itrDef = (CompiledIteratorDef) fromClauseIterator.next(); // By process of elimination, we know that the first iterator contains a reference @@ -478,7 +474,7 @@ QueryExecutor checkQueryOnPR(Object[] parameters) } // check the projections, must not reference any regions - List projs = select.getProjectionAttributes(); + List projs = select.getProjectionAttributes(); if (projs != null) { for (Object proj1 : projs) { Object[] rawProj = (Object[]) proj1; @@ -643,7 +639,7 @@ private static String getLogMessage(QueryObserver observer, long startTime, int String usedIndexesString = null; if (observer instanceof IndexTrackingQueryObserver) { IndexTrackingQueryObserver indexObserver = (IndexTrackingQueryObserver) observer; - Map usedIndexes = indexObserver.getUsedIndexes(); + Map usedIndexes = indexObserver.getUsedIndexes(); indexObserver.reset(); StringBuilder sb = new StringBuilder(); sb.append(" indexesUsed("); @@ -651,8 +647,9 @@ private static String getLogMessage(QueryObserver observer, long startTime, int sb.append(')'); if (usedIndexes.size() > 0) { sb.append(':'); - for (Iterator itr = usedIndexes.entrySet().iterator(); itr.hasNext();) { - Map.Entry entry = (Map.Entry) itr.next(); + for (final Iterator> itr = + uncheckedCast(usedIndexes.entrySet().iterator()); itr.hasNext();) { + final Map.Entry entry = itr.next(); sb.append(entry.getKey()).append(entry.getValue()); if (itr.hasNext()) { sb.append(','); @@ -673,43 +670,6 @@ private static String getLogMessage(QueryObserver observer, long startTime, int + (usedIndexesString != null ? usedIndexesString : "") + " \"" + query + '"'; } - private static String getLogMessage(IndexTrackingQueryObserver indexObserver, long startTime, - String otherObserver, int resultSize, String query, BucketRegion bucket) { - float time = 0.0f; - - if (startTime > 0L) { - time = (NanoTimer.getTime() - startTime) / 1.0e6f; - } - - String usedIndexesString = null; - if (indexObserver != null) { - Map usedIndexes = indexObserver.getUsedIndexes(bucket.getFullPath()); - StringBuilder sb = new StringBuilder(); - sb.append(" indexesUsed("); - sb.append(usedIndexes.size()); - sb.append(')'); - if (!usedIndexes.isEmpty()) { - sb.append(':'); - for (Iterator itr = usedIndexes.entrySet().iterator(); itr.hasNext();) { - Map.Entry entry = (Map.Entry) itr.next(); - sb.append(entry.getKey()).append("(Results: ").append(entry.getValue()) - .append(", Bucket: ").append(bucket.getId()).append(")"); - if (itr.hasNext()) { - sb.append(','); - } - } - } - usedIndexesString = sb.toString(); - } else if (DefaultQuery.QUERY_VERBOSE) { - usedIndexesString = - " indexesUsed(NA due to other observer in the way: " + otherObserver + ')'; - } - - String rowCountString = " rowCount = " + resultSize + ';'; - return "Query Executed" + (startTime > 0L ? " in " + time + " ms;" : ";") + rowCountString - + (usedIndexesString != null ? 
usedIndexesString : "") + " \"" + query + '"'; - } - @Override public Object execute(RegionFunctionContext context) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { @@ -744,8 +704,8 @@ public Object execute(RegionFunctionContext context, Object[] params) try { indexObserver = startTrace(); if (qe != null) { - LocalDataSet localDataSet = - (LocalDataSet) PartitionRegionHelper.getLocalDataForContext(context); + LocalDataSet localDataSet = + (LocalDataSet) PartitionRegionHelper.getLocalDataForContext(context); Set buckets = localDataSet.getBucketSet(); final ExecutionContext executionContext = new ExecutionContext(null, cache); result = qe.executeQuery(this, executionContext, params, buckets); @@ -793,7 +753,7 @@ public void endTrace(QueryObserver indexObserver, long startTime, Object result) int resultSize = -1; if (result instanceof Collection) { - resultSize = ((Collection) result).size(); + resultSize = ((Collection) result).size(); } String queryVerboseMsg = @@ -802,11 +762,12 @@ public void endTrace(QueryObserver indexObserver, long startTime, Object result) } } - public void endTrace(QueryObserver indexObserver, long startTime, Collection result) { + public void endTrace(QueryObserver indexObserver, long startTime, + Collection> result) { if (logger.isInfoEnabled() && traceOn) { int resultSize = 0; - for (Collection aResult : result) { + for (Collection aResult : result) { resultSize += aResult.size(); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java index f9e4dcd6e093..2808a9faef8e 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/ExecutionContext.java @@ -80,7 +80,7 @@ public class ExecutionContext { * final Independent RuntimeIterator or Iterators , ie. those referring to a Region or * BindArgument, on which the CompiledIteratorDef depends upon . 
*/ - private final Map> itrDefToIndpndtRuntimeItrMap = + private final Map> compiledIteratorDefToIndependentRuntimeIterators = new HashMap<>(); /** @@ -88,7 +88,8 @@ public class ExecutionContext { * this Map will be only for those RuntimeIterators which have an underlying Region as its * Collection Expression */ - private final Map indpndtItrToRgnMap = new HashMap<>(); + private final Map independentRuntimeIteratorsToRegionPath = + new HashMap<>(); // used when querying on a PR: Substitute reference to PartitionedRegion with BucketRegion private BucketRegion bukRgn = null; @@ -97,7 +98,7 @@ public class ExecutionContext { private Object currentProjectionField = null; private boolean isPRQueryNode = false; - private Optional cancellationTask; + private ScheduledFuture cancellationTask; private volatile CacheRuntimeException canceledException; static final ThreadLocal isCanceled = ThreadLocal.withInitial(AtomicBoolean::new); @@ -132,17 +133,17 @@ public MethodInvocationAuthorizer getMethodInvocationAuthorizer() { public ExecutionContext(Object[] bindArguments, InternalCache cache) { this.cache = cache; this.bindArguments = bindArguments; - cancellationTask = Optional.empty(); + cancellationTask = null; queryConfigurationService = cache.getService(QueryConfigurationService.class); methodInvocationAuthorizer = queryConfigurationService.getMethodAuthorizer(); } - Optional getCancellationTask() { - return cancellationTask; + Optional> getCancellationTask() { + return Optional.ofNullable(cancellationTask); } - void setCancellationTask(final ScheduledFuture cancellationTask) { - this.cancellationTask = Optional.of(cancellationTask); + void setCancellationTask(final ScheduledFuture cancellationTask) { + this.cancellationTask = cancellationTask; } public CachePerfStats getCachePerfStats() { @@ -155,14 +156,14 @@ public CachePerfStats getCachePerfStats() { * * @return the dependency set as a shortcut */ - Set addDependency(CompiledValue cv, RuntimeIterator itr) { + Set addDependency(CompiledValue cv, RuntimeIterator itr) { Set ds = getDependencySet(cv, false); ds.add(itr); return ds; } /** @return the dependency set as a shortcut */ - public Set addDependencies(CompiledValue cv, Set set) { + public Set addDependencies(CompiledValue cv, Set set) { if (set.isEmpty()) { return getDependencySet(cv, true); } @@ -176,22 +177,21 @@ public Set addDependencies(CompiledValue cv, Set set) { */ boolean isDependentOnCurrentScope(CompiledValue cv) { // return !getDependencySet(cv, true).isEmpty(); - Set setRItr = getDependencySet(cv, true); - boolean isDependent = false; - if (!setRItr.isEmpty()) { + final Set iterators = getDependencySet(cv, true); + if (!iterators.isEmpty()) { int currScopeID = currentScope().getScopeID(); - for (RuntimeIterator ritr : setRItr) { - if (currScopeID == ritr.getScopeID()) { - isDependent = true; - break; + for (final RuntimeIterator iterator : iterators) { + if (currScopeID == iterator.getScopeID()) { + return true; } } } - return isDependent; + return false; } /** - * Return true if given CompiledValue is dependent on any RuntimeIterator in all of the scopes + * @return {@code true} if given {@link CompiledValue} is dependent on any {@link RuntimeIterator} + * in all the scopes, otherwise {@code false}. */ boolean isDependentOnAnyIterator(CompiledValue cv) { return !getDependencySet(cv, true).isEmpty(); @@ -222,7 +222,7 @@ Set getDependencySet(CompiledValue cv, boolean readOnly) { * * @return All {@link AbstractCompiledValue} dependencies. 
*/ - public Map getDependencyGraph() { + public Map> getDependencyGraph() { return dependencyGraph; } @@ -264,12 +264,10 @@ public CompiledValue resolve(String name) throws TypeMismatchException, Ambiguou return value; } - /** Return null if cannot be resolved as a variable in current scope */ + /** @return {@code null} if name cannot be resolved as a variable in current scope */ private CompiledValue resolveAsVariable(String name) { - CompiledValue value; for (int i = scopes.size() - 1; i >= 0; i--) { - QScope scope = scopes.get(i); - value = scope.resolve(name); + final CompiledValue value = scopes.get(i).resolve(name); if (value != null) { return value; } @@ -297,7 +295,7 @@ QScope currentScope() { return scopes.peek(); } - public List getCurrentIterators() { + public List getCurrentIterators() { return currentScope().getIterators(); } @@ -311,21 +309,22 @@ public List getCurrentIterators() { * list. If an iterator is dependent on more than one independent iterator, it is not added to the * List *

- * TODO: If we are storing a single Iterator instead of Set , in the itrDefToIndpndtRuntimeItrMap - * , we need to take care of this function. * - * @param rIter Independent RuntimeIterator on which dependent iterators of current scope need to + * @param iterator Independent RuntimeIterator on which dependent iterators of current scope need + * to * identified * @return List containing the independent Runtime Iterator & its dependent iterators */ - public List getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(RuntimeIterator rIter) { + public List getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator( + RuntimeIterator iterator) { List list = new ArrayList<>(); - list.add(rIter); + list.add(iterator); for (RuntimeIterator iteratorInCurrentScope : currentScope().getIterators()) { Set itrSet = - itrDefToIndpndtRuntimeItrMap.get(iteratorInCurrentScope.getCmpIteratorDefn()); - if (rIter != iteratorInCurrentScope && itrSet.size() == 1 - && itrSet.iterator().next() == rIter) { + compiledIteratorDefToIndependentRuntimeIterators + .get(iteratorInCurrentScope.getCmpIteratorDefn()); + if (iterator != iteratorInCurrentScope && itrSet.size() == 1 + && itrSet.iterator().next() == iterator) { list.add(iteratorInCurrentScope); } } @@ -474,7 +473,7 @@ void computeUltimateDependencies(CompiledValue cv, Set set) { Set dependencySet = getDependencySet(cv, true); for (RuntimeIterator rIter : dependencySet) { Set indRuntimeIterators = - itrDefToIndpndtRuntimeItrMap.get(rIter.getCmpIteratorDefn()); + compiledIteratorDefToIndependentRuntimeIterators.get(rIter.getCmpIteratorDefn()); if (indRuntimeIterators != null) { set.addAll(indRuntimeIterators); } @@ -482,9 +481,11 @@ void computeUltimateDependencies(CompiledValue cv, Set set) { } /** - * This function populates the Map itrDefToIndpndtRuntimeItrMap. It creates a Set of - * RuntimeIterators to which the current CompilediteratorDef is dependent upon. Also it sets the - * index_internal_id for the RuntimeIterator, which is used for calculating the canonicalized + * This function populates the Map independentRuntimeIteratorsToRegionPath. It creates a Set of + * RuntimeIterators to which the current {@link CompiledIteratorDef} is dependent upon. It sets + * the + * index_internal_id for the {@link RuntimeIterator}, which is used for calculating the + * canonicalized * iterator definitions for identifying the available index. 
* * @param itrDef CompiledIteratorDef object representing iterator in the query from clause @@ -505,23 +506,24 @@ public void addToIndependentRuntimeItrMap(CompiledIteratorDef itrDef) QueryUtils.obtainTheBottomMostCompiledValue(itrDef.getCollectionExpr()); if (startVal.getType() == OQLLexerTokenTypes.RegionPath) { rgnPath = ((QRegion) ((CompiledRegion) startVal).evaluate(this)).getFullPath(); - indpndtItrToRgnMap.put(itr, rgnPath); + independentRuntimeIteratorsToRegionPath.put(itr, rgnPath); } else if (startVal.getType() == OQLLexerTokenTypes.QUERY_PARAM) { Object rgn; CompiledBindArgument cba = (CompiledBindArgument) startVal; if ((rgn = cba.evaluate(this)) instanceof Region) { - indpndtItrToRgnMap.put(itr, rgnPath = ((Region) rgn).getFullPath()); + independentRuntimeIteratorsToRegionPath.put(itr, + rgnPath = ((Region) rgn).getFullPath()); } } } - itrDefToIndpndtRuntimeItrMap.put(itrDef, set); + compiledIteratorDefToIndependentRuntimeIterators.put(itrDef, set); IndexManager mgr = null; // Set the canonicalized index_internal_id if the condition is satisfied if (set.size() == 1) { if (itr == null) { itr = set.iterator().next(); if (itr.getScopeID() == currentScope().getScopeID()) { - rgnPath = indpndtItrToRgnMap.get(itr); + rgnPath = independentRuntimeIteratorsToRegionPath.get(itr); } } if (rgnPath != null) { @@ -540,10 +542,11 @@ public void addToIndependentRuntimeItrMap(CompiledIteratorDef itrDef) ? currItr.getInternalId() : tempIndexID); } - List getAllIndependentIteratorsOfCurrentScope() { - List independentIterators = new ArrayList<>(indpndtItrToRgnMap.size()); + List getAllIndependentIteratorsOfCurrentScope() { + List independentIterators = new ArrayList<>( + independentRuntimeIteratorsToRegionPath.size()); int currentScopeId = currentScope().getScopeID(); - for (RuntimeIterator rIter : indpndtItrToRgnMap.keySet()) { + for (RuntimeIterator rIter : independentRuntimeIteratorsToRegionPath.keySet()) { if (rIter.getScopeID() == currentScopeId) { independentIterators.add(rIter); } @@ -560,7 +563,7 @@ List getAllIndependentIteratorsOfCurrentScope() { * @return String containing region path */ String getRegionPathForIndependentRuntimeIterator(RuntimeIterator riter) { - return indpndtItrToRgnMap.get(riter); + return independentRuntimeIteratorsToRegionPath.get(riter); } /** @@ -577,7 +580,7 @@ public void addToIndependentRuntimeItrMapForIndexCreation(CompiledIteratorDef it RuntimeIterator itr = itrDef.getRuntimeIterator(this); set.add(itr); } - itrDefToIndpndtRuntimeItrMap.put(itrDef, set); + compiledIteratorDefToIndependentRuntimeIterators.put(itrDef, set); } public void setBindArguments(Object[] bindArguments) { @@ -624,7 +627,7 @@ public boolean isCqQueryContext() { return false; } - public List getBucketList() { + public List getBucketList() { return null; } @@ -644,7 +647,7 @@ public Query getQuery() { throw new UnsupportedOperationException("Method should not have been called"); } - public void setBucketList(List list) { + public void setBucketList(List list) { throw new UnsupportedOperationException("Method should not have been called"); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/IndexConditioningHelper.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/IndexConditioningHelper.java index e3ee3c71a99f..a90cb5df0ac0 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/IndexConditioningHelper.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/IndexConditioningHelper.java @@ -63,7 +63,7 @@ 
class IndexConditioningHelper { // The default is initialized as empty List rather than null to avoid // Null Pointer Exception in the function // getconditionedRelationshipIndexResults - List expansionList = Collections.emptyList(); + List expansionList = Collections.emptyList(); /** * The List containing RuntimeIterators which define the final SelectResults after the relevant @@ -74,7 +74,7 @@ class IndexConditioningHelper { // do not need finalList , but it is used in relation ship index , even if // match level is zero. // So we should never leave it as null - List finalList = null; + List finalList = null; /** * This is the List of RuntimeIterators which gets created only if the index resulst require a @@ -95,7 +95,7 @@ class IndexConditioningHelper { /* * Below Can be null or empty collections if the match level is exact & no shuffling needed */ - List checkList = null; + List checkList = null; /** * This field is meaningful iff the match level is zero, no shuffling needed & there exists a @@ -152,7 +152,8 @@ class IndexConditioningHelper { Support.Assert(indexInfo._index.getResultSetType() instanceof StructType, " If the match level is zero & the size of mapping array is 1 then Index is surely ResultBag else StructBag"); // The independent iterator is added as the first element - grpItrs = context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(indpndntItr); + grpItrs = context + .getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator(indpndntItr); // Check if reshuffling is needed or just changing the struct // type will suffice boolean isReshufflingNeeded = false; @@ -202,7 +203,8 @@ class IndexConditioningHelper { // There is some expansion or truncation needed on the data // obtained from index.Identify a the iterators belonging to this group // The independent iterator is added as the first element - grpItrs = context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(indpndntItr); + grpItrs = context + .getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator(indpndntItr); // Create an array of RuntimeIterators which map to the fields of the // Index set. 
// For those fields which do not have corresponding RuntimeIterator , keep diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QRegion.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QRegion.java index d0bfce1f8d4a..3573a8c9f9cd 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QRegion.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QRegion.java @@ -102,8 +102,8 @@ public QRegion(Region region, boolean includeKeys, ExecutionContext context) { ResultsCollectionWrapper res = null; if (context.getBucketList() != null && region instanceof PartitionedRegion) { PartitionedRegion partitionedRegion = (PartitionedRegion) region; - LocalDataSet localData = - new LocalDataSet(partitionedRegion, new HashSet(context.getBucketList())); + LocalDataSet localData = + new LocalDataSet<>(partitionedRegion, new HashSet<>(context.getBucketList())); this.region = localData; if (includeKeys) { res = new ResultsCollectionWrapper(TypeUtils.getObjectType(constraint), @@ -156,7 +156,7 @@ public void setElementType(ObjectType elementType) { public SelectResults getKeys() { ResultsCollectionWrapper res; if (region instanceof LocalDataSet) { - LocalDataSet localData = (LocalDataSet) region; + LocalDataSet localData = (LocalDataSet) region; res = new ResultsCollectionWrapper(getKeyType(), localData.localKeys()); } else { res = new ResultsCollectionWrapper(getKeyType(), region.keySet()); @@ -188,7 +188,7 @@ public SelectResults getValues() { public SelectResults getEntries() { ResultsCollectionWrapper res; if (region instanceof LocalDataSet) { - LocalDataSet localData = (LocalDataSet) region; + LocalDataSet localData = (LocalDataSet) region; res = new ResultsCollectionWrapper(TypeUtils.getRegionEntryType(region), localData.localEntrySet()); } else { diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutionContext.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutionContext.java index 22dced671ded..dff9c5474b7d 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutionContext.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutionContext.java @@ -14,6 +14,8 @@ */ package org.apache.geode.cache.query.internal; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -25,7 +27,7 @@ import org.apache.geode.pdx.internal.PdxString; /** - * This ExecutionContext will be used ONLY for querying because this is a bit heavt-weight context + * This ExecutionContext will be used ONLY for querying because this is a bit heavy-weight context * whose life is longer in JVM than {@link ExecutionContext} which will be used ONLY for index * updates. 
* @@ -39,20 +41,20 @@ public class QueryExecutionContext extends ExecutionContext { private final boolean cqQueryContext; - private List bucketList; + private List bucketList; private boolean indexUsed = false; /** * stack used to determine which execCache to currently be using */ - private final Stack execCacheStack = new Stack(); + private final Stack execCacheStack = new Stack<>(); /** * a map that stores general purpose maps for caching data that is valid for one query execution * only */ - private final Map execCaches = new HashMap(); + private final Map> execCaches = new HashMap<>(); /** * This map stores PdxString corresponding to the bind argument @@ -62,7 +64,7 @@ public class QueryExecutionContext extends ExecutionContext { /** * List of query index names that the user has hinted on using */ - private ArrayList hints = null; + private ArrayList hints = null; public QueryExecutionContext(Object[] bindArguments, InternalCache cache) { super(bindArguments, cache); @@ -86,21 +88,16 @@ public QueryExecutionContext(Object[] bindArguments, InternalCache cache, Query @Override void cachePut(Object key, Object value) { if (key.equals(CompiledValue.QUERY_INDEX_HINTS)) { - setHints((ArrayList) value); + setHints(uncheckedCast(value)); return; } - // execCache can be empty in cases where we are doing adds to indexes + // execCache can be empty in cases where we are doing adds to the indexes // in that case, we use a default execCache int scopeId = -1; if (!execCacheStack.isEmpty()) { - scopeId = (Integer) execCacheStack.peek(); - } - Map execCache = (Map) execCaches.get(scopeId); - if (execCache == null) { - execCache = new HashMap(); - execCaches.put(scopeId, execCache); + scopeId = execCacheStack.peek(); } - execCache.put(key, value); + execCaches.computeIfAbsent(scopeId, k -> new HashMap<>()).put(key, value); } @Override @@ -110,13 +107,13 @@ public Object cacheGet(Object key) { @Override public Object cacheGet(Object key, Object defaultValue) { - // execCache can be empty in cases where we are doing adds to indexes + // execCache can be empty in cases where we are doing adds to the indexes // in that case, we use a default execCache int scopeId = -1; if (!execCacheStack.isEmpty()) { - scopeId = (Integer) execCacheStack.peek(); + scopeId = execCacheStack.peek(); } - Map execCache = (Map) execCaches.get(scopeId); + Map execCache = execCaches.get(scopeId); if (execCache == null) { return defaultValue; } @@ -161,12 +158,12 @@ public Query getQuery() { } @Override - public void setBucketList(List list) { + public void setBucketList(List list) { bucketList = list; } @Override - public List getBucketList() { + public List getBucketList() { return bucketList; } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutor.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutor.java index b5cf9a70d6be..79aa5b6f0cb0 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutor.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryExecutor.java @@ -29,7 +29,7 @@ public interface QueryExecutor { Object executeQuery(DefaultQuery query, ExecutionContext executionContext, - Object[] parameters, Set buckets) + Object[] parameters, Set buckets) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException; diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java 
b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java index 5967ddd23c2c..e2306c971a33 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/QueryUtils.java @@ -133,7 +133,7 @@ public static SelectResults createStructCollection(boolean distinct, StructType */ public static SelectResults getEmptySelectResults(ObjectType objectType, CachePerfStats statsOrNull) { - SelectResults emptyResults = null; + final SelectResults emptyResults; if (objectType instanceof StructType) { emptyResults = new StructBag((StructTypeImpl) objectType, statsOrNull); } else { @@ -152,7 +152,7 @@ public static SelectResults getEmptySelectResults(ObjectType objectType, */ public static SelectResults getEmptySelectResults(CollectionType collectionType, CachePerfStats statsOrNull) { - SelectResults emptyResults = null; + final SelectResults emptyResults; if (collectionType.isOrdered()) { // The collectionType is ordered. // The 'order by' clause was used in the query. @@ -283,9 +283,7 @@ private static SelectResults sizeSortedUnion(SelectResults small, SelectResults rs = new ResultsBag(large, null); } - for (Object element : small) { - rs.add(element); - } + rs.addAll(small); return rs; } @@ -295,13 +293,15 @@ private static SelectResults sizeSortedUnion(SelectResults small, SelectResults * is based on the order of independent Iterators present in the array . For each group the first * iterator is its independent iterator * - * @param indpndntItrs array of independent RuntimeIterators + * @param independentIterators array of independent RuntimeIterators */ - static List getDependentItrChainForIndpndntItrs(RuntimeIterator[] indpndntItrs, + static List getDependentIteratorChainForIndependentIterators( + RuntimeIterator[] independentIterators, ExecutionContext context) { List ret = new ArrayList(); - for (RuntimeIterator indpndntItr : indpndntItrs) { - ret.addAll(context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(indpndntItr)); + for (RuntimeIterator indpndntItr : independentIterators) { + ret.addAll( + context.getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator(indpndntItr)); } return ret; } @@ -341,7 +341,7 @@ public static SelectResults cartesian(SelectResults[] results, RuntimeIterator[][] itrsForResultFields, List expansionList, List finalList, ExecutionContext context, CompiledValue operand) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { - SelectResults returnSet = null; + final SelectResults returnSet; if (finalList.size() == 1) { ObjectType type = ((RuntimeIterator) finalList.iterator().next()).getElementType(); if (type instanceof StructType) { @@ -375,15 +375,13 @@ private static void doNestedIterations(int level, SelectResults returnSet, Iterator itr = finalItrs.iterator(); int len = finalItrs.size(); if (len > 1) { - Object[] values = new Object[len]; + final Object[] values = new Object[len]; int j = 0; while (itr.hasNext()) { values[j++] = ((RuntimeIterator) itr.next()).evaluate(context); } if (select) { - ((StructFields) returnSet).addFieldValues(values); - } } else { if (select) { @@ -457,7 +455,7 @@ public static boolean applyCondition(CompiledValue operand, ExecutionContext con private static void mergeRelationshipIndexResultsWithIntermediateResults(SelectResults returnSet, SelectResults[] intermediateResults, RuntimeIterator[][] itrsForIntermediateResults, Object[][] indexResults, 
RuntimeIterator[][] indexFieldToItrsMapping, - ListIterator expansionListItr, List finalItrs, ExecutionContext context, List[] checkList, + ListIterator expansionListItr, List finalItrs, ExecutionContext context, CompiledValue iterOps, IndexCutDownExpansionHelper[] icdeh, int level, int maxExpnCartesianDepth) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { @@ -476,7 +474,7 @@ private static void mergeRelationshipIndexResultsWithIntermediateResults(SelectR } else { mergeRelationshipIndexResultsWithIntermediateResults(returnSet, intermediateResults, itrsForIntermediateResults, indexResults, indexFieldToItrsMapping, expansionListItr, - finalItrs, context, checkList, iterOps, icdeh, level + 1, maxExpnCartesianDepth); + finalItrs, context, iterOps, icdeh, level + 1, maxExpnCartesianDepth); if (icdeh[level + 1].cutDownNeeded) { icdeh[level + 1].checkSet.clear(); } @@ -550,9 +548,8 @@ private static boolean setIndexFieldValuesInRespectiveIterators(Object value, } // Object values[] = new Object[numItersInResultSet]; int j = 0; - RuntimeIterator rItr = null; for (int i = 0; i < size; i++) { - rItr = indexFieldToItrsMapping[i]; + final RuntimeIterator rItr = indexFieldToItrsMapping[i]; if (rItr != null) { rItr.setCurrent(fieldValues[i]); if (icdeh.cutDownNeeded) { @@ -561,7 +558,7 @@ private static boolean setIndexFieldValuesInRespectiveIterators(Object value, } } if (icdeh.cutDownNeeded) { - Object temp = null; + final Object temp; if (icdeh.checkSize == 1) { temp = checkFields[0]; } else { @@ -592,7 +589,7 @@ private static SelectResults cutDownAndExpandIndexResults(SelectResults result, useLinkedDataStructure = orderByAttrs.size() == 1; nullValuesAtStart = !((CompiledSortCriterion) orderByAttrs.get(0)).getCriterion(); } - SelectResults returnSet = null; + final SelectResults returnSet; if (finalItrs.size() == 1) { ObjectType resultType = ((RuntimeIterator) finalItrs.iterator().next()).getElementType(); if (useLinkedDataStructure) { @@ -937,7 +934,7 @@ private static IndexData getAvailableIndexIfAny(CompiledValue cv, ExecutionConte return null; } RuntimeIterator rIter = (RuntimeIterator) set.iterator().next(); - String regionPath = null; + final String regionPath; // An Index is not available if the ultimate independent RuntimeIterator is // of different scope or if the underlying // collection is not a Region @@ -946,7 +943,8 @@ private static IndexData getAvailableIndexIfAny(CompiledValue cv, ExecutionConte return null; } // The independent iterator is added as the first element - List groupRuntimeItrs = context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(rIter); + List groupRuntimeItrs = + context.getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator(rIter); String[] definitions = new String[groupRuntimeItrs.size()]; Iterator iterator = groupRuntimeItrs.iterator(); int i = 0; @@ -1018,15 +1016,13 @@ static SelectResults getConditionedIndexResults(SelectResults indexResults, Inde // indicates // that there will be at least one independent group to which we need to // expand to - ich.finalList = getDependentItrChainForIndpndntItrs(grpIndpndntItr, context); + ich.finalList = getDependentIteratorChainForIndependentIterators(grpIndpndntItr, context); // Add the iterators of remaining independent grp to the expansion list List newExpList = new ArrayList(); - int len = grpIndpndntItr.length; - RuntimeIterator tempItr = null; for (RuntimeIterator aGrpIndpndntItr : grpIndpndntItr) { - tempItr = aGrpIndpndntItr; - if 
(tempItr != ich.indpndntItr) { - newExpList.addAll(context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(tempItr)); + if (aGrpIndpndntItr != ich.indpndntItr) { + newExpList.addAll(context + .getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator(aGrpIndpndntItr)); } } newExpList.addAll(ich.expansionList); @@ -1123,10 +1119,10 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( // have called int noOfIndexesToUse = intermediateResults == null || intermediateResults.isEmpty() ? 2 : 0; RuntimeIterator[] resultFieldsItrMapping = null; - List allItrs = context.getCurrentIterators(); + final List allItrs = context.getCurrentIterators(); IndexConditioningHelper singleUsableICH = null; IndexConditioningHelper nonUsableICH = null; - List finalList = + List finalList = completeExpansionNeeded ? allItrs : indpdntItrs == null ? new ArrayList() : null; // the set will contain those iterators which we don't have to expand to either because they are // already present ( because of intermediate results or because index result already contains @@ -1137,15 +1133,15 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( // fields of intermediate // resultset contains any independent iterator of the current condition noOfIndexesToUse = 2; - StructType stype = (StructType) intermediateResults.getCollectionType().getElementType(); - String[] fieldNames = stype.getFieldNames(); - int len = fieldNames.length; + final StructType stype = + (StructType) intermediateResults.getCollectionType().getElementType(); + final String[] fieldNames = stype.getFieldNames(); + final int len = fieldNames.length; resultFieldsItrMapping = new RuntimeIterator[len]; - String fieldName = null; - String lhsID = ich1.indpndntItr.getInternalId(); - String rhsID = ich2.indpndntItr.getInternalId(); + final String lhsID = ich1.indpndntItr.getInternalId(); + final String rhsID = ich2.indpndntItr.getInternalId(); for (int i = 0; i < len; ++i) { - fieldName = fieldNames[i]; + final String fieldName = fieldNames[i]; if (noOfIndexesToUse != 0) { if (fieldName.equals(lhsID)) { --noOfIndexesToUse; @@ -1183,7 +1179,6 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( if (noOfIndexesToUse == 2) { List data = null; try { - ArrayList resultData = new ArrayList(); observer.beforeIndexLookup(indxInfo[0]._index, OQLLexerTokenTypes.TOK_EQ, null); observer.beforeIndexLookup(indxInfo[1]._index, OQLLexerTokenTypes.TOK_EQ, null); if (context.getBucketList() != null) { @@ -1212,9 +1207,7 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( expnItrsToIgnore.addAll(ich2.finalList); // identify the iterators which we need to expand to // TODO: Make the code compact by using a common function to take care of this - int size = finalList.size(); - for (Object o : finalList) { - RuntimeIterator currItr = (RuntimeIterator) o; + for (RuntimeIterator currItr : finalList) { // If the runtimeIterators of scope not present in CheckSet add it to the expansion list if (!expnItrsToIgnore.contains(currItr)) { totalExpList.add(currItr); @@ -1226,19 +1219,17 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( // iterators in the order of independent iterators present in CGJ. 
Otherwise we will have // struct set mismatch while doing intersection with GroupJunction results if (indpdntItrs != null) { - finalList = getDependentItrChainForIndpndntItrs(indpdntItrs, context); + finalList = getDependentIteratorChainForIndependentIterators(indpdntItrs, context); } else { finalList.addAll(ich1.finalList); finalList.addAll(ich2.finalList); } } - List[] checkList = new List[] {ich1.checkList, ich2.checkList}; StructType stype = createStructTypeForRuntimeIterators(finalList); SelectResults returnSet = QueryUtils.createStructCollection(context, stype); RuntimeIterator[][] mappings = new RuntimeIterator[2][]; mappings[0] = ich1.indexFieldToItrsMapping; mappings[1] = ich2.indexFieldToItrsMapping; - List[] totalCheckList = new List[] {ich1.checkList, ich2.checkList}; RuntimeIterator[][] resultMappings = new RuntimeIterator[1][]; resultMappings[0] = resultFieldsItrMapping; Iterator dataItr = data.iterator(); @@ -1267,7 +1258,7 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( if (doMergeWithIntermediateResults) { mergeRelationshipIndexResultsWithIntermediateResults(returnSet, new SelectResults[] {intermediateResults}, resultMappings, values, mappings, - expansionListIterator, finalList, context, checkList, iterOperands, icdeh, 0, + expansionListIterator, finalList, context, iterOperands, icdeh, 0, maxCartesianDepth); } else { mergeAndExpandCutDownRelationshipIndexResults(values, returnSet, mappings, @@ -1290,14 +1281,10 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( // complete expansion flag.. List totalExpList = new ArrayList(singleUsableICH.expansionList); if (completeExpansionNeeded) { - Support.Assert(expnItrsToIgnore != null, - "expnItrsToIgnore should not have been null as we are in this block itself indicates that intermediate results was not null"); expnItrsToIgnore.addAll(singleUsableICH.finalList); // identify the iterators which we need to expand to // TODO: Make the code compact by using a common function to take care of this - int size = finalList.size(); - for (Object o : finalList) { - RuntimeIterator currItr = (RuntimeIterator) o; + for (RuntimeIterator currItr : finalList) { // If the runtimeIterators of scope not present in CheckSet add it to the expansion list if (!expnItrsToIgnore.contains(currItr)) { totalExpList.add(currItr); @@ -1309,7 +1296,7 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( // iterators in the order of independent iterators present in CGJ. 
Otherwise we will have // struct set mismatch while doing intersection with GroupJunction results if (indpdntItrs != null) { - finalList = getDependentItrChainForIndpndntItrs(indpdntItrs, context); + finalList = getDependentIteratorChainForIndependentIterators(indpdntItrs, context); } else { finalList.addAll(singleUsableICH.finalList); } @@ -1322,7 +1309,7 @@ static SelectResults getRelationshipIndexResultsMergedWithIntermediateResults( CompiledValue nonUsblIndxPath = nonUsableICH.indxInfo._path; ObjectType singlUsblIndxResType = singleUsblIndex.getResultSetType(); - SelectResults singlUsblIndxRes = null; + final SelectResults singlUsblIndxRes; if (singlUsblIndxResType instanceof StructType) { singlUsblIndxRes = QueryUtils.createStructCollection(context, (StructTypeImpl) singlUsblIndxResType); @@ -1452,29 +1439,28 @@ static SelectResults getConditionedRelationshipIndexResultsExpandedToTopOrCGJLev totalExpList.addAll(ich1.expansionList); totalExpList.addAll(ich2.expansionList); - List totalFinalList = null; + final List totalFinalList; if (completeExpansionNeeded) { totalFinalList = context.getCurrentIterators(); Set expnItrsAlreadyAccounted = new HashSet(); expnItrsAlreadyAccounted.addAll(ich1.finalList); expnItrsAlreadyAccounted.addAll(ich2.finalList); - int size = totalFinalList.size(); - for (Object o : totalFinalList) { - RuntimeIterator currItr = (RuntimeIterator) o; + for (RuntimeIterator runtimeIterator : totalFinalList) { // If the runtimeIterators of scope not present in CheckSet add it to the expansion list - if (!expnItrsAlreadyAccounted.contains(currItr)) { - totalExpList.add(currItr); + if (!expnItrsAlreadyAccounted.contains(runtimeIterator)) { + totalExpList.add(runtimeIterator); } } } else { - totalFinalList = new ArrayList(); + totalFinalList = new ArrayList<>(); for (RuntimeIterator indpndntItr : indpdntItrs) { if (indpndntItr == ich1.finalList.get(0)) { totalFinalList.addAll(ich1.finalList); } else if (indpndntItr == ich2.finalList.get(0)) { totalFinalList.addAll(ich2.finalList); } else { - List temp = context.getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(indpndntItr); + List temp = context + .getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator(indpndntItr); totalFinalList.addAll(temp); totalExpList.addAll(temp); } @@ -1537,10 +1523,11 @@ static SelectResults testCutDownAndExpandIndexResults(List dataList) (ExecutionContext) dataList.get(4), (List) dataList.get(5), null, null); } - static List queryEquijoinConditionBucketIndexes(IndexInfo[] indxInfo, ExecutionContext context) + static List queryEquijoinConditionBucketIndexes(IndexInfo[] indxInfo, + ExecutionContext context) throws QueryInvocationTargetException, TypeMismatchException, FunctionDomainException, NameResolutionException { - List resultData = new ArrayList(); + List resultData = new ArrayList<>(); AbstractIndex index0 = (AbstractIndex) indxInfo[0]._index; AbstractIndex index1 = (AbstractIndex) indxInfo[1]._index; PartitionedRegion pr0 = null; @@ -1554,22 +1541,19 @@ static List queryEquijoinConditionBucketIndexes(IndexInfo[] indxInfo, ExecutionC pr1 = ((Bucket) index1.getRegion()).getPartitionedRegion(); } - List data = null; - IndexProtocol i0 = null; - IndexProtocol i1 = null; - for (Object b : context.getBucketList()) { - i0 = pr0 != null ? PartitionedIndex.getBucketIndex(pr0, index0.getName(), (Integer) b) - : indxInfo[0]._index; - i1 = pr1 != null ? 
PartitionedIndex.getBucketIndex(pr1, index1.getName(), (Integer) b) - : indxInfo[1]._index; + for (final Integer b : context.getBucketList()) { + final IndexProtocol i0 = + pr0 != null ? PartitionedIndex.getBucketIndex(pr0, index0.getName(), b) + : indxInfo[0]._index; + final IndexProtocol i1 = + pr1 != null ? PartitionedIndex.getBucketIndex(pr1, index1.getName(), b) + : indxInfo[1]._index; if (i0 == null || i1 == null) { continue; } - data = i0.queryEquijoinCondition(i1, context); - resultData.addAll(data); + resultData.addAll(i0.queryEquijoinCondition(i1, context)); } - data = resultData; - return data; + return resultData; } } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractIndex.java index 4d5389bf380b..ffd7cf55672d 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractIndex.java @@ -27,6 +27,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; @@ -97,7 +98,7 @@ public abstract class AbstractIndex implements IndexProtocol { final String indexName; - final Region region; + final Region region; final String indexedExpression; @@ -140,9 +141,9 @@ public abstract class AbstractIndex implements IndexProtocol { /** Flag to indicate if the index is populated with data */ volatile boolean isPopulated = false; - AbstractIndex(InternalCache cache, String indexName, Region region, String fromClause, + AbstractIndex(InternalCache cache, String indexName, Region region, String fromClause, String indexedExpression, String projectionAttributes, String originalFromClause, - String originalIndexedExpression, String[] defintions, IndexStatistics stats) { + String originalIndexedExpression, String[] definitions, IndexStatistics stats) { this.cache = cache; this.indexName = indexName; this.region = region; @@ -150,7 +151,7 @@ public abstract class AbstractIndex implements IndexProtocol { this.fromClause = fromClause; this.originalIndexedExpression = originalIndexedExpression; this.originalFromClause = originalFromClause; - canonicalizedDefinitions = defintions; + canonicalizedDefinitions = definitions; if (StringUtils.isEmpty(projectionAttributes)) { projectionAttributes = "*"; } @@ -169,7 +170,7 @@ public abstract class AbstractIndex implements IndexProtocol { * * @return the forward map of respective index. 
*/ - public Map getValueToEntriesMap() { + public Map getValueToEntriesMap() { return null; } @@ -238,7 +239,7 @@ public IndexedExpressionEvaluator getEvaluator() { * @return the Region for this index */ @Override - public Region getRegion() { + public Region getRegion() { return region; } @@ -251,7 +252,7 @@ public String getName() { } @Override - public void query(Object key, int operator, Collection results, ExecutionContext context) + public void query(Object key, int operator, Collection results, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { @@ -283,9 +284,11 @@ public void query(Object key, int operator, Collection results, ExecutionContext } @Override - public void query(Object key, int operator, Collection results, @Retained CompiledValue iterOp, - RuntimeIterator indpndntItr, ExecutionContext context, List projAttrib, - SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, + public void query(Object key, int operator, Collection results, + @Retained CompiledValue iterOp, + RuntimeIterator runtimeIterator, ExecutionContext context, List projAttrib, + SelectResults intermediateResults, boolean isIntersection) + throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { // get a read lock when doing a lookup @@ -299,8 +302,8 @@ public void query(Object key, int operator, Collection results, @Retained Compil if (bucketIndex == null) { continue; } - bucketIndex.lockedQuery(key, operator, results, iterOp, indpndntItr, context, projAttrib, - intermediateResults, isIntersection); + bucketIndex.lockedQuery(key, operator, results, iterOp, runtimeIterator, context, + projAttrib, intermediateResults, isIntersection); } } finally { updateIndexUseEndStats(start); @@ -308,7 +311,7 @@ public void query(Object key, int operator, Collection results, @Retained Compil } else { long start = updateIndexUseStats(); try { - lockedQuery(key, operator, results, iterOp, indpndntItr, context, projAttrib, + lockedQuery(key, operator, results, iterOp, runtimeIterator, context, projAttrib, intermediateResults, isIntersection); } finally { updateIndexUseEndStats(start); @@ -317,7 +320,7 @@ public void query(Object key, int operator, Collection results, @Retained Compil } @Override - public void query(Object key, int operator, Collection results, Set keysToRemove, + public void query(Object key, int operator, Collection results, Set keysToRemove, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { @@ -348,11 +351,11 @@ public void query(Object key, int operator, Collection results, Set keysToRemove } @Override - public void query(Collection results, Set keysToRemove, ExecutionContext context) + public void query(Collection results, Set keysToRemove, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { - Iterator iterator = keysToRemove.iterator(); + Iterator iterator = keysToRemove.iterator(); Object temp = iterator.next(); iterator.remove(); if (context.getBucketList() != null && region instanceof BucketRegion) { @@ -384,7 +387,8 @@ public void query(Collection results, Set keysToRemove, ExecutionContext context @Override public void query(Object lowerBoundKey, int lowerBoundOperator, Object upperBoundKey, - int upperBoundOperator, Collection results, Set 
keysToRemove, ExecutionContext context) + int upperBoundOperator, Collection results, Set keysToRemove, + ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { @@ -418,7 +422,7 @@ public void query(Object lowerBoundKey, int lowerBoundOperator, Object upperBoun } @Override - public List queryEquijoinCondition(IndexProtocol index, ExecutionContext context) + public List queryEquijoinCondition(IndexProtocol index, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { @@ -531,10 +535,10 @@ public void markValid(boolean b) { } @Override - public boolean isMatchingWithIndexExpression(CompiledValue condnExpr, String condnExprStr, - ExecutionContext context) + public boolean isMatchingWithIndexExpression(CompiledValue conditionExpression, + String conditionExpressionString, ExecutionContext context) throws TypeMismatchException, NameResolutionException { - return indexedExpression.equals(condnExprStr); + return indexedExpression.equals(conditionExpressionString); } // package-private to avoid synthetic accessor @@ -555,8 +559,8 @@ Object verifyAndGetPdxDomainObject(Object value) { return value; } - private void addToResultsWithUnionOrIntersection(Collection results, - SelectResults intermediateResults, boolean isIntersection, Object value) { + private void addToResultsWithUnionOrIntersection(Collection results, + SelectResults intermediateResults, boolean isIntersection, Object value) { value = verifyAndGetPdxDomainObject(value); if (intermediateResults == null) { @@ -574,8 +578,8 @@ private void addToResultsWithUnionOrIntersection(Collection results, } } - private void addToStructsWithUnionOrIntersection(Collection results, - SelectResults intermediateResults, boolean isIntersection, Object[] values) { + private void addToStructsWithUnionOrIntersection(Collection results, + SelectResults intermediateResults, boolean isIntersection, Object[] values) { for (int i = 0; i < values.length; i++) { values[i] = verifyAndGetPdxDomainObject(values[i]); @@ -586,7 +590,7 @@ private void addToStructsWithUnionOrIntersection(Collection results, ((StructFields) results).addFieldValues(values); } else { // The results could be LinkedStructSet or SortedResultsBag or StructSet - SelectResults selectResults = (SelectResults) results; + SelectResults selectResults = (SelectResults) results; StructImpl structImpl = new StructImpl( (StructTypeImpl) selectResults.getCollectionType().getElementType(), values); selectResults.add(structImpl); @@ -603,7 +607,7 @@ private void addToStructsWithUnionOrIntersection(Collection results, } else { // could be LinkedStructSet or SortedResultsBag - SelectResults selectResults = (SelectResults) results; + SelectResults selectResults = (SelectResults) results; StructImpl structImpl = new StructImpl( (StructTypeImpl) selectResults.getCollectionType().getElementType(), values); if (intermediateResults.remove(structImpl)) { @@ -616,7 +620,7 @@ private void addToStructsWithUnionOrIntersection(Collection results, ((StructFields) results).addFieldValues(values); } else { // could be LinkedStructSet or SortedResultsBag - SelectResults selectResults = (SelectResults) results; + SelectResults selectResults = (SelectResults) results; StructImpl structImpl = new StructImpl( (StructTypeImpl) selectResults.getCollectionType().getElementType(), values); if (intermediateResults.remove(structImpl)) { @@ -627,8 +631,9 @@ private void 
addToStructsWithUnionOrIntersection(Collection results, } } - void applyCqOrProjection(List projAttrib, ExecutionContext context, Collection result, - Object iterValue, SelectResults intermediateResults, boolean isIntersection, Object key) + void applyCqOrProjection(List projAttrib, ExecutionContext context, Collection result, + Object iterValue, SelectResults intermediateResults, boolean isIntersection, + Object key) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { if (context != null && context.isCqQueryContext()) { @@ -638,8 +643,8 @@ void applyCqOrProjection(List projAttrib, ExecutionContext context, Collection r } } - void applyProjection(List projAttrib, ExecutionContext context, Collection result, - Object iterValue, SelectResults intermediateResults, boolean isIntersection) + void applyProjection(List projAttrib, ExecutionContext context, Collection result, + Object iterValue, SelectResults intermediateResults, boolean isIntersection) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { @@ -650,16 +655,15 @@ void applyProjection(List projAttrib, ExecutionContext context, Collection resul } else { boolean isStruct = result instanceof SelectResults - && ((SelectResults) result).getCollectionType().getElementType() != null - && ((SelectResults) result).getCollectionType().getElementType().isStructType(); + && ((SelectResults) result).getCollectionType().getElementType() != null + && ((SelectResults) result).getCollectionType().getElementType().isStructType(); if (isStruct) { int projCount = projAttrib.size(); Object[] values = new Object[projCount]; - Iterator projIter = projAttrib.iterator(); int i = 0; - while (projIter.hasNext()) { - Object[] projDef = (Object[]) projIter.next(); + for (final Object o : projAttrib) { + Object[] projDef = (Object[]) o; values[i] = deserializePdxForLocalDistinctQuery(context, ((CompiledValue) projDef[1]).evaluate(context)); i++; @@ -679,7 +683,7 @@ void applyProjection(List projAttrib, ExecutionContext context, Collection resul * For local queries with distinct, deserialize all PdxInstances as we do not have a way to * compare Pdx and non Pdx objects in case the cache has a mix of pdx and non pdx objects. We * still have to honor the cache level readSerialized flag in case of all Pdx objects in cache. 
- * Also always convert PdxString to String before adding to resultSet for remote queries + * Always convert PdxString to String before adding to resultSet for remote queries */ private Object deserializePdxForLocalDistinctQuery(ExecutionContext context, Object value) throws QueryInvocationTargetException { @@ -701,73 +705,6 @@ private Object deserializePdxForLocalDistinctQuery(ExecutionContext context, Obj return value; } - private void removeFromResultsWithUnionOrIntersection(Collection results, - SelectResults intermediateResults, boolean isIntersection, Object value) { - - if (intermediateResults == null) { - results.remove(value); - } else { - if (isIntersection) { - int numOcc = ((SelectResults) results).occurrences(value); - if (numOcc > 0) { - results.remove(value); - intermediateResults.add(value); - } - } else { - results.remove(value); - } - } - } - - private void removeFromStructsWithUnionOrIntersection(Collection results, - SelectResults intermediateResults, boolean isIntersection, Object[] values) { - - if (intermediateResults == null) { - ((StructFields) results).removeFieldValues(values); - } else { - if (isIntersection) { - int numOcc = ((SelectResults) results).occurrences(values); - if (numOcc > 0) { - ((StructFields) results).removeFieldValues(values); - ((StructFields) intermediateResults).addFieldValues(values); - - } - } else { - ((StructFields) results).removeFieldValues(values); - } - } - } - - private void removeProjection(List projAttrib, ExecutionContext context, Collection result, - Object iterValue, SelectResults intermediateResults, boolean isIntersection) - throws FunctionDomainException, TypeMismatchException, NameResolutionException, - QueryInvocationTargetException { - - if (projAttrib == null) { - removeFromResultsWithUnionOrIntersection(result, intermediateResults, isIntersection, - iterValue); - } else { - if (result instanceof StructFields) { - int projCount = projAttrib.size(); - Object[] values = new Object[projCount]; - Iterator projIter = projAttrib.iterator(); - int i = 0; - while (projIter.hasNext()) { - Object[] projDef = (Object[]) projIter.next(); - values[i++] = ((CompiledValue) projDef[1]).evaluate(context); - } - removeFromStructsWithUnionOrIntersection(result, intermediateResults, isIntersection, - values); - } else { - Object[] temp = (Object[]) projAttrib.get(0); - Object val = ((CompiledValue) temp[1]).evaluate(context); - removeFromResultsWithUnionOrIntersection(result, intermediateResults, isIntersection, - val); - } - } - - } - /** * This function returns the canonicalized definitions of the from clauses used in Index creation */ @@ -831,17 +768,21 @@ public String toString() { abstract void saveMapping(Object key, Object value, RegionEntry entry) throws IMQException; /** Lookup method used when appropriate lock is held */ - abstract void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, - RuntimeIterator indpndntItr, ExecutionContext context, List projAttrib, - SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, + abstract void lockedQuery(Object key, int operator, Collection results, + CompiledValue iterOps, + RuntimeIterator independentIterator, ExecutionContext context, List projAttrib, + SelectResults intermediateResults, boolean isIntersection) + throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; abstract void lockedQuery(Object lowerBoundKey, int lowerBoundOperator, Object upperBoundKey, - int 
upperBoundOperator, Collection results, Set keysToRemove, ExecutionContext context) + int upperBoundOperator, Collection results, Set keysToRemove, + ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; - abstract void lockedQuery(Object key, int operator, Collection results, Set keysToRemove, + abstract void lockedQuery(Object key, int operator, Collection results, + Set keysToRemove, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; @@ -946,20 +887,20 @@ public void incNumBucketIndexes(int delta) {} class IMQEvaluator implements IndexedExpressionEvaluator { private final InternalCache cache; - private List fromIterators = null; + private final List fromIterators; - private CompiledValue indexedExpr = null; + private final CompiledValue indexedExpr; private final String[] canonicalIterNames; private ObjectType indexResultSetType = null; - private Map dependencyGraph = null; + private Map> dependencyGraph = null; /** * The boolean if true indicates that the 0th iterator is on entries . If the 0th iterator is on * collection of Region.Entry objects, then the RegionEntry object used in Index data objects is - * obtained directly from its corresponding Region.Entry object. However if the 0th iterator is + * obtained directly from its corresponding Region.Entry object. However, if the 0th iterator is * not on entries then the boolean is false. In this case the additional projection attribute * gives us the original value of the iterator while the Region.Entry object is obtained from * 0th iterator. It is possible to have index being created on a Region Entry itself , instead @@ -969,50 +910,49 @@ class IMQEvaluator implements IndexedExpressionEvaluator { * be easily obtained from the 0th iterator. In this case, the additional projection attribute s * not null as it is used to evaluate the Entry object from the 0th iterator. */ - private boolean isFirstItrOnEntry = false; + private final boolean isFirstItrOnEntry; /** The boolean if true indicates that the 0th iterator is on keys. */ - private boolean isFirstItrOnKey = false; + private final boolean isFirstItrOnKey; /** * List of modified iterators, not null only when the boolean isFirstItrOnEntry is false. */ - private List indexInitIterators = null; + private final List indexInitIterators; /** * The additional Projection attribute representing the value of the original 0th iterator. If - * the isFirstItrOnEntry is false, then it is not null. However if the isFirstItrOnEntry is true + * the isFirstItrOnEntry is false, then it is not null. However, if the isFirstItrOnEntry is + * true * but & still this attribute is not null, this indicates that the 0th iterator is derived using - * an individual entry thru Map operator on the Region. + * an individual entry through Map operator on the Region. */ - private CompiledValue additionalProj = null; + private final CompiledValue additionalProj; /** This is not null iff the boolean isFirstItrOnEntry is false. 
*/ private CompiledValue modifiedIndexExpr = null; - private ObjectType addnlProjType = null; - - private int initEntriesUpdated = 0; + private ObjectType additionalProjType = null; private boolean hasInitOccurredOnce = false; - private ExecutionContext initContext = null; + private final ExecutionContext initContext; - private int iteratorSize = -1; + private final int iteratorSize; - private Region rgn = null; + private final Region region; /** Creates a new instance of IMQEvaluator */ IMQEvaluator(IndexCreationHelper helper) { cache = helper.getCache(); fromIterators = helper.getIterators(); indexedExpr = helper.getCompiledIndexedExpression(); - rgn = helper.getRegion(); + region = helper.getRegion(); // The modified iterators for optimizing Index creation isFirstItrOnEntry = ((FunctionalIndexCreationHelper) helper).isFirstIteratorRegionEntry; isFirstItrOnKey = ((FunctionalIndexCreationHelper) helper).isFirstIteratorRegionKey; additionalProj = ((FunctionalIndexCreationHelper) helper).additionalProj; - Object[] params1 = {new QRegion(rgn, false)}; + Object[] params1 = {new QRegion(region, false)}; initContext = new ExecutionContext(params1, cache); canonicalIterNames = helper.canonicalizedIteratorNames; if (isFirstItrOnEntry) { @@ -1020,7 +960,7 @@ class IMQEvaluator implements IndexedExpressionEvaluator { } else { indexInitIterators = ((FunctionalIndexCreationHelper) helper).indexInitIterators; modifiedIndexExpr = ((FunctionalIndexCreationHelper) helper).modifiedIndexExpr; - addnlProjType = ((FunctionalIndexCreationHelper) helper).addnlProjType; + additionalProjType = ((FunctionalIndexCreationHelper) helper).additionalProjType; } iteratorSize = indexInitIterators.size(); } @@ -1041,7 +981,7 @@ public String getFromClause() { } @Override - public void expansion(List expandedResults, Object lowerBoundKey, Object upperBoundKey, + public void expansion(List expandedResults, Object lowerBoundKey, Object upperBoundKey, int lowerBoundOperator, int upperBoundOperator, Object value) throws IMQException { // no-op } @@ -1049,7 +989,7 @@ public void expansion(List expandedResults, Object lowerBoundKey, Object upperBo @Override public void evaluate(RegionEntry target, boolean add) throws IMQException { assert add; // ignored, but should be true here - DummyQRegion dQRegion = new DummyQRegion(rgn); + DummyQRegion dQRegion = new DummyQRegion(region); dQRegion.setEntry(target); Object[] params = {dQRegion}; ExecutionContext context = new ExecutionContext(params, cache); @@ -1080,11 +1020,11 @@ public void evaluate(RegionEntry target, boolean add) throws IMQException { } Support.Assert(indexResultSetType != null, - "IMQEvaluator::evaluate:The StrcutType should have been initialized during index creation"); + "IMQEvaluator::evaluate:The StructType should have been initialized during index creation"); doNestedIterations(0, context); - } catch (IMQException imqe) { - throw imqe; + } catch (IMQException e) { + throw e; } catch (Exception e) { throw new IMQException(e); } finally { @@ -1097,7 +1037,6 @@ public void evaluate(RegionEntry target, boolean add) throws IMQException { */ @Override public void initializeIndex(boolean loadEntries) throws IMQException { - initEntriesUpdated = 0; try { // Since an index initialization can happen multiple times for a given region, due to clear // operation, we are using hardcoded scope ID of 1 , as otherwise if obtained from @@ -1123,8 +1062,8 @@ public void initializeIndex(boolean loadEntries) throws IMQException { if (loadEntries) { doNestedIterationsForIndexInit(0, 
initContext.getCurrentIterators()); } - } catch (IMQException imqe) { - throw imqe; + } catch (IMQException e) { + throw e; } catch (Exception e) { throw new IMQException(e); } finally { @@ -1132,17 +1071,14 @@ public void initializeIndex(boolean loadEntries) throws IMQException { } } - private void doNestedIterationsForIndexInit(int level, List runtimeIterators) + private void doNestedIterationsForIndexInit(int level, List runtimeIterators) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException, IMQException { - if (level == 1) { - ++initEntriesUpdated; - } if (level == iteratorSize) { applyProjectionForIndexInit(runtimeIterators); } else { - RuntimeIterator rIter = (RuntimeIterator) runtimeIterators.get(level); - Collection collection = rIter.evaluateCollection(initContext); + RuntimeIterator rIter = runtimeIterators.get(level); + Collection collection = rIter.evaluateCollection(initContext); if (collection == null) { return; } @@ -1168,7 +1104,7 @@ private void doNestedIterationsForIndexInit(int level, List runtimeIterators) * isFirstItrOnEntry is false, then the first attribute of the Struct object is obtained by * evaluating the additional projection attribute. */ - private void applyProjectionForIndexInit(List currrentRuntimeIters) + private void applyProjectionForIndexInit(List currentRuntimeIterators) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException, IMQException { @@ -1184,32 +1120,32 @@ private void applyProjectionForIndexInit(List currrentRuntimeIters) if (isFirstItrOnEntry && additionalProj != null) { temp = (NonTXEntry) additionalProj.evaluate(initContext); } else { - temp = (NonTXEntry) ((RuntimeIterator) currrentRuntimeIters.get(0)) + temp = (NonTXEntry) currentRuntimeIterators.get(0) .evaluate(initContext); } RegionEntry re = temp.getRegionEntry(); - Object indxResultSet; + Object indexResultSet; if (iteratorSize == 1) { - indxResultSet = isFirstItrOnEntry + indexResultSet = isFirstItrOnEntry ? additionalProj == null ? temp - : ((RuntimeIterator) currrentRuntimeIters.get(0)).evaluate(initContext) + : currentRuntimeIterators.get(0).evaluate(initContext) : additionalProj.evaluate(initContext); } else { Object[] tuple = new Object[iteratorSize]; int i = isFirstItrOnEntry ? 0 : 1; for (; i < iteratorSize; i++) { - RuntimeIterator iter = (RuntimeIterator) currrentRuntimeIters.get(i); + RuntimeIterator iter = currentRuntimeIterators.get(i); tuple[i] = iter.evaluate(initContext); } if (!isFirstItrOnEntry) { tuple[0] = additionalProj.evaluate(initContext); } Support.Assert(indexResultSetType instanceof StructTypeImpl, - "The Index ResultType should have been an instance of StructTypeImpl rather than ObjectTypeImpl. The indxeResultType is " + "The Index ResultType should have been an instance of StructTypeImpl rather than ObjectTypeImpl. 
The indexResultType is " + indexResultSetType); - indxResultSet = new StructImpl((StructTypeImpl) indexResultSetType, tuple); + indexResultSet = new StructImpl((StructTypeImpl) indexResultSetType, tuple); } // Key must be evaluated after indexResultSet evaluation is done as Entry might be getting @@ -1221,19 +1157,19 @@ private void applyProjectionForIndexInit(List currrentRuntimeIters) setPdxStringFlag(indexKey); } indexKey = getPdxStringForIndexedPdxKeys(indexKey); - addMapping(indexKey, indxResultSet, re); + addMapping(indexKey, indexResultSet, re); } private void doNestedIterations(int level, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException, IMQException { - List iterList = context.getCurrentIterators(); + List iterList = context.getCurrentIterators(); if (level == iteratorSize) { applyProjection(context); } else { RuntimeIterator rIter = (RuntimeIterator) iterList.get(level); - Collection collection = rIter.evaluateCollection(context); + Collection collection = rIter.evaluateCollection(context); if (collection == null) { return; } @@ -1248,33 +1184,33 @@ private void applyProjection(ExecutionContext context) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException, IMQException { - List currrentRuntimeIters = context.getCurrentIterators(); + List currentIterators = context.getCurrentIterators(); Object indexKey = indexedExpr.evaluate(context); // based on the first key convert the rest to PdxString or String if (!isIndexedPdxKeysFlagSet) { setPdxStringFlag(indexKey); } indexKey = getPdxStringForIndexedPdxKeys(indexKey); - Object indxResultSet; + Object indexResultSet; if (iteratorSize == 1) { - RuntimeIterator iter = (RuntimeIterator) currrentRuntimeIters.get(0); - indxResultSet = iter.evaluate(context); + RuntimeIterator iter = currentIterators.get(0); + indexResultSet = iter.evaluate(context); } else { Object[] tuple = new Object[iteratorSize]; for (int i = 0; i < iteratorSize; i++) { - RuntimeIterator iter = (RuntimeIterator) currrentRuntimeIters.get(i); + RuntimeIterator iter = currentIterators.get(i); tuple[i] = iter.evaluate(context); } Support.Assert(indexResultSetType instanceof StructTypeImpl, - "The Index ResultType should have been an instance of StructTypeImpl rather than ObjectTypeImpl. The indxeResultType is " + "The Index ResultType should have been an instance of StructTypeImpl rather than ObjectTypeImpl. The indexResultType is " + indexResultSetType); - indxResultSet = new StructImpl((StructTypeImpl) indexResultSetType, tuple); + indexResultSet = new StructImpl((StructTypeImpl) indexResultSetType, tuple); } // Keep Entry value in fly until all keys are evaluated RegionEntry entry = ((DummyQRegion) context.getBindArgument(1)).getEntry(); - saveMapping(indexKey, indxResultSet, entry); + saveMapping(indexKey, indexResultSet, entry); } /** @@ -1282,7 +1218,7 @@ private void applyProjection(ExecutionContext context) * on Entry */ private ObjectType createIndexResultSetType() { - List currentIterators = initContext.getCurrentIterators(); + List currentIterators = initContext.getCurrentIterators(); int len = currentIterators.size(); ObjectType[] fieldTypes = new ObjectType[len]; int start = isFirstItrOnEntry ? 
0 : 1; @@ -1291,7 +1227,7 @@ private ObjectType createIndexResultSetType() { fieldTypes[start] = iter.getElementType(); } if (!isFirstItrOnEntry) { - fieldTypes[0] = addnlProjType; + fieldTypes[0] = additionalProjType; } return len == 1 ? fieldTypes[0] : new StructTypeImpl(canonicalIterNames, fieldTypes); } @@ -1310,24 +1246,25 @@ boolean isFirstItrOnKey() { } @Override - public List getAllDependentIterators() { + public List getAllDependentIterators() { return fromIterators; } } /** - * Checks the limit for the resultset for distinct and non-distinct queries separately. In case of + * Checks the limit for the results for distinct and non-distinct queries separately. In case of * non-distinct distinct elements size of result-set is matched against limit passed in as an * argument. * * @return true if limit is satisfied. */ - boolean verifyLimit(Collection result, int limit) { - return limit > 0 && result.size() == limit; + boolean verifyLimit(Collection results, int limit) { + return limit > 0 && results.size() == limit; } /** - * This will verify the consistency between RegionEntry and IndexEntry. RangeIndex has following + * This will verify the consistency between RegionEntry and IndexEntry. RangeIndex has the + * following * entry structure, * * IndexKey --> [RegionEntry, [Iterator1, Iterator2....., IteratorN]] @@ -1349,7 +1286,7 @@ boolean verifyLimit(Collection result, int limit) { // package-private to avoid synthetic accessor boolean verifyEntryAndIndexValue(RegionEntry re, Object value, ExecutionContext context) { IMQEvaluator evaluator = (IMQEvaluator) getEvaluator(); - List valuesInRegion = null; + List valuesInRegion = null; Object valueInIndex = null; try { @@ -1380,7 +1317,7 @@ boolean verifyEntryAndIndexValue(RegionEntry re, Object value, ExecutionContext } // We could have many index keys available in one Region entry or just one. - if (!valuesInRegion.isEmpty()) { + if (!CollectionUtils.isEmpty(valuesInRegion)) { for (Object valueInRegion : valuesInRegion) { if (compareStructWithNonStruct(valueInRegion, valueInIndex)) { return true; @@ -1403,7 +1340,7 @@ boolean verifyEntryAndIndexValue(RegionEntry re, Object value, ExecutionContext private boolean compareStructWithNonStruct(Object valueInRegion, Object valueInIndex) { if (valueInRegion instanceof Struct && valueInIndex instanceof Struct) { Object[] regFields = ((StructImpl) valueInRegion).getFieldValues(); - List indFields = Arrays.asList(((StructImpl) valueInIndex).getFieldValues()); + List indFields = Arrays.asList(((StructImpl) valueInIndex).getFieldValues()); for (Object regField : regFields) { if (!indFields.contains(regField)) { return false; @@ -1439,7 +1376,7 @@ private boolean compareStructWithNonStruct(Object valueInRegion, Object valueInI * @param context passed here is query context. * @return Evaluated second level collection. */ - private List evaluateIndexIteratorsFromRE(Object value, ExecutionContext context) + private List evaluateIndexIteratorsFromRE(Object value, ExecutionContext context) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { @@ -1449,44 +1386,44 @@ private List evaluateIndexIteratorsFromRE(Object value, ExecutionContext context value = new NonTXEntry((LocalRegion) getRegion(), (RegionEntry) value); } // Get all Independent and dependent iterators for this Index. 
- List itrs = getAllDependentRuntimeIterators(context); - - return evaluateLastColl(value, context, itrs, 0); + List iterators = getAllDependentRuntimeIterators(context); + return evaluateLastColl(value, context, iterators, 0); } - private List evaluateLastColl(Object value, ExecutionContext context, List itrs, int level) + private List evaluateLastColl(Object value, ExecutionContext context, + List runtimeIterators, int level) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { // A tuple is a value generated from RegionEntry value which could be a StructType (Multiple // Dependent Iterators) or ObjectType (Single Iterator) value. - List tuples = new ArrayList(1); + List tuples = new ArrayList<>(1); - RuntimeIterator currItrator = (RuntimeIterator) itrs.get(level); - currItrator.setCurrent(value); + RuntimeIterator currentIterator = runtimeIterators.get(level); + currentIterator.setCurrent(value); // If its last iterator then just evaluate final struct. - if (itrs.size() - 1 == level) { - if (itrs.size() > 1) { - Object[] tuple = new Object[itrs.size()]; - for (int i = 0; i < itrs.size(); i++) { - RuntimeIterator iter = (RuntimeIterator) itrs.get(i); + if (runtimeIterators.size() - 1 == level) { + if (runtimeIterators.size() > 1) { + Object[] tuple = new Object[runtimeIterators.size()]; + for (int i = 0; i < runtimeIterators.size(); i++) { + RuntimeIterator iter = runtimeIterators.get(i); tuple[i] = iter.evaluate(context); } - // Its ok to pass type as null as we are only interested in values. + // It's ok to pass type as null as we are only interested in values. tuples.add(new StructImpl(new StructTypeImpl(), tuple)); } else { - tuples.add(currItrator.evaluate(context)); + tuples.add(currentIterator.evaluate(context)); } } else { // Not the last iterator. - RuntimeIterator nextItr = (RuntimeIterator) itrs.get(level + 1); - Collection nextLevelValues = nextItr.evaluateCollection(context); + RuntimeIterator nextItr = runtimeIterators.get(level + 1); + Collection nextLevelValues = nextItr.evaluateCollection(context); // If value is null or INVALID then the evaluated collection would be Null. if (nextLevelValues != null) { for (Object nextLevelValue : nextLevelValues) { - tuples.addAll(evaluateLastColl(nextLevelValue, context, itrs, level + 1)); + tuples.addAll(evaluateLastColl(nextLevelValue, context, runtimeIterators, level + 1)); } } } @@ -1506,16 +1443,17 @@ private List evaluateLastColl(Object value, ExecutionContext context, List itrs, * @return {@link RuntimeIterator} this should not be null ever. 
*/ RuntimeIterator getRuntimeIteratorForThisIndex(ExecutionContext context) { - List indItrs = context.getCurrentIterators(); - Region rgn = getRegion(); - if (rgn instanceof BucketRegion) { - rgn = ((Bucket) rgn).getPartitionedRegion(); + List currentIterators = context.getCurrentIterators(); + Region region = getRegion(); + if (region instanceof BucketRegion) { + region = ((Bucket) region).getPartitionedRegion(); } - String regionPath = rgn.getFullPath(); + String regionPath = region.getFullPath(); String definition = getCanonicalizedIteratorDefinitions()[0]; - for (RuntimeIterator itr : indItrs) { - if (itr.getDefinition().equals(regionPath) || itr.getDefinition().equals(definition)) { - return itr; + for (RuntimeIterator runtimeIterator : currentIterators) { + if (runtimeIterator.getDefinition().equals(regionPath) + || runtimeIterator.getDefinition().equals(definition)) { + return runtimeIterator; } } return null; @@ -1528,25 +1466,27 @@ RuntimeIterator getRuntimeIteratorForThisIndex(ExecutionContext context) { * @return {@link RuntimeIterator} */ RuntimeIterator getRuntimeIteratorForThisIndex(ExecutionContext context, IndexInfo info) { - List indItrs = context.getCurrentIterators(); - Region rgn = getRegion(); - if (rgn instanceof BucketRegion) { - rgn = ((Bucket) rgn).getPartitionedRegion(); + List currentIterators = context.getCurrentIterators(); + Region region = getRegion(); + if (region instanceof BucketRegion) { + region = ((Bucket) region).getPartitionedRegion(); } - String regionPath = rgn.getFullPath(); + String regionPath = region.getFullPath(); String definition = getCanonicalizedIteratorDefinitions()[0]; - for (RuntimeIterator itr : indItrs) { - if (itr.getDefinition().equals(regionPath) || itr.getDefinition().equals(definition)) { + for (RuntimeIterator iterator : currentIterators) { + if (iterator.getDefinition().equals(regionPath) + || iterator.getDefinition().equals(definition)) { // if iterator has name alias must be used in the query - if (itr.getName() != null) { + if (iterator.getName() != null) { CompiledValue path = info._path(); // match the iterator name with alias String pathName = getReceiverNameFromPath(path); - if (path.getType() == OQLLexerTokenTypes.Identifier || itr.getName().equals(pathName)) { - return itr; + if (path.getType() == OQLLexerTokenTypes.Identifier + || iterator.getName().equals(pathName)) { + return iterator; } } else { - return itr; + return iterator; } } } @@ -1574,20 +1514,21 @@ private String getReceiverNameFromPath(CompiledValue path) { * @param context from executing query. * @return List of all iterators pertaining to this Index. */ - private List getAllDependentRuntimeIterators(ExecutionContext context) { - List indItrs = context - .getCurrScopeDpndntItrsBasedOnSingleIndpndntItr(getRuntimeIteratorForThisIndex(context)); + private List getAllDependentRuntimeIterators(ExecutionContext context) { + List iterators = context + .getCurrentScopeDependentIteratorsBasedOnSingleIndependentIterator( + getRuntimeIteratorForThisIndex(context)); List definitions = Arrays.asList(getCanonicalizedIteratorDefinitions()); // These are the common iterators between query from clause and index from clause. 
- List itrs = new ArrayList(); + List dependentIterators = new ArrayList<>(); - for (RuntimeIterator itr : indItrs) { - if (definitions.contains(itr.getDefinition())) { - itrs.add(itr); + for (RuntimeIterator iterator : iterators) { + if (definitions.contains(iterator.getDefinition())) { + dependentIterators.add(iterator); } } - return itrs; + return dependentIterators; } /** @@ -1598,16 +1539,16 @@ private List getAllDependentRuntimeIterators(ExecutionContext context) { * map itself through addValuesToCollection() calls. */ class RegionEntryToValuesMap { - protected Map map; + protected Map map; private final boolean useList; volatile int numValues; RegionEntryToValuesMap(boolean useList) { - map = new ConcurrentHashMap(2, 0.75f, 1); + map = new ConcurrentHashMap<>(2, 0.75f, 1); this.useList = useList; } - RegionEntryToValuesMap(Map map, boolean useList) { + RegionEntryToValuesMap(Map map, boolean useList) { this.map = map; this.useList = useList; } @@ -1617,7 +1558,6 @@ class RegionEntryToValuesMap { * here. No two threads can be entering in this method together for a RegionEntry. */ public void add(RegionEntry entry, Object value) { - assert value != null; // Values must NOT be null and ConcurrentHashMap does not support null values. if (value == null) { return; @@ -1626,8 +1566,9 @@ public void add(RegionEntry entry, Object value) { if (object == null) { map.put(entry, value); } else if (object instanceof Collection) { - Collection coll = (Collection) object; - // If its a list query might get ConcurrentModificationException. + @SuppressWarnings("unchecked") + Collection coll = (Collection) object; + // If it's a list query might get ConcurrentModificationException. // This can only happen for Null mapped or Undefined entries in a // RangeIndex. So we are synchronizing on ArrayList. if (useList) { @@ -1638,7 +1579,8 @@ public void add(RegionEntry entry, Object value) { coll.add(value); } } else { - Collection coll = useList ? new ArrayList(2) : new IndexConcurrentHashSet(2, 0.75f, 1); + Collection coll = + useList ? new ArrayList<>(2) : new IndexConcurrentHashSet<>(2, 0.75f, 1); coll.add(object); coll.add(value); map.put(entry, coll); @@ -1646,17 +1588,18 @@ public void add(RegionEntry entry, Object value) { atomicUpdater.incrementAndGet(this); } - public void addAll(RegionEntry entry, Collection values) { + public void addAll(RegionEntry entry, Collection values) { Object object = map.get(entry); if (object == null) { - Collection coll = useList ? new ArrayList(values.size()) - : new IndexConcurrentHashSet(values.size(), 0.75f, 1); + Collection coll = useList ? new ArrayList<>(values.size()) + : new IndexConcurrentHashSet<>(values.size(), 0.75f, 1); coll.addAll(values); map.put(entry, coll); atomicUpdater.addAndGet(this, values.size()); } else if (object instanceof Collection) { - Collection coll = (Collection) object; - // If its a list query might get ConcurrentModificationException. + @SuppressWarnings("unchecked") + Collection coll = (Collection) object; + // If it's a list query might get ConcurrentModificationException. // This can only happen for Null mapped or Undefined entries in a // RangeIndex. So we are synchronizing on ArrayList. if (useList) { @@ -1667,8 +1610,8 @@ public void addAll(RegionEntry entry, Collection values) { coll.addAll(values); } } else { - Collection coll = useList ? new ArrayList(values.size() + 1) - : new IndexConcurrentHashSet(values.size() + 1, 0.75f, 1); + Collection coll = useList ? 
new ArrayList<>(values.size() + 1) + : new IndexConcurrentHashSet<>(values.size() + 1, 0.75f, 1); coll.addAll(values); coll.add(object); map.put(entry, coll); @@ -1690,9 +1633,10 @@ public void remove(RegionEntry entry, Object value) { return; } if (object instanceof Collection) { - Collection coll = (Collection) object; + @SuppressWarnings("unchecked") + Collection coll = (Collection) object; boolean removed; - // If its a list query might get ConcurrentModificationException. + // If it's a list query might get ConcurrentModificationException. // This can only happen for Null mapped or Undefined entries in a // RangeIndex. So we are synchronizing on ArrayList. if (useList) { @@ -1720,7 +1664,7 @@ public Object remove(RegionEntry entry) { Object retVal = map.remove(entry); if (retVal != null) { atomicUpdater.addAndGet(this, - retVal instanceof Collection ? -((Collection) retVal).size() : -1); + retVal instanceof Collection ? -((Collection) retVal).size() : -1); } return retVal; } @@ -1731,8 +1675,7 @@ int getNumValues(RegionEntry entry) { return 0; } if (object instanceof Collection) { - Collection coll = (Collection) object; - return coll.size(); + return ((Collection) object).size(); } else { return 1; } @@ -1746,26 +1689,24 @@ public int getNumEntries() { return map.keySet().size(); } - void addValuesToCollection(Collection result, int limit, ExecutionContext context) { - for (final Object o : map.entrySet()) { + void addValuesToCollection(Collection result, int limit, ExecutionContext context) { + for (final Map.Entry e : map.entrySet()) { // Check if query execution on this thread is canceled. QueryMonitor.throwExceptionIfQueryOnCurrentThreadIsCanceled(); if (verifyLimit(result, limit, context)) { return; } - Entry e = (Entry) o; - Object value = e.getValue(); - assert value != null; - RegionEntry re = (RegionEntry) e.getKey(); + final Object value = e.getValue(); + final RegionEntry re = e.getKey(); boolean reUpdateInProgress = re.isUpdateInProgress(); if (value instanceof Collection) { - // If its a list query might get ConcurrentModificationException. + // If it's a list query might get ConcurrentModificationException. // This can only happen for Null mapped or Undefined entries in a // RangeIndex. So we are synchronizing on ArrayList. if (useList) { synchronized (value) { - for (Object val : (Iterable) value) { + for (Object val : (Iterable) value) { // Compare the value in index with in RegionEntry. if (!reUpdateInProgress || verifyEntryAndIndexValue(re, val, context)) { result.add(val); @@ -1778,7 +1719,7 @@ void addValuesToCollection(Collection result, int limit, ExecutionContext contex } } } else { - for (Object val : (Iterable) value) { + for (Object val : (Iterable) value) { // Compare the value in index with in RegionEntry. 
if (!reUpdateInProgress || verifyEntryAndIndexValue(re, val, context)) { result.add(val); @@ -1793,7 +1734,7 @@ void addValuesToCollection(Collection result, int limit, ExecutionContext contex } else { if (!reUpdateInProgress || verifyEntryAndIndexValue(re, value, context)) { if (context.isCqQueryContext()) { - result.add(new CqEntry(((RegionEntry) e.getKey()).getKey(), value)); + result.add(new CqEntry(e.getKey().getKey(), value)); } else { result.add(verifyAndGetPdxDomainObject(value)); } @@ -1802,8 +1743,9 @@ void addValuesToCollection(Collection result, int limit, ExecutionContext contex } } - void addValuesToCollection(Collection result, CompiledValue iterOp, RuntimeIterator runtimeItr, - ExecutionContext context, List projAttrib, SelectResults intermediateResults, + void addValuesToCollection(Collection result, CompiledValue iterOp, + RuntimeIterator runtimeItr, + ExecutionContext context, List projAttrib, SelectResults intermediateResults, boolean isIntersection, int limit) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { @@ -1811,22 +1753,20 @@ void addValuesToCollection(Collection result, CompiledValue iterOp, RuntimeItera return; } - for (Object o : map.entrySet()) { + for (Entry e : map.entrySet()) { // Check if query execution on this thread is canceled. QueryMonitor.throwExceptionIfQueryOnCurrentThreadIsCanceled(); - Entry e = (Entry) o; Object value = e.getValue(); - // Key is a RegionEntry here. - RegionEntry entry = (RegionEntry) e.getKey(); + RegionEntry entry = e.getKey(); if (value != null) { boolean reUpdateInProgress = entry.isUpdateInProgress(); if (value instanceof Collection) { - // If its a list query might get ConcurrentModificationException. + // If it's a list query might get ConcurrentModificationException. // This can only happen for Null mapped or Undefined entries in a // RangeIndex. So we are synchronizing on ArrayList. if (useList) { synchronized (value) { - for (Object o1 : ((Iterable) value)) { + for (Object o1 : ((Iterable) value)) { boolean ok = true; if (reUpdateInProgress) { // Compare the value in index with value in RegionEntry. @@ -1846,7 +1786,7 @@ void addValuesToCollection(Collection result, CompiledValue iterOp, RuntimeItera } } } else { - for (Object o1 : ((Iterable) value)) { + for (Object o1 : ((Iterable) value)) { boolean ok = true; if (reUpdateInProgress) { // Compare the value in index with value in RegionEntry. 
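Aside: the RegionEntryToValuesMap hunks above all revolve around one storage trick worth spelling out. Each RegionEntry maps to either a bare value or a Collection of values, and an entry is promoted to a Collection only when a second value arrives, so the common single-value case allocates no wrapper. A minimal sketch of that promote-on-second-value shape, using hypothetical names (OneOrManyMap, add, getNumValues) and plain java.util collections in place of Geode's concurrent structures:

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

// Sketch only: each key holds either a bare value or a Collection of values;
// promotion to a Collection happens the first time a second value arrives.
class OneOrManyMap<K, V> {
  private final Map<K, Object> map = new HashMap<>();
  private int numValues; // running total of values across all keys

  @SuppressWarnings("unchecked")
  void add(K key, V value) {
    Object existing = map.get(key);
    if (existing == null) {
      map.put(key, value); // first value for this key: store it bare
    } else if (existing instanceof Collection) {
      ((Collection<V>) existing).add(value); // already promoted
    } else {
      Collection<V> coll = new ArrayList<>(2); // second value: promote
      coll.add((V) existing);
      coll.add(value);
      map.put(key, coll);
    }
    numValues++;
  }

  int getNumValues(K key) {
    Object existing = map.get(key);
    if (existing == null) {
      return 0;
    }
    return existing instanceof Collection ? ((Collection<?>) existing).size() : 1;
  }
}

The same either-or shape explains why the hunks keep their instanceof Collection checks, and why the real class maintains its value count through the atomicUpdater calls visible above instead of walking the map.
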
@@ -1877,7 +1817,7 @@ void addValuesToCollection(Collection result, CompiledValue iterOp, RuntimeItera } if (ok) { if (context.isCqQueryContext()) { - result.add(new CqEntry(((RegionEntry) e.getKey()).getKey(), value)); + result.add(new CqEntry(entry.getKey(), value)); } else { applyProjection(projAttrib, context, result, value, intermediateResults, isIntersection); @@ -1888,7 +1828,7 @@ void addValuesToCollection(Collection result, CompiledValue iterOp, RuntimeItera } } - private boolean verifyLimit(Collection result, int limit, ExecutionContext context) { + private boolean verifyLimit(Collection result, int limit, ExecutionContext context) { if (limit > 0) { if (!context.isDistinct()) { return result.size() == limit; @@ -1902,17 +1842,12 @@ public boolean containsEntry(RegionEntry entry) { return map.containsKey(entry); } - public boolean containsValue(Object value) { - throw new RuntimeException( - "Not yet implemented"); - } - public void clear() { map.clear(); atomicUpdater.set(this, 0); } - public Set entrySet() { + public Set> entrySet() { return map.entrySet(); } @@ -1923,7 +1858,7 @@ public void replace(RegionEntry entry, Object values) { int numOldValues = getNumValues(entry); map.put(entry, values); atomicUpdater.addAndGet(this, - (values instanceof Collection ? ((Collection) values).size() : 1) - numOldValues); + (values instanceof Collection ? ((Collection) values).size() : 1) - numOldValues); } } @@ -1931,7 +1866,7 @@ public void replace(RegionEntry entry, Object values) { * This will populate resultSet from both type of indexes, {@link CompactRangeIndex} and * {@link RangeIndex}. */ - void populateListForEquiJoin(List list, Object outerEntries, Object innerEntries, + void populateListForEquiJoin(List list, Object outerEntries, Object innerEntries, ExecutionContext context, Object key) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { @@ -1939,7 +1874,7 @@ void populateListForEquiJoin(List list, Object outerEntries, Object innerEntries "OuterEntries or InnerEntries must not be null"); Object[][] values = new Object[2][]; - Iterator itr = null; + Iterator itr = null; int j = 0; while (j < 2) { @@ -1949,19 +1884,19 @@ void populateListForEquiJoin(List list, Object outerEntries, Object innerEntries itr = ((RegionEntryToValuesMap) outerEntries).map.entrySet().iterator(); isRangeIndex = true; } else if (outerEntries instanceof CloseableIterator) { - itr = (Iterator) outerEntries; + itr = (Iterator) outerEntries; } } else { if (innerEntries instanceof RegionEntryToValuesMap) { itr = ((RegionEntryToValuesMap) innerEntries).map.entrySet().iterator(); isRangeIndex = true; } else if (innerEntries instanceof CloseableIterator) { - itr = (Iterator) innerEntries; + itr = (Iterator) innerEntries; } } // extract the values from the RegionEntries - List dummy = new ArrayList(); + List dummy = new ArrayList<>(); RegionEntry re = null; IndexStoreEntry ie = null; Object val = null; @@ -1972,10 +1907,10 @@ void populateListForEquiJoin(List list, Object outerEntries, Object innerEntries while (itr.hasNext()) { if (isRangeIndex) { - Map.Entry entry = (Map.Entry) itr.next(); + Map.Entry entry = (Map.Entry) itr.next(); val = entry.getValue(); if (val instanceof Collection) { - entryVal = ((Iterable) val).iterator().next(); + entryVal = ((Iterable) val).iterator().next(); } else { entryVal = val; } @@ -1998,7 +1933,7 @@ void populateListForEquiJoin(List list, Object outerEntries, Object innerEntries if (ok) { if (isRangeIndex) { if (val 
instanceof Collection) { - dummy.addAll((Collection) val); + dummy.addAll((Collection) val); } else { dummy.add(val); } @@ -2031,7 +1966,7 @@ synchronized void setPdxStringFlag(Object key) { return; } if (!isIndexedPdxKeys) { - if (region.getAttributes().getEvictionAttributes().isNoEviction() == true + if (region.getAttributes().getEvictionAttributes().isNoEviction() && key instanceof PdxString && region.getAttributes().getCompressor() == null) { isIndexedPdxKeys = true; } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractMapIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractMapIndex.java index e6898f116865..652b5d5e3dd0 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractMapIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/AbstractMapIndex.java @@ -235,13 +235,14 @@ public void initializeIndex(boolean loadEntries) throws IMQException { @Override void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, - RuntimeIterator runtimeItr, ExecutionContext context, List projAttrib, + RuntimeIterator independentIterator, ExecutionContext context, List projAttrib, SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { Object[] mapKeyAndVal = (Object[]) key; AbstractIndex ri = mapKeyToValueIndex.get(mapKeyAndVal[1]); if (ri != null) { - ri.lockedQuery(mapKeyAndVal[0], operator, results, iterOps, runtimeItr, context, projAttrib, + ri.lockedQuery(mapKeyAndVal[0], operator, results, iterOps, independentIterator, context, + projAttrib, intermediateResults, isIntersection); } } @@ -376,15 +377,16 @@ public Object[] getMapKeysForTesting() { public abstract boolean containsEntry(RegionEntry entry); @Override - public boolean isMatchingWithIndexExpression(CompiledValue condnExpr, String conditionExprStr, + public boolean isMatchingWithIndexExpression(CompiledValue conditionExpression, + String conditionExpressionString, ExecutionContext context) throws TypeMismatchException, NameResolutionException { if (isAllKeys) { // check if the condition expression is of type MapIndexable. If yes, then check // the canonicalized string // stripped of the index arg & see if it matches.
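That comment is the heart of map-index matching: an index built over all map keys (for example, one declared on positions[*]) can serve a condition such as positions['SUN'] when the receivers match once the bracketed index argument is stripped from the canonicalized strings. A rough illustration of that idea follows; it works on plain strings for brevity (the real code below compares CompiledValue trees), and every name in it is invented:

class MapIndexMatchSketch {
  // e.g. "p.positions['SUN']" -> "p.positions[]"
  static String stripIndexArg(String expr) {
    int open = expr.indexOf('[');
    int close = expr.lastIndexOf(']');
    if (open < 0 || close < open) {
      return expr; // no index argument to strip
    }
    return expr.substring(0, open + 1) + expr.substring(close);
  }

  static boolean matches(String indexedExpr, String conditionExpr) {
    return stripIndexArg(indexedExpr).equals(stripIndexArg(conditionExpr));
  }

  public static void main(String[] args) {
    System.out.println(matches("p.positions[*]", "p.positions['SUN']")); // true
    System.out.println(matches("p.positions[*]", "p.status")); // false
  }
}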
- if (condnExpr instanceof MapIndexable) { - MapIndexable mi = (MapIndexable) condnExpr; + if (conditionExpression instanceof MapIndexable) { + MapIndexable mi = (MapIndexable) conditionExpression; CompiledValue recvr = mi.getReceiverSansIndexArgs(); StringBuilder sb = new StringBuilder(); recvr.generateCanonicalizedExpression(sb, context); @@ -396,7 +398,7 @@ public boolean isMatchingWithIndexExpression(CompiledValue condnExpr, String con } } else { for (String expr : patternStr) { - if (expr.equals(conditionExprStr)) { + if (expr.equals(conditionExpressionString)) { return true; } } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/CompactRangeIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/CompactRangeIndex.java index 5d27434488e8..fe87e26b152d 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/CompactRangeIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/CompactRangeIndex.java @@ -1143,7 +1143,7 @@ class IMQEvaluator implements IndexedExpressionEvaluator { } else { indexInitIterators = ((FunctionalIndexCreationHelper) helper).indexInitIterators; modifiedIndexExpr = ((FunctionalIndexCreationHelper) helper).modifiedIndexExpr; - addnlProjType = ((FunctionalIndexCreationHelper) helper).addnlProjType; + addnlProjType = ((FunctionalIndexCreationHelper) helper).additionalProjType; } iteratorSize = indexInitIterators.size(); if (additionalProj instanceof CompiledPath) { @@ -1612,10 +1612,11 @@ public List getAllDependentIterators() { @Override void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, - RuntimeIterator indpndntItr, ExecutionContext context, List projAttrib, + RuntimeIterator independentIterator, ExecutionContext context, List projAttrib, SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { - lockedQueryPrivate(key, operator, results, iterOps, indpndntItr, context, null, projAttrib, + lockedQueryPrivate(key, operator, results, iterOps, independentIterator, context, null, + projAttrib, intermediateResults, isIntersection); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/FunctionalIndexCreationHelper.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/FunctionalIndexCreationHelper.java index 8b7e71197ecf..d70182b8b2ee 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/FunctionalIndexCreationHelper.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/FunctionalIndexCreationHelper.java @@ -83,7 +83,7 @@ class FunctionalIndexCreationHelper extends IndexCreationHelper { CompiledValue additionalProj = null; - ObjectType addnlProjType = null; + ObjectType additionalProjType = null; CompiledValue modifiedIndexExpr = null; @@ -207,7 +207,7 @@ private void prepareFromClause(IndexManager imgr) throws IndexInvalidException { if (i == 0) { CompiledValue cv = iterDef.getCollectionExpr(); - addnlProjType = rIter.getElementType(); + additionalProjType = rIter.getElementType(); String name = iterDef.getName(); if (isEmpty(name)) { // In case the name of iterator is null or blank set it to index_internal_id diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/HashIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/HashIndex.java index 
06e818f111f7..f2a746e66c9e 100755 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/HashIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/HashIndex.java @@ -1023,7 +1023,7 @@ class IMQEvaluator implements IndexedExpressionEvaluator { } else { indexInitIterators = ((FunctionalIndexCreationHelper) helper).indexInitIterators; modifiedIndexExpr = ((FunctionalIndexCreationHelper) helper).modifiedIndexExpr; - addnlProjType = ((FunctionalIndexCreationHelper) helper).addnlProjType; + addnlProjType = ((FunctionalIndexCreationHelper) helper).additionalProjType; } iteratorSize = indexInitIterators.size(); if (additionalProj instanceof CompiledPath) { @@ -1379,10 +1379,11 @@ public int compare(Object arg0, Object arg1) { @Override void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, - RuntimeIterator indpndntItr, ExecutionContext context, List projAttrib, + RuntimeIterator independentIterator, ExecutionContext context, List projAttrib, SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { - lockedQueryPrivate(key, operator, results, iterOps, indpndntItr, context, null, projAttrib, + lockedQueryPrivate(key, operator, results, iterOps, independentIterator, context, null, + projAttrib, intermediateResults, isIntersection); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexProtocol.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexProtocol.java index d910e9aa4be7..e8b6ce648ff7 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexProtocol.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexProtocol.java @@ -65,7 +65,7 @@ public interface IndexProtocol extends Index { * fetched * @param context ExecutionContext object */ - void query(Collection results, Set keysToRemove, ExecutionContext context) + void query(Collection results, Set keysToRemove, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; @@ -84,7 +84,7 @@ void query(Collection results, Set keysToRemove, ExecutionContext context) * not contain index results corresponding to key + keys of keysToRemove set ) * @param context ExecutionContext object */ - void query(Object key, int operator, Collection results, Set keysToRemove, + void query(Object key, int operator, Collection results, Set keysToRemove, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; @@ -96,13 +96,14 @@ void query(Object key, int operator, Collection results, Set keysToRemove, * @param results The Collection object used for fetching the index results * @param context ExecutionContext object */ - void query(Object key, int operator, Collection results, ExecutionContext context) + void query(Object key, int operator, Collection results, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; - void query(Object key, int operator, Collection results, CompiledValue iterOp, - RuntimeIterator indpndntItr, ExecutionContext context, List projAttrib, - SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, + void query(Object key, int operator, Collection results, 
CompiledValue iterOp, + RuntimeIterator indpndntItr, ExecutionContext context, List projAttrib, + SelectResults intermediateResults, boolean isIntersection) + throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; /** @@ -118,7 +119,8 @@ void query(Object key, int operator, Collection results, CompiledValue iterOp, * @param context ExecutionContext object */ void query(Object lowerBoundKey, int lowerBoundOperator, Object upperBoundKey, - int upperBoundOperator, Collection results, Set keysToRemove, ExecutionContext context) + int upperBoundOperator, Collection results, Set keysToRemove, + ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; @@ -134,7 +136,7 @@ void query(Object lowerBoundKey, int lowerBoundOperator, Object upperBoundKey, * The Object array will have two rows. Each row ( one dimensional Object Array ) will * contain objects or type Struct or Object. */ - List queryEquijoinCondition(IndexProtocol index, ExecutionContext context) + List queryEquijoinCondition(IndexProtocol index, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException; diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexedExpressionEvaluator.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexedExpressionEvaluator.java index 824cb6ace8e7..673aa762a1db 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexedExpressionEvaluator.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/IndexedExpressionEvaluator.java @@ -39,8 +39,8 @@ public interface IndexedExpressionEvaluator { ObjectType getIndexResultSetType(); - void expansion(List expandedResults, Object lowerBoundKey, Object upperBoundKey, + void expansion(List expandedResults, Object lowerBoundKey, Object upperBoundKey, int lowerBoundOperator, int upperBoundOperator, Object value) throws IMQException; - List getAllDependentIterators(); + List getAllDependentIterators(); } diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PartitionedIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PartitionedIndex.java index ee85a0fb623f..8735304a3f25 100755 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PartitionedIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PartitionedIndex.java @@ -32,7 +32,6 @@ import org.apache.geode.cache.query.QueryException; import org.apache.geode.cache.query.QueryInvocationTargetException; import org.apache.geode.cache.query.SelectResults; -import org.apache.geode.cache.query.TypeMismatchException; import org.apache.geode.cache.query.internal.CompiledValue; import org.apache.geode.cache.query.internal.ExecutionContext; import org.apache.geode.cache.query.internal.RuntimeIterator; @@ -55,13 +54,14 @@ public class PartitionedIndex extends AbstractIndex { /** * Contains the reference for all the local indexed buckets. */ - private final Map> bucketIndexes = + private final Map, List> bucketIndexes = Collections.synchronizedMap(new HashMap<>()); - // An arbitrary bucket index from this PartiionedIndex that is used as a representative + // An arbitrary bucket index from this PartitionedIndex that is used as a representative // index for the entire PartitionIndex. 
Usually used for scoring/sizing of an index when // selecting which index to use private volatile Index arbitraryBucketIndex; + /** * Type of index represented by this partitioned index. * @@ -69,28 +69,29 @@ public class PartitionedIndex extends AbstractIndex { * @see IndexType#PRIMARY_KEY * @see IndexType#HASH */ + @Deprecated private final IndexType type; /** * Number of remote buckets indexed when creating an index on the partitioned region instance. */ - private int numRemoteBucektsIndexed; + private int numRemoteBucketsIndexed; /** * String for imports if needed for index creations */ private final String imports; - protected Set mapIndexKeys = Collections.newSetFromMap(new ConcurrentHashMap()); + protected Set mapIndexKeys = Collections.newSetFromMap(new ConcurrentHashMap<>()); - // Flag indicating that the populationg of this index is in progress + // Flag indicating that the populating of this index is in progress private volatile boolean populateInProgress; /** * Constructor for partitioned index. Creates the partitioned index on a given partitioned * region. An index can be created programmatically or through cache.xml during initialization. */ - public PartitionedIndex(InternalCache cache, IndexType iType, String indexName, Region r, + public PartitionedIndex(InternalCache cache, IndexType iType, String indexName, Region r, String indexedExpression, String fromClause, String imports) { super(cache, indexName, r, fromClause, indexedExpression, null, fromClause, indexedExpression, null, null); @@ -110,7 +111,7 @@ public PartitionedIndex(InternalCache cache, IndexType iType, String indexName, * * @param index bucket index to be added to the list. */ - public void addToBucketIndexes(Region r, Index index) { + public void addToBucketIndexes(Region r, Index index) { synchronized (bucketIndexes) { setArbitraryBucketIndex(index); List indexes = bucketIndexes.get(r); @@ -122,7 +123,7 @@ public void addToBucketIndexes(Region r, Index index) { } } - public void removeFromBucketIndexes(Region r, Index index) { + public void removeFromBucketIndexes(Region r, Index index) { synchronized (bucketIndexes) { List indexes = bucketIndexes.get(r); if (indexes != null) { @@ -157,7 +158,7 @@ public int getNumberOfIndexedBuckets() { * * @return bucketIndexes collection of all the bucket indexes. 
*/ - public List getBucketIndexes() { + public List getBucketIndexes() { synchronized (bucketIndexes) { List indexes = new ArrayList<>(); for (List indexList : bucketIndexes.values()) { @@ -167,7 +168,7 @@ public List getBucketIndexes() { } } - public List getBucketIndexes(Region r) { + public List getBucketIndexes(Region r) { synchronized (bucketIndexes) { List indexes = new ArrayList<>(); List indexList = bucketIndexes.get(r); @@ -205,8 +206,8 @@ public Index getBucketIndex() { return arbitraryBucketIndex; } - protected Map.Entry> getFirstBucketIndex() { - Map.Entry> firstIndexEntry = null; + protected Map.Entry, List> getFirstBucketIndex() { + Map.Entry, List> firstIndexEntry = null; synchronized (bucketIndexes) { if (bucketIndexes.size() > 0) { firstIndexEntry = bucketIndexes.entrySet().iterator().next(); @@ -235,19 +236,17 @@ public static AbstractIndex getBucketIndex(PartitionedRegion pr, String indexNam } catch (Exception ex) { throw new QueryInvocationTargetException(ex.getMessage()); } - PartitionedRegionDataStore prds = pr.getDataStore(); - BucketRegion bukRegion; - bukRegion = prds.getLocalBucketById(bId); - if (bukRegion == null) { + final BucketRegion bucketRegion = pr.getDataStore().getLocalBucketById(bId); + if (bucketRegion == null) { throw new BucketMovedException("Bucket not found for the id :" + bId); } - AbstractIndex index = null; - if (bukRegion.getIndexManager() != null) { - index = (AbstractIndex) (bukRegion.getIndexManager().getIndex(indexName)); + final AbstractIndex index; + if (bucketRegion.getIndexManager() != null) { + index = (AbstractIndex) (bucketRegion.getIndexManager().getIndex(indexName)); } else { if (pr.getCache().getLogger().fineEnabled()) { pr.getCache().getLogger().fine("Index Manager not found for the bucket region " - + bukRegion.getFullPath() + " unable to fetch the index " + indexName); + + bucketRegion.getFullPath() + " unable to fetch the index " + indexName); } throw new QueryInvocationTargetException( "Index Manager not found, " + " unable to fetch the index " + indexName); @@ -259,34 +258,37 @@ public static AbstractIndex getBucketIndex(PartitionedRegion pr, String indexNam /** * Verify if the index is available of the buckets. If not create index on the bucket. */ - public void verifyAndCreateMissingIndex(List buckets) throws QueryInvocationTargetException { - PartitionedRegion pr = (PartitionedRegion) getRegion(); - PartitionedRegionDataStore prds = pr.getDataStore(); + public void verifyAndCreateMissingIndex(List buckets) + throws QueryInvocationTargetException { + final PartitionedRegion region = (PartitionedRegion) getRegion(); + final PartitionedRegionDataStore dataStore = region.getDataStore(); - for (Object bId : buckets) { + final boolean fineEnabled = region.getCache().getLogger().fineEnabled(); + + for (final Integer bucketId : buckets) { // create index - BucketRegion bukRegion = prds.getLocalBucketById((Integer) bId); + BucketRegion bukRegion = dataStore.getLocalBucketById(bucketId); if (bukRegion == null) { - throw new QueryInvocationTargetException("Bucket not found for the id :" + bId); + throw new QueryInvocationTargetException("Bucket not found for the id :" + bucketId); } IndexManager im = IndexUtils.getIndexManager(cache, bukRegion, true); if (im != null && im.getIndex(indexName) == null) { try { - if (pr.getCache().getLogger().fineEnabled()) { - pr.getCache().getLogger() + if (fineEnabled) { + region.getCache().getLogger() .fine("Verifying index presence on bucket region. 
" + " Found index " + indexName + " not present on the bucket region " + bukRegion.getFullPath() + ", index will be created on this region."); } ExecutionContext externalContext = new ExecutionContext(null, bukRegion.getCache()); - externalContext.setBucketRegion(pr, bukRegion); + externalContext.setBucketRegion(region, bukRegion); im.createIndex(indexName, type, originalIndexedExpression, fromClause, imports, externalContext, this, true); - } catch (IndexExistsException iee) { + } catch (IndexExistsException ignore) { // Index exists. - } catch (IndexNameConflictException ince) { + } catch (IndexNameConflictException ignore) { // ignore. } } @@ -304,8 +306,8 @@ protected boolean isCompactRangeIndex() { * * @param remoteBucketsIndexed int representing number of remote buckets. */ - public void setRemoteBucketesIndexed(int remoteBucketsIndexed) { - numRemoteBucektsIndexed = remoteBucketsIndexed; + public void setRemoteBucketsIndexed(int remoteBucketsIndexed) { + numRemoteBucketsIndexed = remoteBucketsIndexed; } /** @@ -314,7 +316,7 @@ public void setRemoteBucketesIndexed(int remoteBucketsIndexed) { * @return int number of remote indexed buckets. */ public int getNumRemoteBucketsIndexed() { - return numRemoteBucektsIndexed; + return numRemoteBucketsIndexed; } /** @@ -323,7 +325,7 @@ public int getNumRemoteBucketsIndexed() { * @return the Region for this index */ @Override - public Region getRegion() { + public Region getRegion() { return super.getRegion(); } @@ -350,12 +352,10 @@ public void initializeIndex(boolean loadEntries) throws IMQException { * Not supported on partitioned index. */ @Override - void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, - RuntimeIterator indpndntItr, ExecutionContext context, List projAttrib, - SelectResults intermediateResults, boolean isIntersection) { - throw new RuntimeException( - "Not supported on partitioned index"); - + void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, + RuntimeIterator independentIterator, ExecutionContext context, List projAttrib, + SelectResults intermediateResults, boolean isIntersection) { + throw new RuntimeException("Not supported on partitioned index"); } /** @@ -363,9 +363,7 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite */ @Override void recreateIndexData() throws IMQException { - throw new RuntimeException( - "Not supported on partitioned index"); - + throw new RuntimeException("Not supported on partitioned index"); } /** @@ -373,9 +371,7 @@ void recreateIndexData() throws IMQException { */ @Override void removeMapping(RegionEntry entry, int opCode) { - throw new RuntimeException( - "Not supported on partitioned index"); - + throw new RuntimeException("Not supported on partitioned index"); } /** @@ -585,9 +581,8 @@ public ObjectType getResultSetType() { */ @Override void lockedQuery(Object lowerBoundKey, int lowerBoundOperator, Object upperBoundKey, - int upperBoundOperator, Collection results, Set keysToRemove, - ExecutionContext context) - throws TypeMismatchException { + int upperBoundOperator, Collection results, Set keysToRemove, + ExecutionContext context) { throw new RuntimeException( "Not supported on partitioned index"); @@ -600,8 +595,8 @@ public int getSizeEstimate(Object key, int op, int matchLevel) { @Override - void lockedQuery(Object key, int operator, Collection results, Set keysToRemove, - ExecutionContext context) throws TypeMismatchException { + void lockedQuery(Object key, int operator, Collection 
results, Set keysToRemove, + ExecutionContext context) { throw new RuntimeException("Not supported on partitioned index"); } @@ -614,7 +609,7 @@ void addMapping(Object key, Object value, RegionEntry entry) throws IMQException } @Override - void saveMapping(Object key, Object value, RegionEntry entry) throws IMQException { + void saveMapping(Object key, Object value, RegionEntry entry) { throw new RuntimeException( "Not supported on partitioned index"); @@ -637,14 +632,12 @@ public void incNumBucketIndexes() { @Override public boolean isEmpty() { - boolean empty = true; - for (Object index : getBucketIndexes()) { - empty = ((AbstractIndex) index).isEmpty(); - if (!empty) { + for (final Index index : getBucketIndexes()) { + if (!((AbstractIndex) index).isEmpty()) { return false; } } - return empty; + return true; } public boolean isPopulateInProgress() { diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PrimaryKeyIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PrimaryKeyIndex.java index 8761fd8d3d82..968d0da85f70 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PrimaryKeyIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/PrimaryKeyIndex.java @@ -172,7 +172,7 @@ void lockedQuery(Object key, int operator, Collection results, Set keysToRemove, @Override void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, - RuntimeIterator runtimeItr, ExecutionContext context, List projAttrib, + RuntimeIterator independentIterator, ExecutionContext context, List projAttrib, SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { @@ -200,8 +200,8 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite Object value = entry.getValue(); if (value != null) { boolean ok = true; - if (runtimeItr != null) { - runtimeItr.setCurrent(value); + if (independentIterator != null) { + independentIterator.setCurrent(value); ok = QueryUtils.applyCondition(iterOps, context); } if (ok) { @@ -227,8 +227,8 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite // TODO: is this correct. What should be the behaviour of null values? if (val != null) { boolean ok = true; - if (runtimeItr != null) { - runtimeItr.setCurrent(val); + if (independentIterator != null) { + independentIterator.setCurrent(val); ok = QueryUtils.applyCondition(iterOps, context); } if (ok) { diff --git a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java index 6504de47595e..449d9d530ced 100644 --- a/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java +++ b/geode-core/src/main/java/org/apache/geode/cache/query/internal/index/RangeIndex.java @@ -562,15 +562,14 @@ void removeMapping(RegionEntry entry, int opCode) throws IMQException { internalIndexStats.incNumUpdates(); } - // Asif TODO: Provide explanation of the method. 
Test this method @Override - public List queryEquijoinCondition(IndexProtocol indx, ExecutionContext context) + public List queryEquijoinCondition(IndexProtocol indx, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { // get a read lock when doing a lookup long start = updateIndexUseStats(); ((AbstractIndex) indx).updateIndexUseStats(); - List data = new ArrayList(); + List data = new ArrayList<>(); Iterator inner = null; try { // We will iterate over each of the valueToEntries Map to obtain the keys @@ -1079,10 +1078,11 @@ private void addValuesToResultSingleKeyToRemove(Object entriesMap, Collection re } } - private void addValuesToResult(final Object entriesMap, final Collection result, + private void addValuesToResult(final Object entriesMap, final Collection result, final Object keyToRemove, final CompiledValue iterOps, final RuntimeIterator runtimeItr, final ExecutionContext context, final List projAttrib, - final SelectResults intermediateResults, final boolean isIntersection, final int limit) + final SelectResults intermediateResults, final boolean isIntersection, + final int limit) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { if (entriesMap == null || result == null) { @@ -1108,9 +1108,10 @@ private void addValuesToResult(final Object entriesMap, final Collection resu } private void addValuesToResultFromMap(final Map entriesMap, - final Collection result, final Object keyToRemove, final CompiledValue iterOps, + final Collection result, final Object keyToRemove, final CompiledValue iterOps, final RuntimeIterator runtimeItr, final ExecutionContext context, final List projAttrib, - final SelectResults intermediateResults, final boolean isIntersection, final int limit, + final SelectResults intermediateResults, final boolean isIntersection, + final int limit, final QueryObserver observer) throws FunctionDomainException, TypeMismatchException, NameResolutionException, QueryInvocationTargetException { // That means we aren't removing any keys (remember if we are matching for nulls, we have the @@ -1159,7 +1160,7 @@ void recreateIndexData() throws IMQException { @Override void lockedQuery(Object key, int operator, Collection results, CompiledValue iterOps, - RuntimeIterator runtimeItr, ExecutionContext context, List projAttrib, + RuntimeIterator independentIterator, ExecutionContext context, List projAttrib, SelectResults intermediateResults, boolean isIntersection) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException { int limit = -1; @@ -1185,7 +1186,8 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite if (key == null) { switch (operator) { case OQLLexerTokenTypes.TOK_EQ: { - nullMappedEntries.addValuesToCollection(results, iterOps, runtimeItr, context, projAttrib, + nullMappedEntries.addValuesToCollection(results, iterOps, independentIterator, context, + projAttrib, intermediateResults, isIntersection, limit); break; } @@ -1197,9 +1199,10 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite } // keysToRemove should be null, meaning we aren't removing any keys - addValuesToResult(sm, results, null, iterOps, runtimeItr, context, projAttrib, + addValuesToResult(sm, results, null, iterOps, independentIterator, context, projAttrib, intermediateResults, isIntersection, multiColOrderBy ? 
-1 : limit); - undefinedMappedEntries.addValuesToCollection(results, iterOps, runtimeItr, context, + undefinedMappedEntries.addValuesToCollection(results, iterOps, independentIterator, + context, projAttrib, intermediateResults, isIntersection, limit); break; } @@ -1210,7 +1213,8 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite } else if (key == QueryService.UNDEFINED) { // do nothing switch (operator) { case OQLLexerTokenTypes.TOK_EQ: { - undefinedMappedEntries.addValuesToCollection(results, iterOps, runtimeItr, context, + undefinedMappedEntries.addValuesToCollection(results, iterOps, independentIterator, + context, projAttrib, intermediateResults, isIntersection, limit); break; } @@ -1222,9 +1226,10 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite sm = sm.descendingMap(); } // keysToRemove should be null - addValuesToResult(sm, results, null, iterOps, runtimeItr, context, projAttrib, + addValuesToResult(sm, results, null, iterOps, independentIterator, context, projAttrib, intermediateResults, isIntersection, multiColOrderBy ? -1 : limit); - nullMappedEntries.addValuesToCollection(results, iterOps, runtimeItr, context, projAttrib, + nullMappedEntries.addValuesToCollection(results, iterOps, independentIterator, context, + projAttrib, intermediateResults, isIntersection, limit); break; } @@ -1238,7 +1243,7 @@ void lockedQuery(Object key, int operator, Collection results, CompiledValue ite return; } key = getPdxStringForIndexedPdxKeys(key); - evaluate(key, operator, results, iterOps, runtimeItr, context, projAttrib, + evaluate(key, operator, results, iterOps, independentIterator, context, projAttrib, intermediateResults, isIntersection, limit, applyOrderBy, orderByAttrs); } // end else } diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java index 73d17ec575f1..b58770f09683 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ClusterDistributionManager.java @@ -12,10 +12,8 @@ * or implied. See the License for the specific language governing permissions and limitations under * the License. */ - package org.apache.geode.distributed.internal; - import java.io.NotSerializableException; import java.net.InetAddress; import java.net.UnknownHostException; @@ -71,6 +69,7 @@ import org.apache.geode.distributed.internal.membership.api.MembershipLocator; import org.apache.geode.distributed.internal.membership.api.MembershipView; import org.apache.geode.distributed.internal.membership.api.Message; +import org.apache.geode.distributed.internal.membership.api.MessageListener; import org.apache.geode.internal.Assert; import org.apache.geode.internal.NanoTimer; import org.apache.geode.internal.admin.remote.AdminConsoleDisconnectMessage; @@ -91,17 +90,14 @@ import org.apache.geode.logging.internal.log4j.api.LogService; /** - * The DistributionManager uses a {@link Membership} to distribute {@link + * The {@code DistributionManager} uses a {@link Membership} to distribute {@link * DistributionMessage messages}. It also reports on who is currently in the distributed system and * tracks the elder member for the distributed lock service. You may also register a membership * listener with the DistributionManager to receive notification of changes in membership. * *

- * <p> - * * Code that wishes to send a {@link DistributionMessage} must get the - * DistributionManager and invoke {@link #putOutgoing}. - * - * <p>

+ * {@code DistributionManager} and invoke {@link #putOutgoing}. * * @see DistributionMessage#process * @see IgnoredByManager @@ -168,7 +164,7 @@ public class ClusterDistributionManager implements DistributionManager { private final int dmType; /** - * The MembershipListeners that are registered on this manager. + * The {@code MembershipListener}s that are registered on this manager. */ private final ConcurrentMap membershipListeners; private final ClusterElderManager clusterElderManager = new ClusterElderManager(this); @@ -182,7 +178,7 @@ public class ClusterDistributionManager implements DistributionManager { /** - * The MembershipListeners that are registered on this manager for ALL members. + * The {@code MembershipListener}s that are registered on this manager for ALL members. * * @since GemFire 5.7 */ @@ -433,6 +429,7 @@ static ClusterDistributionManager create(InternalDistributedSystem system, } } + @Override public OperationExecutors getExecutors() { return executors; } @@ -442,18 +439,40 @@ public ThreadsMonitoring getThreadMonitoring() { return executors.getThreadMonitoring(); } - /////////////////////// Constructors /////////////////////// + /** + * Creates a new distribution manager. + * + * @param system The distributed system to which this distribution manager will send messages + * @param transport The configuration for the communications transport + * @param alertingService Handles creation of Alerts + * @param membershipLocator The locator to use for membership + */ + private ClusterDistributionManager( + InternalDistributedSystem system, + RemoteTransportConfig transport, + AlertingService alertingService, + final MembershipLocator membershipLocator) { + this(system, transport, alertingService, membershipLocator, DistributionStats::new, + DistributionImpl::createDistribution); + } /** - * Creates a new DistributionManager by initializing itself, creating the membership - * manager and executors + * Creates a new distribution manager. 
* + * @param system The distributed system to which this distribution manager will send messages * @param transport The configuration for the communications transport + * @param alertingService Handles creation of Alerts + * @param membershipLocator The locator to use for membership + * @param distributionStatsFactory Creates DistributionStats for recording distribution stats + * @param distributionFactory Creates Distribution for membership and messaging */ - private ClusterDistributionManager(RemoteTransportConfig transport, + ClusterDistributionManager( InternalDistributedSystem system, + RemoteTransportConfig transport, AlertingService alertingService, - MembershipLocator locator) { + final MembershipLocator membershipLocator, + DistributionStatsFactory distributionStatsFactory, + DistributionFactory distributionFactory) { this.system = system; this.transport = transport; @@ -464,11 +483,11 @@ private ClusterDistributionManager(RemoteTransportConfig transport, distributedSystemId = system.getConfig().getDistributedSystemId(); long statId = OSProcess.getId(); - stats = new DistributionStats(system, statId); + stats = distributionStatsFactory.create(system, statId); DistributionStats.enableClockStats = system.getConfig().getEnableTimeStatistics(); exceptionInThreads = false; - boolean finishedConstructor = false; + boolean createdDistribution = false; try { executors = new ClusterOperationExecutors(stats, system); @@ -484,9 +503,8 @@ private ClusterDistributionManager(RemoteTransportConfig transport, long start = System.currentTimeMillis(); DMListener listener = new DMListener(this); - distribution = DistributionImpl - .createDistribution(this, transport, system, listener, - this::handleIncomingDMsg, locator); + distribution = distributionFactory.create(this, transport, system, listener, + this::handleIncomingDMsg, membershipLocator); sb.append(System.currentTimeMillis() - start); @@ -501,26 +519,14 @@ private ClusterDistributionManager(RemoteTransportConfig transport, description = "Distribution manager on " + localAddress + " started at " + (new Date(System.currentTimeMillis())); - finishedConstructor = true; + createdDistribution = true; } finally { - if (!finishedConstructor && executors != null) { + if (!createdDistribution && executors != null) { askThreadsToStop(); // fix for bug 42039 } } - } - - /** - * Creates a new distribution manager - * - * @param system The distributed system to which this distribution manager will send messages. - */ - private ClusterDistributionManager(InternalDistributedSystem system, - RemoteTransportConfig transport, - AlertingService alertingService, - final MembershipLocator membershipLocator) { - this(transport, system, alertingService, membershipLocator); - boolean finishedConstructor = false; + boolean startedEventProcessing = false; try { setIsStartupThread(); @@ -544,9 +550,9 @@ private ClusterDistributionManager(InternalDistributedSystem system, } } - finishedConstructor = true; + startedEventProcessing = true; } finally { - if (!finishedConstructor) { + if (!startedEventProcessing) { askThreadsToStop(); // fix for bug 42039 } } @@ -1095,7 +1101,7 @@ private void addNewMember(InternalDistributedMember member) { } /** - * Returns the identity of this DistributionManager + * Returns the identity of this {@code DistributionManager} */ @Override public InternalDistributedMember getId() { @@ -1363,7 +1369,7 @@ public Collection getMembershipListeners() { } /** - * Adds a MembershipListener to this distribution manager. 
+ * Adds a {@code MembershipListener} to this distribution manager. */ private void addAllMembershipListener(MembershipListener l) { synchronized (allMembershipListenersLock) { @@ -1992,7 +1998,7 @@ private void sendShutdownMessage() { * @param message the message to send * @return list of recipients that did not receive the message because they left the view (null if * all received it or it was sent to {@link Message#ALL_RECIPIENTS}). - * @throws NotSerializableException If message cannot be serialized + * @throws NotSerializableException If {@code message} cannot be serialized Set sendOutgoing(DistributionMessage message) throws NotSerializableException { @@ -2024,7 +2030,7 @@ Set sendOutgoing(DistributionMessage message) /** * @return recipients who did not receive the message - * @throws NotSerializableException If message cannot be serialized + * @throws NotSerializableException If {@code message} cannot be serialized private Set sendMessage(DistributionMessage message) throws NotSerializableException { @@ -2150,7 +2156,7 @@ public RemoteGfManagerAgent getAgent() { * Returns a description of the distribution configuration used for this distribution manager. (in * ConsoleDistributionManager) * - * @return null if no admin {@linkplain #getAgent agent} is associated with this + * @return {@code null} if no admin {@linkplain #getAgent agent} is associated with this * distribution manager */ public String getDistributionConfigDescription() { @@ -2172,7 +2178,7 @@ public String getDistributionConfigDescription() { * Returns the health monitor for this distribution manager and owner. * * @param owner the agent that owns the returned monitor - * @return the health monitor created by the owner; null if the owner has now created + * @return the health monitor created by the owner; {@code null} if the owner has not created * a monitor. 
* @since GemFire 3.5 */ @@ -2822,6 +2828,7 @@ public Set getNormalDistributionManagerIds() { /** * test method to get the member IDs of all locators in the distributed system */ + @Override public Set getLocatorDistributionManagerIds() { return distribution.getMembersNotShuttingDown().stream() .filter((id) -> id.getVmKind() == LOCATOR_DM_TYPE).collect( @@ -2942,4 +2949,19 @@ public Comparator getComparator() { } } + @FunctionalInterface + interface DistributionStatsFactory { + DistributionStats create(InternalDistributedSystem system, long statId); + } + + @FunctionalInterface + interface DistributionFactory { + Distribution create( + ClusterDistributionManager clusterDistributionManager, + RemoteTransportConfig transport, + InternalDistributedSystem system, + org.apache.geode.distributed.internal.membership.api.MembershipListener listener, + MessageListener messageListener, + final MembershipLocator locator); + } } diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DirectReplyProcessor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DirectReplyProcessor.java index 13b75de5f912..4bb22b3c51cc 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DirectReplyProcessor.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DirectReplyProcessor.java @@ -72,7 +72,8 @@ public DirectReplyProcessor(DistributionManager dm, InternalDistributedMember me * @param dm the DistributionManager to use for messaging and membership * @param initMembers the Set of members this processor wants replies from */ - public DirectReplyProcessor(DistributionManager dm, Collection initMembers) { + public DirectReplyProcessor(DistributionManager dm, + Collection initMembers) { this(dm, dm.getSystem(), initMembers, null); } @@ -85,7 +86,8 @@ public DirectReplyProcessor(DistributionManager dm, Collection initMembers) { * @param system the DistributedSystem connection * @param initMembers the Set of members this processor wants replies from */ - public DirectReplyProcessor(InternalDistributedSystem system, Collection initMembers) { + public DirectReplyProcessor(InternalDistributedSystem system, + Collection initMembers) { this(system.getDistributionManager(), system, initMembers, null); } @@ -100,13 +102,14 @@ public DirectReplyProcessor(InternalDistributedSystem system, Collection initMem * @param cancelCriterion optional CancelCriterion to use; will use the DistributedSystem's * DistributionManager if null */ - public DirectReplyProcessor(InternalDistributedSystem system, Collection initMembers, + public DirectReplyProcessor(InternalDistributedSystem system, + Collection initMembers, CancelCriterion cancelCriterion) { this(system.getDistributionManager(), system, initMembers, cancelCriterion); } public DirectReplyProcessor(DistributionManager dm, InternalDistributedSystem system, - Collection initMembers, CancelCriterion cancelCriterion) { + Collection initMembers, CancelCriterion cancelCriterion) { super(dm, system, initMembers, cancelCriterion, false); } diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java index 88ecb3bf306c..38a851bc0630 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionAdvisor.java @@ -41,7 +41,6 @@ import org.apache.geode.GemFireIOException; import 
org.apache.geode.annotations.VisibleForTesting; import org.apache.geode.annotations.internal.MakeNotStatic; -import org.apache.geode.cache.server.CacheServer; import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.Assert; import org.apache.geode.internal.InternalDataSerializer; @@ -58,6 +57,7 @@ import org.apache.geode.internal.serialization.SerializationContext; import org.apache.geode.internal.util.ArrayUtils; import org.apache.geode.logging.internal.log4j.api.LogService; +import org.apache.geode.util.internal.GeodeGlossary; /** * Provides advice on sending distribution messages. For a given operation, this advisor will @@ -124,6 +124,33 @@ public class DistributionAdvisor { */ private static final int ROLLOVER_THRESHOLD_LOWER = Integer.MIN_VALUE + ROLLOVER_THRESHOLD; + /** + * When we notice another member has crashed, we request its data. We delay that + * request so that any in-flight operations started on the crashed member will + * be retried on the new primary before the sync. This constant determines that + * delay. + * + * The default value was chosen empirically, to allow enough time for a new + * primary to be chosen after a member has crashed, and to allow any operations + * begun on the crashed member to be subsequently completed (and acknowledged) + * by the new primary. + * + * There is a tradeoff here. If the delay is too short, the probability increases + * that the initiator sees a spurious EntryExistsException or EntryNotFoundException. + * But the delay also postpones consistency, though only in the situation + * described above, where the primary changes mid-operation. + * + * The default value might be too short in pathological situations like cascading + * member loss, or in the presence of very slow members or networks. + * + * The delay is expressed in milliseconds. The default is 60 seconds (60,000 ms). + * You can override the default by setting the Java system property + * gemfire.DistributionAdvisor.syncDelayForCrashedMemberMilliseconds. + */ + private static final long SYNC_DELAY_FOR_CRASHED_MEMBER_MILLISECONDS = + Long.getLong(GeodeGlossary.GEMFIRE_PREFIX + + "DistributionAdvisor.syncDelayForCrashedMemberMilliseconds", 60_000L); + /** * Incrementing serial number used to identify order of region creation * @@ -265,7 +292,7 @@ public void syncForCrashedMember(final InternalDistributedMember id, final Profi // interval. This allows client caches to retry an operation that might otherwise be recovered // through the sync operation. Without associated event information this could cause the // retried operation to be mishandled. See GEODE-5505 - final long delay = getDelay(dr); + final long delay = getSyncDelayForCrashedMemberMilliseconds(); if (dr.getDataPolicy().withPersistence() && persistentId == null) { // Fix for GEODE-6886 (#46704). 
The lost member may be an empty accessor @@ -291,9 +318,8 @@ PersistentMemberID getPersistentID(CacheProfile cp) { } @VisibleForTesting - long getDelay(DistributedRegion dr) { - return dr.getGemFireCache().getCacheServers().stream() - .mapToLong(CacheServer::getMaximumTimeBetweenPings).max().orElse(0L); + long getSyncDelayForCrashedMemberMilliseconds() { + return SYNC_DELAY_FOR_CRASHED_MEMBER_MILLISECONDS; } @VisibleForTesting diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java index c29ab1cf8c75..e6f5fe4f878c 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionManager.java @@ -343,9 +343,7 @@ Set addMembershipListenerAndGetDistributionManagerIds */ boolean areOnEquivalentHost(InternalDistributedMember member1, InternalDistributedMember member2); - default Set getEquivalents(InetAddress in) { - throw new UnsupportedOperationException(); - } + Set getEquivalents(InetAddress in); Set getGroupMembers(String group); diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java index 5614bf415733..2621633516d7 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/DistributionMessage.java @@ -212,11 +212,7 @@ public boolean orderedDelivery() { } } - /** - * Sets the intended recipient of the message. If recipient is Message.ALL_RECIPIENTS - * then the - * message will be sent to all distribution managers. - */ + @Override public void setRecipient(InternalDistributedMember recipient) { if (recipients != null) { throw new IllegalStateException( @@ -226,20 +222,10 @@ public void setRecipient(InternalDistributedMember recipient) { recipients = Collections.singletonList(recipient); } - /** - * Causes this message to be send using multicast if v is true. - * - * @since GemFire 5.0 - */ public void setMulticast(boolean v) { multicast = v; } - /** - * Return true if this message should be sent using multicast. - * - * @since GemFire 5.0 - */ public boolean getMulticast() { return multicast; } @@ -252,13 +238,8 @@ public boolean sendViaUDP() { return false; } - /** - * Sets the intended recipient of the message. If recipient set contains - * Message.ALL_RECIPIENTS - * then the message will be sent to all distribution managers. - */ @Override - public void setRecipients(Collection recipients) { + public void setRecipients(Collection recipients) { this.recipients = new ArrayList<>(recipients); } @@ -287,9 +268,7 @@ public void resetRecipients() { multicast = false; } - /** - * Returns true if message will be sent to everyone. - */ + @Override public boolean forAll() { return (recipients == null) || (multicast) || ((!recipients.isEmpty()) && (recipients.get(0) == ALL_RECIPIENTS)); @@ -313,18 +292,11 @@ public String getRecipientsDescription() { } } - /** - * Returns the sender of this message. Note that this value is not set until this message is - * received by a distribution manager. - */ + @Override public InternalDistributedMember getSender() { return sender; } - /** - * Sets the sender of this message. This method is only invoked when the message is - * received by a DistributionManager. 
- */ @Override public void setSender(InternalDistributedMember _sender) { sender = _sender; @@ -422,7 +394,7 @@ && getProcessorType() == OperationExecutors.SERIAL_EXECUTOR boolean forceInline = acker != null || getInlineProcess() || Connection.isDominoThread(); if (inlineProcess && !forceInline && isSharedReceiver()) { - // If processing this message notify a serial gateway sender then don't do it inline. + // If processing this message notifies a serial gateway sender then don't do it inline. if (mayNotifySerialGatewaySender(dm)) { inlineProcess = false; } @@ -486,14 +458,13 @@ protected boolean mayNotifySerialGatewaySender(ClusterDistributionManager dm) { * a server to be kicked out */ public static boolean isMembershipMessengerThread() { - String thrname = Thread.currentThread().getName(); - - return isMembershipMessengerThreadName(thrname); + String threadName = Thread.currentThread().getName(); + return isMembershipMessengerThreadName(threadName); } - public static boolean isMembershipMessengerThreadName(String thrname) { - return thrname.startsWith("unicast receiver") || thrname.startsWith("multicast receiver") - || thrname.startsWith("Geode UDP"); + public static boolean isMembershipMessengerThreadName(String threadName) { + return threadName.startsWith("unicast receiver") || threadName.startsWith("multicast receiver") + || threadName.startsWith("Geode UDP"); } @@ -540,22 +511,24 @@ private String getProcId() { */ public void setBreadcrumbsInSender() { if (Breadcrumbs.ENABLED) { - String procId = ""; + String processorId = ""; long pid = getProcessorId(); if (pid != 0) { - procId = "processorId=" + pid; + processorId = "processorId=" + pid; } - if (recipients != null && recipients.size() <= 10) { // set a limit on recipients - Breadcrumbs.setSendSide(procId + " recipients=" + getRecipients()); + // set a limit on recipients + if (recipients != null && recipients.size() <= 10) { + Breadcrumbs.setSendSide(processorId + " recipients=" + getRecipients()); } else { - if (procId.length() > 0) { - Breadcrumbs.setSendSide(procId); + if (processorId.length() > 0) { + Breadcrumbs.setSendSide(processorId); } } - Object evID = getEventID(); - if (evID != null) { - Breadcrumbs.setEventId(evID); + + final Object eventID = getEventID(); + if (eventID != null) { + Breadcrumbs.setEventId(eventID); } } } @@ -581,12 +554,7 @@ public void reset() { */ @Override public void toData(DataOutput out, - SerializationContext context) throws IOException { - // context.getSerializer().writeObject(this.recipients, out); // no need to serialize; filled in - // later - // ((IpAddress)this.sender).toData(out); // no need to serialize; filled in later - // out.writeLong(this.timeStamp); - } + SerializationContext context) throws IOException {} /** * Reads the contents of this DistributionMessage from the given input. Note that @@ -595,13 +563,7 @@ public void toData(DataOutput out, */ @Override public void fromData(DataInput in, - DeserializationContext context) throws IOException, ClassNotFoundException { - - // this.recipients = (Set)context.getDeserializer().readObject(in); // no to deserialize; filled - // in later - // this.sender = DataSerializer.readIpAddress(in); // no to deserialize; filled in later - // this.timeStamp = (long)in.readLong(); - } + DeserializationContext context) throws IOException, ClassNotFoundException {} /** * Returns a timestamp, in nanos, associated with this message. 
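One note on the DistributionAdvisor hunk further up: the sync delay for a crashed member is now read once from a system property via Long.getLong, instead of being derived from the cache servers' maximum time between pings, so operators tune it at JVM launch. A small usage sketch; the property name is taken from the diff, while the two-minute value is purely illustrative:

// Launch-time override (value is an example only):
//   java -Dgemfire.DistributionAdvisor.syncDelayForCrashedMemberMilliseconds=120000 ...
class SyncDelaySketch {
  // Same read-once pattern as the diff: Long.getLong(name, default) is
  // evaluated at class initialization and falls back to 60,000 ms.
  private static final long SYNC_DELAY_MS = Long.getLong(
      "gemfire.DistributionAdvisor.syncDelayForCrashedMemberMilliseconds", 60_000L);

  public static void main(String[] args) {
    System.out.println("sync delay (ms): " + SYNC_DELAY_MS);
  }
}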
@@ -702,8 +664,7 @@ public String getShortClassName() { @Override public String toString() { - String cname = getShortClassName(); - return cname + '@' + Integer.toHexString(System.identityHashCode(this)) + return getShortClassName() + '@' + Integer.toHexString(System.identityHashCode(this)) + " processorId=" + getProcessorId() + " sender=" + getSender(); } diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java index 5ed2f2c66552..e0b0f91c2461 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/LonerDistributionManager.java @@ -14,10 +14,12 @@ */ package org.apache.geode.distributed.internal; +import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -1221,6 +1223,13 @@ public Set getMembersInSameZone( return Collections.singleton(acceptedMember); } + @Override + public Set getEquivalents(InetAddress in) { + Set value = new HashSet<>(); + value.add(getId().getInetAddress()); + return value; + } + @Override public Set getGroupMembers(String group) { if (getDistributionManagerId().getGroups().contains(group)) { diff --git a/geode-core/src/main/java/org/apache/geode/distributed/internal/ReplyProcessor21.java b/geode-core/src/main/java/org/apache/geode/distributed/internal/ReplyProcessor21.java index 4fdf8f6e4413..e87d657f83a0 100644 --- a/geode-core/src/main/java/org/apache/geode/distributed/internal/ReplyProcessor21.java +++ b/geode-core/src/main/java/org/apache/geode/distributed/internal/ReplyProcessor21.java @@ -245,7 +245,8 @@ public ReplyProcessor21(DistributionManager dm, InternalDistributedMember member * @param dm the DistributionManager to use for messaging and membership * @param initMembers the Set of members this processor wants replies from */ - public ReplyProcessor21(DistributionManager dm, Collection initMembers) { + public ReplyProcessor21(DistributionManager dm, + Collection initMembers) { this(dm, initMembers, null); } @@ -259,7 +260,7 @@ public ReplyProcessor21(DistributionManager dm, Collection initMembers) { * @param initMembers the Set of members this processor wants replies from * @param cancelCriterion optional CancelCriterion to use; will use the dm if null */ - public ReplyProcessor21(DistributionManager dm, Collection initMembers, + public ReplyProcessor21(DistributionManager dm, Collection initMembers, CancelCriterion cancelCriterion) { this(dm, dm.getSystem(), initMembers, cancelCriterion); } @@ -273,7 +274,8 @@ public ReplyProcessor21(DistributionManager dm, Collection initMembers, * @param system the DistributedSystem connection * @param initMembers the Set of members this processor wants replies from */ - public ReplyProcessor21(InternalDistributedSystem system, Collection initMembers) { + public ReplyProcessor21(InternalDistributedSystem system, + Collection initMembers) { this(system.getDistributionManager(), system, initMembers, null); } @@ -288,7 +290,8 @@ public ReplyProcessor21(InternalDistributedSystem system, Collection initMembers * @param cancelCriterion optional CancelCriterion to use; will use the DistributedSystem's * DistributionManager if null */ - public 
ReplyProcessor21(InternalDistributedSystem system, Collection initMembers, + public ReplyProcessor21(InternalDistributedSystem system, + Collection initMembers, CancelCriterion cancelCriterion) { this(system.getDistributionManager(), system, initMembers, cancelCriterion); } @@ -302,7 +305,7 @@ public ReplyProcessor21(InternalDistributedSystem system, Collection initMembers * @param cancelCriterion optional CancelCriterion to use; will use the dm if null */ private ReplyProcessor21(DistributionManager dm, InternalDistributedSystem system, - Collection initMembers, CancelCriterion cancelCriterion) { + Collection initMembers, CancelCriterion cancelCriterion) { this(dm, system, initMembers, cancelCriterion, true); } @@ -316,7 +319,8 @@ private ReplyProcessor21(DistributionManager dm, InternalDistributedSystem syste * @param cancelCriterion optional CancelCriterion to use; will use the dm if null */ protected ReplyProcessor21(DistributionManager dm, InternalDistributedSystem system, - Collection initMembers, CancelCriterion cancelCriterion, boolean register) { + Collection initMembers, CancelCriterion cancelCriterion, + boolean register) { if (!allowReplyFromSender()) { Assert.assertTrue(initMembers != null, "null initMembers"); Assert.assertTrue(system != null, "null system"); diff --git a/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java b/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java index 5b785e87b430..b682a77dd30b 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java +++ b/geode-core/src/main/java/org/apache/geode/internal/InternalDataSerializer.java @@ -70,6 +70,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.TestOnly; import org.apache.geode.CancelException; @@ -365,6 +366,7 @@ public void invokeFromData(Object ds, DataInput in) public static String processIncomingClassName(String nameArg) { final String name = StaticSerialization.processIncomingClassName(nameArg); // using identity comparison on purpose because we are on the hot path + // noinspection StringEquality if (name != nameArg) { return name; } @@ -390,6 +392,7 @@ public static String processOutgoingClassName(final String nameArg, DataOutput o final String name = StaticSerialization.processOutgoingClassName(nameArg); // using identity comparison on purpose because we are on the hot path + // noinspection StringEquality if (name != nameArg) { return name; } @@ -521,7 +524,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { classesToSerializers.put("java.lang.Class", new WellKnownDS() { @Override public boolean toData(Object o, DataOutput out) throws IOException { - Class c = (Class) o; + Class c = (Class) o; if (c.isPrimitive()) { StaticSerialization.writePrimitiveClass(c, out); } else { @@ -759,7 +762,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { classesToSerializers.put("java.util.ArrayList", new WellKnownPdxDS() { @Override public boolean toData(Object o, DataOutput out) throws IOException { - ArrayList list = (ArrayList) o; + ArrayList list = (ArrayList) o; out.writeByte(DSCODE.ARRAY_LIST.toByte()); writeArrayList(list, out); return true; @@ -768,7 +771,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { classesToSerializers.put("java.util.LinkedList", new WellKnownDS() { @Override public boolean toData(Object o, DataOutput out) throws 
IOException { - LinkedList list = (LinkedList) o; + LinkedList list = (LinkedList) o; out.writeByte(DSCODE.LINKED_LIST.toByte()); writeLinkedList(list, out); return true; @@ -778,7 +781,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { @Override public boolean toData(Object o, DataOutput out) throws IOException { out.writeByte(DSCODE.VECTOR.toByte()); - writeVector((Vector) o, out); + writeVector((Vector) o, out); return true; } }); @@ -786,14 +789,14 @@ public boolean toData(Object o, DataOutput out) throws IOException { @Override public boolean toData(Object o, DataOutput out) throws IOException { out.writeByte(DSCODE.STACK.toByte()); - writeStack((Stack) o, out); + writeStack((Stack) o, out); return true; } }); classesToSerializers.put("java.util.HashSet", new WellKnownPdxDS() { @Override public boolean toData(Object o, DataOutput out) throws IOException { - HashSet list = (HashSet) o; + HashSet list = (HashSet) o; out.writeByte(DSCODE.HASH_SET.toByte()); writeHashSet(list, out); return true; @@ -803,14 +806,14 @@ public boolean toData(Object o, DataOutput out) throws IOException { @Override public boolean toData(Object o, DataOutput out) throws IOException { out.writeByte(DSCODE.LINKED_HASH_SET.toByte()); - writeLinkedHashSet((LinkedHashSet) o, out); + writeLinkedHashSet((LinkedHashSet) o, out); return true; } }); classesToSerializers.put("java.util.HashMap", new WellKnownPdxDS() { @Override public boolean toData(Object o, DataOutput out) throws IOException { - HashMap list = (HashMap) o; + HashMap list = (HashMap) o; out.writeByte(DSCODE.HASH_MAP.toByte()); writeHashMap(list, out); return true; @@ -820,7 +823,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { @Override public boolean toData(Object o, DataOutput out) throws IOException { out.writeByte(DSCODE.IDENTITY_HASH_MAP.toByte()); - writeIdentityHashMap((IdentityHashMap) o, out); + writeIdentityHashMap((IdentityHashMap) o, out); return true; } }); @@ -828,7 +831,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { @Override public boolean toData(Object o, DataOutput out) throws IOException { out.writeByte(DSCODE.HASH_TABLE.toByte()); - writeHashtable((Hashtable) o, out); + writeHashtable((Hashtable) o, out); return true; } }); @@ -845,7 +848,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { @Override public boolean toData(Object o, DataOutput out) throws IOException { out.writeByte(DSCODE.TREE_MAP.toByte()); - writeTreeMap((TreeMap) o, out); + writeTreeMap((TreeMap) o, out); return true; } }); @@ -853,7 +856,7 @@ public boolean toData(Object o, DataOutput out) throws IOException { @Override public boolean toData(Object o, DataOutput out) throws IOException { out.writeByte(DSCODE.TREE_SET.toByte()); - writeTreeSet((TreeSet) o, out); + writeTreeSet((TreeSet) o, out); return true; } }); @@ -994,14 +997,14 @@ public static DataSerializer _register(DataSerializer s, boolean distribute) { throw new IllegalArgumentException( "Cannot create a DataSerializer with id 0."); } - final Class[] classes = s.getSupportedClasses(); + final Class[] classes = s.getSupportedClasses(); if (classes == null || classes.length == 0) { final String msg = "The DataSerializer %s has no supported classes. 
It's getSupportedClasses method must return at least one class"; throw new IllegalArgumentException(String.format(msg, s.getClass().getName())); } - for (Class aClass : classes) { + for (Class aClass : classes) { if (aClass == null) { final String msg = "The DataSerializer getSupportedClasses method for %s returned an array that contained a null element."; @@ -1257,8 +1260,8 @@ public static void unregister(int id) { } if (o instanceof DataSerializer) { DataSerializer s = (DataSerializer) o; - Class[] classes = s.getSupportedClasses(); - for (Class aClass : classes) { + Class[] classes = s.getSupportedClasses(); + for (Class aClass : classes) { classesToSerializers.remove(aClass.getName(), s); supportedClassesToHolders.remove(aClass.getName()); } @@ -1282,7 +1285,7 @@ public static void reinitialize() { * null} is returned. Remember that it is okay to return {@code null} in this case. This method is * invoked when writing an object. If a serializer isn't available, then its the user's fault. */ - private static DataSerializer getSerializer(Class c) { + private static DataSerializer getSerializer(Class c) { DataSerializer ds = classesToSerializers.get(c.getName()); if (ds == null) { SerializerAttributesHolder sah = supportedClassesToHolders.get(c.getName()); @@ -1293,7 +1296,7 @@ private static DataSerializer getSerializer(Class c) { DataSerializer serializer = register(dsClass, false); dsClassesToHolders.remove(dsClass.getName()); idsToHolders.remove(serializer.getId()); - for (Class clazz : serializer.getSupportedClasses()) { + for (Class clazz : serializer.getSupportedClasses()) { supportedClassesToHolders.remove(clazz.getName()); } return serializer; @@ -1338,7 +1341,7 @@ public static DataSerializer getSerializer(int id) { DataSerializer ds = register(dsClass, false); dsClassesToHolders.remove(sah.getClassName()); idsToHolders.remove(id); - for (Class clazz : ds.getSupportedClasses()) { + for (Class clazz : ds.getSupportedClasses()) { supportedClassesToHolders.remove(clazz.getName()); } return ds; @@ -1383,7 +1386,7 @@ public static DataSerializer[] getSerializers() { coll.add(ds); iterator.remove(); idsToHolders.remove(ds.getId()); - for (Class clazz : ds.getSupportedClasses()) { + for (Class clazz : ds.getSupportedClasses()) { supportedClassesToHolders.remove(clazz.getName()); } } catch (ClassNotFoundException ignored) { @@ -1763,6 +1766,50 @@ public static void checkIn(DataInput in) { } } + /** + * Writes a {@link List} to a {@link DataOutput}. + *
+ * This method is internal because its semantics (that is, its ability to write any kind of + * {@link List}) are different from the {@code write}XXX methods of the external + * {@link DataSerializer}. + * + * @throws IOException A problem occurs while writing to {@code out} + * @see #readList(DataInput) + */ + public static void writeList(final List list, final DataOutput out) throws IOException { + checkOut(out); + + if (list == null) { + if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) { + logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing null List"); + } + writeArrayLength(-1, out); + } else { + final int size = list.size(); + if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) { + logger.trace(LogMarker.SERIALIZER_VERBOSE, "Writing List with {} elements: {}", size, + list); + } + writeArrayLength(size, out); + for (Object element : list) { + writeObject(element, out); + } + } + } + + /** + * Reads a {@link List} from a {@link DataInput}. + * + * @throws IOException A problem occurs while reading from in + * @throws ClassNotFoundException The class of one of the {@link List} elements cannot be found. + * + * @see #writeList(List, DataOutput) + */ + public static List readList(final DataInput in) + throws IOException, ClassNotFoundException { + return readArrayList(in); + } + /** * Writes a {@code Set} to a {@code DataOutput}. *
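As the javadoc above states, writeList emits an array length (-1 for a null list) followed by each element via writeObject, and readList simply delegates to readArrayList. A hedged, untested round-trip sketch, assuming String elements need no further serialization bootstrap:

```java
// Untested round-trip sketch for the new writeList/readList helpers.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.util.Arrays;
import java.util.List;

import org.apache.geode.internal.InternalDataSerializer;

public class WriteListRoundTrip {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    // wire form: length prefix (3) followed by the three String elements
    InternalDataSerializer.writeList(Arrays.asList("a", "b", "c"), new DataOutputStream(bytes));

    List<?> copy = InternalDataSerializer.readList(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy); // [a, b, c], materialized as an ArrayList
  }
}
```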
@@ -1804,7 +1851,7 @@ public static void writeSet(Collection set, DataOutput out) throws IOExceptio * @see #writeSet * @since GemFire 4.0 */ - public static Set readSet(DataInput in) throws IOException, ClassNotFoundException { + public static Set readSet(DataInput in) throws IOException, ClassNotFoundException { return readHashSet(in); } @@ -1815,15 +1862,14 @@ public static Set readSet(DataInput in) throws IOException, ClassNotFoundExcepti * @param hasLongIDs if false, write only ints, not longs * @param out the output stream */ - public static void writeSetOfLongs(Set set, boolean hasLongIDs, DataOutput out) + public static void writeSetOfLongs(Set set, boolean hasLongIDs, DataOutput out) throws IOException { if (set == null) { out.writeInt(-1); } else { out.writeInt(set.size()); out.writeBoolean(hasLongIDs); - for (Object aSet : set) { - Long l = (Long) aSet; + for (Long l : set) { if (hasLongIDs) { out.writeLong(l); } else { @@ -2042,7 +2088,7 @@ public static void basicWriteObject(Object o, DataOutput out, boolean ensurePdxC } checkPdxCompatible(o, ensurePdxCompatibility); - Class c = o.getClass(); + Class c = o.getClass(); // Is "c" a user class registered with an Instantiator? int classId = InternalInstantiator.getClassId(c); if (classId != 0) { @@ -2181,14 +2227,15 @@ public static void writeSerializableObject(Object o, DataOutput out) throws IOEx } else { final DataOutput out2 = out; - stream = new OutputStream() { + stream = new OutputStream() { // lgtm [java/inefficient-output-stream] + // lgtm false-positive - https://github.com/github/codeql/issues/7610 @Override public void write(int b) throws IOException { out2.write(b); } @Override - public void write(byte[] b, int off, int len) throws IOException { + public void write(byte @NotNull [] b, int off, int len) throws IOException { out2.write(b, off, len); } }; @@ -2517,17 +2564,13 @@ public static Object basicReadObject(final DataInput in) checkIn(in); // Read the header byte - byte header = in.readByte(); - DSCODE headerDSCode = DscodeHelper.toDSCODE(header); + final byte header = in.readByte(); + final DSCODE headerDSCode = DscodeHelper.toDSCODE(header); if (logger.isTraceEnabled(LogMarker.SERIALIZER_VERBOSE)) { logger.trace(LogMarker.SERIALIZER_VERBOSE, "basicReadObject: header={}", header); } - if (headerDSCode == null) { - throw new IOException("Unknown header byte: " + header); - } - switch (headerDSCode) { case DS_FIXED_ID_BYTE: return dsfidFactory.create(in.readByte(), in); @@ -2953,15 +2996,15 @@ public static int getLoadedDataSerializers() { return idsToSerializers.size(); } - public static Map getDsClassesToHoldersMap() { + public static Map getDsClassesToHoldersMap() { return dsClassesToHolders; } - public static Map getIdsToHoldersMap() { + public static Map getIdsToHoldersMap() { return idsToHolders; } - public static Map getSupportedClassesToHoldersMap() { + public static Map getSupportedClassesToHoldersMap() { return supportedClassesToHolders; } @@ -3252,10 +3295,11 @@ public static class SerializerAttributesHolder { SerializerAttributesHolder() {} - SerializerAttributesHolder(String name, EventID event, ClientProxyMembershipID proxy, int id) { - className = name; - eventId = event; - proxyId = proxy; + SerializerAttributesHolder(String className, EventID eventId, ClientProxyMembershipID proxyId, + int id) { + this.className = className; + this.eventId = eventId; + this.proxyId = proxyId; this.id = id; } @@ -3557,7 +3601,7 @@ private static class DSObjectInputStream extends ObjectInputStream { } @Override - 
protected Class resolveClass(ObjectStreamClass desc) + protected Class resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException { String className = desc.getName(); @@ -3574,16 +3618,16 @@ protected Class resolveClass(ObjectStreamClass desc) } @Override - protected Class resolveProxyClass(String[] interfaces) throws ClassNotFoundException { + protected Class resolveProxyClass(String[] interfaces) throws ClassNotFoundException { ClassLoader nonPublicLoader = null; boolean hasNonPublicInterface = false; // define proxy in class loader of non-public // interface(s), if any - Class[] classObjs = new Class[interfaces.length]; + Class[] classObjs = new Class[interfaces.length]; for (int i = 0; i < interfaces.length; i++) { - Class cl = getCachedClass(interfaces[i]); + Class cl = getCachedClass(interfaces[i]); if ((cl.getModifiers() & Modifier.PUBLIC) == 0) { if (hasNonPublicInterface) { if (nonPublicLoader != cl.getClassLoader()) { @@ -3625,7 +3669,7 @@ public int getId() { } @Override - public Class[] getSupportedClasses() { + public Class[] getSupportedClasses() { // illegal for a customer to return null but we can do it since we never register // this serializer. return null; diff --git a/geode-core/src/main/java/org/apache/geode/internal/JvmSizeUtils.java b/geode-core/src/main/java/org/apache/geode/internal/JvmSizeUtils.java index 673f1a0c55bb..baacbf01921a 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/JvmSizeUtils.java +++ b/geode-core/src/main/java/org/apache/geode/internal/JvmSizeUtils.java @@ -21,6 +21,7 @@ import org.apache.geode.annotations.Immutable; import org.apache.geode.internal.lang.SystemUtils; +import org.apache.geode.internal.size.WellKnownClassSizer; import org.apache.geode.unsafe.internal.sun.misc.Unsafe; /** @@ -186,4 +187,12 @@ public static int memoryOverhead(Object[] objectArray) { public static int memoryOverhead(Class clazz) { return (int) sizeof(clazz); } + + /** + * Returns the amount of memory used to store the given + * String instance. 
+ */ + public static int memoryOverhead(String string) { + return WellKnownClassSizer.sizeof(string); + } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java index d39e2fbffe6c..32efb209bc8b 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/AbstractRegion.java @@ -15,7 +15,6 @@ package org.apache.geode.internal.cache; import static org.apache.geode.internal.cache.LocalRegion.InitializationLevel.ANY_INIT; -import static org.apache.geode.internal.statistics.StatisticsClockFactory.disabledClock; import java.io.DataInput; import java.io.DataOutput; @@ -36,6 +35,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.DataSerializer; import org.apache.geode.StatisticsFactory; @@ -165,7 +165,7 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato protected float loadFactor; - private DataPolicy dataPolicy; + private final @NotNull DataPolicy dataPolicy; protected int regionIdleTimeout; @@ -178,13 +178,13 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato @Immutable public static final Scope DEFAULT_SCOPE = Scope.DISTRIBUTED_NO_ACK; - protected Scope scope = DEFAULT_SCOPE; + protected final Scope scope; protected boolean statisticsEnabled; protected boolean isLockGrantor; - private boolean mcastEnabled; + private final boolean mcastEnabled; protected int concurrencyLevel; @@ -218,7 +218,7 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato */ protected boolean offHeap; - private boolean cloningEnable = false; + private boolean cloningEnable; private DiskWriteAttributes diskWriteAttributes; @@ -226,7 +226,7 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato protected int[] diskSizes; protected String diskStoreName; protected boolean isDiskSynchronous; - private boolean indexMaintenanceSynchronous = false; + private final boolean indexMaintenanceSynchronous; protected volatile IndexManager indexManager = null; @@ -267,7 +267,7 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato private final AtomicLong missCount = new AtomicLong(); - protected String poolName; + protected final String poolName; protected Compressor compressor; @@ -278,14 +278,15 @@ public abstract class AbstractRegion implements InternalRegion, AttributesMutato private final ExtensionPoint> extensionPoint = new SimpleExtensionPoint>(this, this); - protected final InternalCache cache; + protected final @NotNull InternalCache cache; private final PoolFinder poolFinder; private final StatisticsClock statisticsClock; /** Creates a new instance of AbstractRegion */ - protected AbstractRegion(InternalCache cache, RegionAttributes attrs, String regionName, + protected AbstractRegion(@NotNull InternalCache cache, RegionAttributes attrs, + String regionName, InternalRegionArguments internalRegionArgs, PoolFinder poolFinder, StatisticsClock statisticsClock) { this.poolFinder = poolFinder; @@ -397,18 +398,6 @@ && supportsConcurrencyChecks()) { } } - @VisibleForTesting - AbstractRegion() { - statisticsClock = disabledClock(); - cache = null; - serialNumber = 0; - isPdxTypesRegion = false; - lastAccessedTime = new AtomicLong(0); - lastModifiedTime = new 
AtomicLong(0); - evictionAttributes = new EvictionAttributesImpl(); - poolFinder = (a) -> null; - } - /** * configure this region to ignore or not ignore in-progress JTA transactions. Setting this to * true will cause cache operations to no longer notice JTA transactions. The default setting is @@ -680,7 +669,7 @@ public String getPoolName() { } @Override - public DataPolicy getDataPolicy() { + public @NotNull DataPolicy getDataPolicy() { return dataPolicy; } @@ -1778,7 +1767,7 @@ public int getSerialNumber() { } @Override - public InternalCache getCache() { + public @NotNull InternalCache getCache() { return cache; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java index e6dcd3fb8fcd..76b0124d0fe9 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketAdvisor.java @@ -246,7 +246,7 @@ public Lock getPrimaryMoveReadLock() { } /** - * Try to lock the primary bucket to make sure no operation is on-going at current bucket. + * Try to lock the primary bucket to make sure no operation is ongoing at current bucket. * */ void tryLockIfPrimary() { @@ -304,17 +304,15 @@ public boolean deposePrimary() { private void deposePrimaryForColocatedChildren() { boolean deposedChildPrimaries = true; List colocatedChildPRs = ColocationHelper.getColocatedChildRegions(pRegion); - if (colocatedChildPRs != null) { - for (PartitionedRegion pr : colocatedChildPRs) { - Bucket b = pr.getRegionAdvisor().getBucket(getBucket().getId()); - if (b != null) { - BucketAdvisor ba = b.getBucketAdvisor(); - deposedChildPrimaries = ba.deposePrimary() && deposedChildPrimaries; - if (b instanceof BucketRegionQueue) { - BucketRegionQueue brq = (BucketRegionQueue) b; - brq.decQueueSize(brq.size()); - brq.incSecondaryQueueSize(brq.size()); - } + for (PartitionedRegion pr : colocatedChildPRs) { + Bucket b = pr.getRegionAdvisor().getBucket(getBucket().getId()); + if (b != null) { + BucketAdvisor ba = b.getBucketAdvisor(); + deposedChildPrimaries = ba.deposePrimary() && deposedChildPrimaries; + if (b instanceof BucketRegionQueue) { + BucketRegionQueue brq = (BucketRegionQueue) b; + brq.decQueueSize(brq.size()); + brq.incSecondaryQueueSize(brq.size()); } } } @@ -376,7 +374,6 @@ public InternalDistributedMember getPreferredNode() { * */ private Profile selectNotInitializingProfile(Profile[] inProfiles) { - int index = 0; int offset = 0; if (inProfiles.length > 1) { // Pick random offset. 
@@ -384,7 +381,7 @@ private Profile selectNotInitializingProfile(Profile[] inProfiles) { } for (int i = 0; i < inProfiles.length; i++) { - index = (offset + i) % inProfiles.length; + int index = (offset + i) % inProfiles.length; BucketProfile bp = (BucketProfile) inProfiles[index]; if (!bp.isInitializing) { return inProfiles[index]; @@ -656,6 +653,7 @@ public Object[] toArray() { return backingSet.toArray(); } + @SuppressWarnings("SuspiciousToArrayCall") @Override public T[] toArray(T[] contents) { return backingSet.toArray(contents); @@ -1276,40 +1274,38 @@ private DistributedMemberLock getPrimaryLock(boolean createDLS) { private void acquirePrimaryRecursivelyForColocated() { final List colocatedWithList = ColocationHelper.getColocatedChildRegions(regionAdvisor.getPartitionedRegion()); - if (colocatedWithList != null) { - for (PartitionedRegion childPR : colocatedWithList) { - Bucket b = childPR.getRegionAdvisor().getBucket(getBucket().getId()); - BucketAdvisor childBA = b.getBucketAdvisor(); - Assert.assertHoldsLock(childBA, false); - boolean acquireForChild = false; + for (PartitionedRegion childPR : colocatedWithList) { + Bucket b = childPR.getRegionAdvisor().getBucket(getBucket().getId()); + BucketAdvisor childBA = b.getBucketAdvisor(); + Assert.assertHoldsLock(childBA, false); + boolean acquireForChild = false; - if (logger.isDebugEnabled()) { - logger.debug( - "BucketAdvisor.acquirePrimaryRecursivelyForColocated: about to take lock for bucket: {} of PR: {} with isHosting={}", - getBucket().getId(), childPR.getFullPath(), childBA.isHosting()); - } - childBA.primaryMoveWriteLock.lock(); - try { - if (childBA.isHosting()) { - if (isPrimary()) { - if (!childBA.isPrimary()) { - childBA.setVolunteering(); - boolean acquired = childBA.acquiredPrimaryLock(); - acquireForChild = true; - if (acquired && pRegion.isFixedPartitionedRegion()) { - childBA.acquirePrimaryForRestOfTheBucket(); - } - } else { - acquireForChild = true; + if (logger.isDebugEnabled()) { + logger.debug( + "BucketAdvisor.acquirePrimaryRecursivelyForColocated: about to take lock for bucket: {} of PR: {} with isHosting={}", + getBucket().getId(), childPR.getFullPath(), childBA.isHosting()); + } + childBA.primaryMoveWriteLock.lock(); + try { + if (childBA.isHosting()) { + if (isPrimary()) { + if (!childBA.isPrimary()) { + childBA.setVolunteering(); + boolean acquired = childBA.acquiredPrimaryLock(); + acquireForChild = true; + if (acquired && pRegion.isFixedPartitionedRegion()) { + childBA.acquirePrimaryForRestOfTheBucket(); } + } else { + acquireForChild = true; } - } // if isHosting - if (acquireForChild) { - childBA.acquirePrimaryRecursivelyForColocated(); } - } finally { - childBA.primaryMoveWriteLock.unlock(); + } // if isHosting + if (acquireForChild) { + childBA.acquirePrimaryRecursivelyForColocated(); } + } finally { + childBA.primaryMoveWriteLock.unlock(); } } } @@ -1344,7 +1340,7 @@ private void acquirePrimaryForRestOfTheBucket() { /** * Sets volunteering to true. Returns true if the state of volunteering was changed. Returns false - * if voluntering was already equal to true. Caller should do nothing if false is returned. + * if volunteering was already equal to true. Caller should do nothing if false is returned. */ private boolean setVolunteering() { synchronized (this) { @@ -1419,7 +1415,7 @@ private InternalDistributedMember waitForPrimaryMember(long timeout) { // log a warning; loggedWarning = true; } else { - timeLeft = timeLeft > timeUntilWarning ? 
timeUntilWarning : timeLeft; + timeLeft = Math.min(timeLeft, timeUntilWarning); } } wait(timeLeft); // spurious wakeup ok @@ -1438,7 +1434,7 @@ private InternalDistributedMember waitForPrimaryMember(long timeout) { } /** - * How long to wait, in millisecs, for redundant buckets to exist + * How long to wait, in milliseconds, for redundant buckets to exist */ private static final long BUCKET_REDUNDANCY_WAIT = 15000L; // 15 seconds @@ -1625,7 +1621,7 @@ protected Profile instantiateProfile(InternalDistributedMember memberId, int ver if (!pRegion.isShadowPR()) { Set serverLocations = getBucketServerLocations(version); if (!serverLocations.isEmpty()) { - return new ServerBucketProfile(memberId, version, getBucket(), (HashSet) serverLocations); + return new ServerBucketProfile(memberId, version, getBucket(), serverLocations); } } return new BucketProfile(memberId, version, getBucket()); } @@ -1871,23 +1867,13 @@ private boolean requestPrimaryState(byte requestedState) { case NO_PRIMARY_NOT_HOSTING: switch (requestedState) { case NO_PRIMARY_NOT_HOSTING: - // race condition ok, return false - return false; - case NO_PRIMARY_HOSTING: - primaryState = requestedState; - break; - case OTHER_PRIMARY_NOT_HOSTING: - primaryState = requestedState; - break; - case OTHER_PRIMARY_HOSTING: - primaryState = requestedState; - break; case BECOMING_HOSTING: - // race condition during close is ok, return false - return false; case VOLUNTEERING_HOSTING: // race condition during close is ok, return false return false; + case NO_PRIMARY_HOSTING: + case OTHER_PRIMARY_NOT_HOSTING: + case OTHER_PRIMARY_HOSTING: case CLOSED: primaryState = requestedState; break; @@ -1899,32 +1885,20 @@ private boolean requestPrimaryState(byte requestedState) { case NO_PRIMARY_HOSTING: switch (requestedState) { case NO_PRIMARY_NOT_HOSTING: + case OTHER_PRIMARY_HOSTING: + case CLOSED: primaryState = requestedState; break; - // case OTHER_PRIMARY_NOT_HOSTING: -- enable for bucket migration - // this.primaryState = requestedState; - // break; case NO_PRIMARY_HOSTING: // race condition ok, return false return false; case VOLUNTEERING_HOSTING: - primaryState = requestedState; { - PartitionedRegionStats stats = getPartitionedRegionStats(); - stats.putStartTime(this, stats.startVolunteering()); - } - break; case BECOMING_HOSTING: primaryState = requestedState; { PartitionedRegionStats stats = getPartitionedRegionStats(); stats.putStartTime(this, stats.startVolunteering()); } break; - case OTHER_PRIMARY_HOSTING: - primaryState = requestedState; - break; - case CLOSED: - primaryState = requestedState; - break; default: throw new IllegalStateException(String.format("Cannot change from %s to %s", primaryStateToString(), @@ -1934,23 +1908,17 @@ private boolean requestPrimaryState(byte requestedState) { case OTHER_PRIMARY_NOT_HOSTING: switch (requestedState) { case NO_PRIMARY_NOT_HOSTING: + case OTHER_PRIMARY_HOSTING: + case CLOSED: primaryState = requestedState; break; case OTHER_PRIMARY_NOT_HOSTING: // race condition ok, return false return false; - case OTHER_PRIMARY_HOSTING: - primaryState = requestedState; - break; case BECOMING_HOSTING: - // race condition during close is ok, return false - return false; case VOLUNTEERING_HOSTING: // race condition during close is ok, return false return false; - case CLOSED: - primaryState = requestedState; - break; default: throw new IllegalStateException(String.format("Cannot change from %s to %s", primaryStateToString(), @@ -1959,22 +1927,19 @@ private boolean requestPrimaryState(byte requestedState) { break;
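The requestPrimaryState rewrite in these hunks collapses duplicated transitions into grouped case labels that fall through to a single shared body. A standalone illustration of that pattern, using hypothetical byte constants in place of the real state values:

```java
// Fall-through grouping sketch; constants and transitions are illustrative only.
public class StateGroupingSketch {
  static final byte NO_PRIMARY_NOT_HOSTING = 0;
  static final byte NO_PRIMARY_HOSTING = 1;
  static final byte OTHER_PRIMARY_NOT_HOSTING = 2;
  static final byte OTHER_PRIMARY_HOSTING = 3;
  static final byte CLOSED = 4;

  static byte primaryState = NO_PRIMARY_NOT_HOSTING;

  static boolean requestState(byte requestedState) {
    switch (requestedState) {
      case NO_PRIMARY_HOSTING:
      case OTHER_PRIMARY_NOT_HOSTING:
      case OTHER_PRIMARY_HOSTING:
      case CLOSED:
        primaryState = requestedState; // one shared body replaces four duplicated blocks
        return true;
      default:
        return false; // e.g. races during close are tolerated by returning false
    }
  }

  public static void main(String[] args) {
    System.out.println(requestState(CLOSED)); // true
    System.out.println(primaryState);         // 4
  }
}
```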
case OTHER_PRIMARY_HOSTING: switch (requestedState) { - // case NO_PRIMARY_NOT_HOSTING: -- enable for bucket migration - // this.primaryState = requestedState; - // break; case OTHER_PRIMARY_NOT_HOSTING: + case NO_PRIMARY_HOSTING: + case IS_PRIMARY_HOSTING: + case CLOSED: + // race condition ok, probably race in HA where other becomes + // primary and immediately leaves while we have try-lock message + // en-route to grantor // May occur when setHosting(false) is called primaryState = requestedState; break; case OTHER_PRIMARY_HOSTING: // race condition ok, return false return false; - case NO_PRIMARY_HOSTING: - primaryState = requestedState; - break; - case CLOSED: - primaryState = requestedState; - break; case VOLUNTEERING_HOSTING: // race condition ok, return false to abort volunteering return false; @@ -1984,12 +1949,6 @@ private boolean requestPrimaryState(byte requestedState) { stats.putStartTime(this, stats.startVolunteering()); } break; - case IS_PRIMARY_HOSTING: - // race condition ok, probably race in HA where other becomes - // primary and immediately leaves while we have try-lock message - // enroute to grantor - primaryState = requestedState; - break; default: throw new IllegalStateException(String.format("Cannot change from %s to %s", primaryStateToString(), @@ -1999,15 +1958,10 @@ private boolean requestPrimaryState(byte requestedState) { case VOLUNTEERING_HOSTING: switch (requestedState) { case NO_PRIMARY_NOT_HOSTING: - // May occur when setHosting(false) is called - primaryState = requestedState; { - PartitionedRegionStats stats = getPartitionedRegionStats(); - stats.endVolunteeringClosed(stats.removeStartTime(this)); - } - break; + case CLOSED: case OTHER_PRIMARY_NOT_HOSTING: - // May occur when setHosting(false) is called // Profile update for other primary may have slipped in + // May occur when setHosting(false) is called primaryState = requestedState; { PartitionedRegionStats stats = getPartitionedRegionStats(); stats.endVolunteeringClosed(stats.removeStartTime(this)); @@ -2030,17 +1984,9 @@ private boolean requestPrimaryState(byte requestedState) { } break; case VOLUNTEERING_HOSTING: - // race condition ok, return false to abort volunteering - return false; case BECOMING_HOSTING: // race condition ok, return false to abort volunteering return false; - case CLOSED: - primaryState = requestedState; { - PartitionedRegionStats stats = getPartitionedRegionStats(); - stats.endVolunteeringClosed(stats.removeStartTime(this)); - } - break; default: throw new IllegalStateException(String.format("Cannot change from %s to %s", primaryStateToString(), @@ -2050,15 +1996,10 @@ private boolean requestPrimaryState(byte requestedState) { case BECOMING_HOSTING: switch (requestedState) { case NO_PRIMARY_NOT_HOSTING: - // May occur when setHosting(false) is called - primaryState = requestedState; { - PartitionedRegionStats stats = getPartitionedRegionStats(); - stats.endVolunteeringClosed(stats.removeStartTime(this)); - } - break; + case CLOSED: case OTHER_PRIMARY_NOT_HOSTING: - // May occur when setHosting(false) is called // Profile update for other primary may have slipped in + // May occur when setHosting(false) is called primaryState = requestedState; { PartitionedRegionStats stats = getPartitionedRegionStats(); stats.endVolunteeringClosed(stats.removeStartTime(this)); @@ -2077,17 +2018,9 @@ private boolean requestPrimaryState(byte requestedState) { case OTHER_PRIMARY_HOSTING: return false; case VOLUNTEERING_HOSTING: - // race condition ok, return false to abort volunteering - return 
false; case BECOMING_HOSTING: // race condition ok, return false to abort volunteering return false; - case CLOSED: - primaryState = requestedState; { - PartitionedRegionStats stats = getPartitionedRegionStats(); - stats.endVolunteeringClosed(stats.removeStartTime(this)); - } - break; default: throw new IllegalStateException(String.format("Cannot change from %s to %s", primaryStateToString(), @@ -2097,30 +2030,18 @@ private boolean requestPrimaryState(byte requestedState) { case IS_PRIMARY_HOSTING: switch (requestedState) { case NO_PRIMARY_HOSTING: - // rebalancing must have moved the primary - changeFromPrimaryTo(requestedState); - break; - // case OTHER_PRIMARY_HOSTING: -- enable for bucket migration - // // rebalancing must have moved the primary - // changeFromPrimaryTo(requestedState); - // break; + case CLOSED: + case NO_PRIMARY_NOT_HOSTING: case OTHER_PRIMARY_NOT_HOSTING: // rebalancing must have moved the primary and primary - changeFromPrimaryTo(requestedState); - break; - case NO_PRIMARY_NOT_HOSTING: // May occur when setHosting(false) is called due to closing + // rebalancing must have moved the primary changeFromPrimaryTo(requestedState); break; case VOLUNTEERING_HOSTING: - // race condition ok, return false to abort volunteering - return false; case BECOMING_HOSTING: // race condition ok, return false to abort volunteering return false; - case CLOSED: - changeFromPrimaryTo(requestedState); - break; default: throw new IllegalStateException("Cannot change from " + primaryStateToString() + " to " + primaryStateToString(requestedState)); @@ -2135,8 +2056,6 @@ private boolean requestPrimaryState(byte requestedState) { e); break; case VOLUNTEERING_HOSTING: - // race condition ok, return false to abort volunteering - return false; case BECOMING_HOSTING: // race condition ok, return false to abort volunteering return false; @@ -2165,8 +2084,8 @@ private void changeFromPrimaryTo(byte requestedState) { } @Override - public Set adviseDestroyRegion() { - // fix for bug 37604 - tell all owners of the pr that the bucket is being + public Set adviseDestroyRegion() { + // tell all owners of the pr that the bucket is being // destroyed. 
This is needed when bucket cleanup is performed return regionAdvisor.adviseAllPRNodes(); } @@ -2193,7 +2112,7 @@ private Set adviseNotInitialized() { @Override - public Set adviseNetWrite() { + public Set adviseNetWrite() { return regionAdvisor.adviseNetWrite(); } @@ -2236,8 +2155,8 @@ public static class BucketProfile extends CacheProfile { /** True if this profile's member is the primary for this bucket */ public boolean isPrimary; /** - * True if the profile is coming from a real BucketRegion acceptible states hosting = false, - * init = true hosting = true, init = false hosting = false, init = false unacceptible states + * True if the profile is coming from a real BucketRegion acceptable states hosting = false, + * init = true hosting = true, init = false hosting = false, init = false unacceptable states * hosting = true, init = true */ public boolean isHosting; @@ -2304,7 +2223,7 @@ public static class ServerBucketProfile extends BucketProfile { public ServerBucketProfile() {} public ServerBucketProfile(InternalDistributedMember memberId, int version, Bucket bucket, - HashSet serverLocations) { + Set serverLocations) { super(memberId, version, bucket); bucketId = bucket.getId(); bucketServerLocations = serverLocations; @@ -2457,7 +2376,7 @@ void doVolunteerForPrimary() { } DistributedMemberLock thePrimaryLock = null; while (continueVolunteering()) { - // Fix for 41865 - We can't send out profiles while holding the + // We can't send out profiles while holding the // sync on this advisor, because that will cause a deadlock. // Holding the primaryMoveWriteLock here instead prevents any // operations from being performed on this primary until the child regions @@ -2470,7 +2389,7 @@ void doVolunteerForPrimary() { // Check our parent advisor and set our state // accordingly if (parentBA != null) { - // Fix for 44350 - we don't want to get a primary move lock on + // we don't want to get a primary move lock on // the advisor, because that might deadlock with a user thread. // However, since all depose/elect operations on the parent bucket // cascade to the child bucket and get the child bucket move lock, @@ -2552,12 +2471,6 @@ void doVolunteerForPrimary() { dlsDestroyed); } endVolunteering(); - // if (isPrimary()) { - // Bucket bucket = getBucket(); - // if (bucket instanceof ProxyBucketRegion) { - // bucket = ((ProxyBucketRegion)bucket).getHostedBucketRegion(); - // } - // } } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketDump.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketDump.java index f9763ef6c7db..0f830780d82f 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketDump.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketDump.java @@ -18,22 +18,22 @@ import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; +import java.util.Objects; import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.cache.versions.RegionVersionVector; +import org.apache.geode.internal.cache.versions.VersionSource; import org.apache.geode.internal.cache.versions.VersionTag; /** * This class is used for getting the contents of buckets and then optionally compare them. It may - * contain the region version vector for the bucket as well as all of the entries. - * - * + * contain the region version vector for the bucket as well as all the entries. 
*/ public class BucketDump { /** * The version vector for this bucket */ - private final RegionVersionVector rvv; + private final RegionVersionVector> rvv; /** * The contents of the bucket @@ -43,14 +43,15 @@ public class BucketDump { /** * The contents of the bucket */ - private final Map versions; + private final Map> versions; private final int bucketId; private final InternalDistributedMember member; - public BucketDump(int bucketId, InternalDistributedMember member, RegionVersionVector rvv, - Map values, Map versions) { + public BucketDump(int bucketId, InternalDistributedMember member, + RegionVersionVector> rvv, + Map values, Map> versions) { this.bucketId = bucketId; this.member = member; this.rvv = rvv; @@ -58,7 +59,7 @@ public BucketDump(int bucketId, InternalDistributedMember member, RegionVersionV this.versions = versions; } - public RegionVersionVector getRvv() { + public RegionVersionVector> getRvv() { return rvv; } @@ -67,7 +68,7 @@ public Map getValues() { return values; } - public Map getVersions() { + public Map> getVersions() { return versions; } @@ -96,10 +97,6 @@ public InternalDistributedMember getMember() { @Override public String toString() { - // int sz; - // synchronized(this) { - // sz = this.size(); - // } return "Bucket id = " + bucketId + " from member = " + member + ": " + super.toString(); } @@ -131,9 +128,6 @@ public boolean equals(Object obj) { } else if (!values.equals(other.values)) { return false; } - if (versions == null) { - return other.versions == null; - } else - return versions.equals(other.versions); + return Objects.equals(versions, other.versions); } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java index ffb110911570..32a301952d40 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketRegion.java @@ -191,10 +191,9 @@ public String toString() { public Object getDeserialized(boolean copyOnRead) { if (isValueByteArray()) { if (copyOnRead) { - // TODO move this code to CopyHelper.copy? 
byte[] src = (byte[]) rawValue; byte[] dest = new byte[src.length]; - System.arraycopy(rawValue, 0, dest, 0, dest.length); + System.arraycopy(src, 0, dest, 0, dest.length); return dest; } else { return rawValue; @@ -235,7 +234,7 @@ AtomicLong5 getEventSeqNum() { return eventSeqNum; } - public BucketRegion(String regionName, RegionAttributes attrs, LocalRegion parentRegion, + public BucketRegion(String regionName, RegionAttributes attrs, LocalRegion parentRegion, InternalCache cache, InternalRegionArguments internalRegionArgs, StatisticsClock statisticsClock) { super(regionName, attrs, parentRegion, cache, internalRegionArgs, statisticsClock); @@ -694,7 +693,7 @@ public long basicPutPart2(EntryEventImpl event, RegionEntry entry, boolean isIni if (!event.isOriginRemote()) { if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) { boolean eventHasDelta = event.getDeltaBytes() != null; - VersionTag v = entry.generateVersionTag(null, eventHasDelta, this, event); + VersionTag v = entry.generateVersionTag(null, eventHasDelta, this, event); if (v != null) { if (logger.isDebugEnabled()) { logger.debug("generated version tag {} in region {}", v, getName()); @@ -981,7 +980,7 @@ void basicInvalidatePart2(final RegionEntry regionEntry, final EntryEventImpl ev try { if (!event.isOriginRemote()) { if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) { - VersionTag v = regionEntry.generateVersionTag(null, false, this, event); + VersionTag v = regionEntry.generateVersionTag(null, false, this, event); if (logger.isDebugEnabled() && v != null) { logger.debug("generated version tag {} in region {}", v, getName()); } @@ -1267,7 +1266,7 @@ public void basicDestroyBeforeRemoval(RegionEntry entry, EntryEventImpl event) { && !(event.isExpiration() && isEntryEvictDestroyEnabled())) { if (event.getVersionTag() == null || event.getVersionTag().isGatewayTag()) { - VersionTag v = entry.generateVersionTag(null, false, this, event); + VersionTag v = entry.generateVersionTag(null, false, this, event); if (logger.isDebugEnabled() && v != null) { logger.debug("generated version tag {} in region {}", v, getName()); } @@ -1491,7 +1490,7 @@ private RawValue getSerialized(Object key, boolean updateStats, boolean doNotLoc } } if (clientEvent != null) { - VersionStamp stamp = re.getVersionStamp(); + VersionStamp stamp = re.getVersionStamp(); if (stamp != null) { clientEvent.setVersionTag(stamp.asVersionTag()); } @@ -1807,10 +1806,10 @@ && getEventTracker().isInitialImageProvider(event.getDistributedMember())) { * @param filterRoutingInfo routing information for all members having the region * @param processor the reply processor, or null if there isn't one */ - void performAdjunctMessaging(EntryEventImpl event, Set cacheOpRecipients, - Set adjunctRecipients, FilterRoutingInfo filterRoutingInfo, - DirectReplyProcessor processor, - boolean calculateDelta, boolean sendDeltaWithFullValue) { + void performAdjunctMessaging(EntryEventImpl event, + Set cacheOpRecipients, + Set adjunctRecipients, FilterRoutingInfo filterRoutingInfo, + DirectReplyProcessor processor, boolean calculateDelta, boolean sendDeltaWithFullValue) { PartitionMessage msg = event.getPartitionMessage(); if (calculateDelta) { @@ -1882,13 +1881,11 @@ private void setDeltaIfNeeded(EntryEventImpl event) { * members that should be attached to the operation's reply processor (if any) * * @param dpao DistributedPutAllOperation object for PutAllMessage - * @param cacheOpRecipients set of receiver which got cacheUpdateOperation. 
* @param adjunctRecipients recipients that must unconditionally get the event - * @param filterRoutingInfo routing information for all members having the region * @param processor the reply processor, or null if there isn't one */ - void performPutAllAdjunctMessaging(DistributedPutAllOperation dpao, Set cacheOpRecipients, - Set adjunctRecipients, FilterRoutingInfo filterRoutingInfo, + void performPutAllAdjunctMessaging(DistributedPutAllOperation dpao, + Set adjunctRecipients, DirectReplyProcessor processor) { PutAllPRMessage prMsg = dpao.createPRMessagesNotifyOnly(getId()); prMsg.initMessage(partitionedRegion, adjunctRecipients, true, processor); @@ -1901,14 +1898,11 @@ void performPutAllAdjunctMessaging(DistributedPutAllOperation dpao, Set cacheOpR * members that should be attached to the operation's reply processor (if any) * * @param op DistributedRemoveAllOperation object for RemoveAllMessage - * @param cacheOpRecipients set of receiver which got cacheUpdateOperation. * @param adjunctRecipients recipients that must unconditionally get the event - * @param filterRoutingInfo routing information for all members having the region * @param processor the reply processor, or null if there isn't one */ void performRemoveAllAdjunctMessaging(DistributedRemoveAllOperation op, - Set cacheOpRecipients, Set adjunctRecipients, - FilterRoutingInfo filterRoutingInfo, + Set adjunctRecipients, DirectReplyProcessor processor) { // create a RemoveAllPRMessage out of RemoveAllMessage to send to adjunct nodes RemoveAllPRMessage prMsg = op.createPRMessagesNotifyOnly(getId()); @@ -2519,7 +2513,7 @@ public void txApplyPut(Operation putOp, Object key, Object newValue, boolean did void superTxApplyPut(Operation putOp, Object key, Object wrappedNewValue, boolean didDestroy, TransactionId transactionId, TXRmtEvent event, EventID eventId, Object aCallbackArgument, List pendingCallbacks, FilterRoutingInfo filterRoutingInfo, - ClientProxyMembershipID bridgeContext, TXEntryState txEntryState, VersionTag versionTag, + ClientProxyMembershipID bridgeContext, TXEntryState txEntryState, VersionTag versionTag, long tailKey) { super.txApplyPut(putOp, key, wrappedNewValue, didDestroy, transactionId, event, eventId, aCallbackArgument, pendingCallbacks, filterRoutingInfo, bridgeContext, txEntryState, diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketServerLocation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketServerLocation.java deleted file mode 100755 index e98ec224d790..000000000000 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketServerLocation.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.geode.internal.cache; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.geode.DataSerializer; -import org.apache.geode.distributed.internal.ServerLocation; - -/** - * Represents the {@link ServerLocation} of a {@link BucketRegion} - * - * - * @since GemFire 6.5 - */ -@SuppressWarnings("serial") -public class BucketServerLocation extends ServerLocation { - - private byte version; - - private int bucketId; - - private boolean isPrimary; - - public BucketServerLocation() {} - - public BucketServerLocation(int bucketId, int port, String host, boolean isPrimary, - byte version) { - super(host, port); - this.bucketId = bucketId; - this.isPrimary = isPrimary; - this.version = version; - } - - @Override - public void fromData(DataInput in) throws IOException, ClassNotFoundException { - super.fromData(in); - bucketId = DataSerializer.readInteger(in); - isPrimary = DataSerializer.readBoolean(in); - version = DataSerializer.readByte(in); - } - - @Override - public void toData(DataOutput out) throws IOException { - super.toData(out); - DataSerializer.writeInteger(bucketId, out); - DataSerializer.writeBoolean(isPrimary, out); - DataSerializer.writeByte(version, out); - } - - public boolean isPrimary() { - return isPrimary; - } - - public byte getVersion() { - return version; - } - - public int getBucketId() { - return bucketId; - } -} diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketServerLocation66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketServerLocation66.java index 48808b699462..e025218957d6 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketServerLocation66.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketServerLocation66.java @@ -24,10 +24,8 @@ /** * Represents the {@link ServerLocation} of a {@link BucketRegion} * - * * @since GemFire 6.5 */ -@SuppressWarnings("serial") public class BucketServerLocation66 extends ServerLocation { private byte version; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketSetHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketSetHelper.java index 9ea977cf786c..a350e8affd3f 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/BucketSetHelper.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/BucketSetHelper.java @@ -18,37 +18,40 @@ import java.util.HashSet; import java.util.Set; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + public class BucketSetHelper { - public static int get(int[] bucketSet, int index) { + public static int get(final int @NotNull [] bucketSet, int index) { return bucketSet[index + 1]; } - public static int length(int[] bucketSet) { + public static int length(final int @Nullable [] bucketSet) { return null == bucketSet || bucketSet.length < 2 ? 
0 : bucketSet[0]; } - public static void add(int[] bucketSet, int value) { + public static void add(final int @NotNull [] bucketSet, int value) { int index = bucketSet[0] + 1; bucketSet[index] = value; bucketSet[0] = index; } - public static Set toSet(int[] bucketSet) { + public static @NotNull Set toSet(final int @NotNull [] bucketSet) { Set resultSet; int arrayLength = length(bucketSet); if (arrayLength > 0) { - resultSet = new HashSet(arrayLength); + resultSet = new HashSet<>(arrayLength); for (int i = 1; i <= arrayLength; i++) { resultSet.add(bucketSet[i]); } } else { - resultSet = new HashSet(); + resultSet = new HashSet<>(); } return resultSet; } - public static int[] fromSet(Set bucketSet) { + public static int @NotNull [] fromSet(final @NotNull Set bucketSet) { int setSize = bucketSet.size(); int[] resultArray = new int[setSize + 1]; resultArray[0] = setSize; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java index e0cddc8266a4..84765c097498 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CacheDistributionAdvisee.java @@ -46,7 +46,7 @@ public interface CacheDistributionAdvisee extends DistributionAdvisee { * * @return the RegionAttributes of this advisee */ - RegionAttributes getAttributes(); + RegionAttributes getAttributes(); /** * notifies the advisee that a new remote member has registered a profile showing that it is now diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java index b23e4d26daf4..6fb49941333d 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ColocationHelper.java @@ -15,6 +15,7 @@ package org.apache.geode.internal.cache; +import static java.lang.String.format; import static org.apache.geode.cache.Region.SEPARATOR; import static org.apache.geode.internal.cache.LocalRegion.InitializationLevel.ANY_INIT; @@ -30,6 +31,7 @@ import java.util.Set; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.annotations.internal.MutableForTesting; import org.apache.geode.cache.DiskStore; @@ -49,7 +51,7 @@ import org.apache.geode.util.internal.GeodeGlossary; /** - * An utility class to retrieve colocated regions in a colocation hierarchy in various scenarios + * A utility class to retrieve colocated regions in a colocation hierarchy in various scenarios * * @since GemFire 6.0 */ @@ -78,13 +80,13 @@ public static PartitionedRegion getColocatedRegion(final PartitionedRegion parti // the region is not colocated with any region return null; } - Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); + Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); PartitionRegionConfig prConf = (PartitionRegionConfig) prRoot.get(getRegionIdentifier(colocatedWith)); if (prConf == null) { partitionedRegion.getCache().getCancelCriterion().checkCancelInProgress(null); throw new IllegalStateException( - String.format( + format( "Region specified in 'colocated-with' (%s) for region %s does not exist. 
It should be created before setting 'colocated-with' attribute for this region.", colocatedWith, partitionedRegion.getFullPath())); } @@ -97,7 +99,7 @@ public static PartitionedRegion getColocatedRegion(final PartitionedRegion parti } else { partitionedRegion.getCache().getCancelCriterion().checkCancelInProgress(null); throw new IllegalStateException( - String.format( + format( "Region specified in 'colocated-with' (%s) for region %s does not exist. It should be created before setting 'colocated-with' attribute for this region.", colocatedWith, partitionedRegion.getFullPath())); } @@ -119,7 +121,7 @@ public static PartitionedRegion getColocatedRegion(final PartitionedRegion parti public static boolean checkMembersColocation(PartitionedRegion partitionedRegion, InternalDistributedMember member) { List tempcolocatedRegions = new ArrayList<>(); - Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); + Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); PartitionRegionConfig regionConfig = (PartitionRegionConfig) prRoot.get(partitionedRegion.getRegionIdentifier()); // The region was probably concurrently destroyed @@ -127,9 +129,8 @@ public static boolean checkMembersColocation(PartitionedRegion partitionedRegion return false; } tempcolocatedRegions.add(regionConfig); - List colocatedRegions = - new ArrayList<>(tempcolocatedRegions); - PartitionRegionConfig prConf = null; + List colocatedRegions = new ArrayList<>(tempcolocatedRegions); + PartitionRegionConfig prConf; do { PartitionRegionConfig tempToBeColocatedWith = tempcolocatedRegions.remove(0); for (final Object o : prRoot.keySet()) { @@ -140,8 +141,6 @@ public static boolean checkMembersColocation(PartitionedRegion partitionedRegion continue; } if (prConf == null) { - // darrel says: I'm seeing an NPE in this code after pr->rem - // merge so I added this check and continue continue; } if (prConf.getColocatedWith() != null) { @@ -156,7 +155,6 @@ public static boolean checkMembersColocation(PartitionedRegion partitionedRegion } while (!tempcolocatedRegions.isEmpty()); PartitionRegionConfig tempColocatedWith = regionConfig; - prConf = null; while (true) { String colocatedWithRegionName = tempColocatedWith.getColocatedWith(); if (colocatedWithRegionName == null) { @@ -171,9 +169,8 @@ public static boolean checkMembersColocation(PartitionedRegion partitionedRegion } } - // Now check to make sure that all of the colocated regions + // Now check to make sure that all the colocated regions // Have this member. - // We don't need a hostname because the equals method doesn't check it. for (PartitionRegionConfig config : colocatedRegions) { if (config.isColocationComplete() && !config.containsMember(member)) { @@ -181,7 +178,7 @@ public static boolean checkMembersColocation(PartitionedRegion partitionedRegion } } - // Check to make sure all of the persisted regions that are colocated + // Check to make sure all the persisted regions that are colocated // with this region have been created. return !hasOfflineColocatedChildRegions(partitionedRegion); } @@ -200,9 +197,9 @@ private static boolean hasOfflineColocatedChildRegions(PartitionedRegion region) try { InternalCache cache = region.getCache(); Collection stores = cache.listDiskStores(); - // Look through all of the disk stores for offline colocated child regions + // Look through all the disk stores for offline colocated child regions for (DiskStore diskStore : stores) { - // Look at all of the partitioned regions. 
+ // Look at all the partitioned regions. for (Map.Entry entry : ((DiskStoreImpl) diskStore).getAllPRs() .entrySet()) { @@ -254,11 +251,12 @@ private static boolean ignoreUnrecoveredQueue(PartitionedRegion region, String c } /** - * A utility to check to see if a region has been created on all of the VMs that host the regions + * A utility to check to see if a region has been created on + * all the VMs that host the regions * this region is colocated with. */ public static boolean isColocationComplete(PartitionedRegion region) { - Region prRoot = PartitionedRegionHelper.getPRRoot(region.getCache()); + Region prRoot = PartitionedRegionHelper.getPRRoot(region.getCache()); PartitionRegionConfig config = (PartitionRegionConfig) prRoot.get(region.getRegionIdentifier()); // Fix for bug 40075. There is race between this call and the region being concurrently // destroyed. @@ -271,7 +269,7 @@ public static boolean isColocationComplete(PartitionedRegion region) { } /** - * An utility method to retrieve all partitioned regions(excluding self) in a colocation chain
+ * A utility method to retrieve all partitioned regions (excluding self) in a colocation chain
*
* For example, shipmentPR is colocated with orderPR and orderPR is colocated with customerPR
*
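 * (Illustrative sketch, not part of the patch, using the hypothetical chain above
 * where shipmentPR is colocated with orderPR and orderPR with customerPR:
 * getAllColocationRegions(shipmentPR) is expected to return the other members of
 * the chain keyed by full path, e.g. { "/orderPR" -> orderPR, "/customerPR" ->
 * customerPR }, excluding shipmentPR itself.)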
@@ -287,14 +285,14 @@ public static Map getAllColocationRegions( Map colocatedRegions = new HashMap<>(); List colocatedByRegion = partitionedRegion.getColocatedByList(); if (colocatedByRegion.size() != 0) { - List tempcolocatedRegions = + List tempColocatedRegions = new ArrayList<>(colocatedByRegion); do { - PartitionedRegion pRegion = tempcolocatedRegions.remove(0); + PartitionedRegion pRegion = tempColocatedRegions.remove(0); pRegion.waitOnBucketMetadataInitialization(); colocatedRegions.put(pRegion.getFullPath(), pRegion); - tempcolocatedRegions.addAll(pRegion.getColocatedByList()); - } while (!tempcolocatedRegions.isEmpty()); + tempColocatedRegions.addAll(pRegion.getColocatedByList()); + } while (!tempColocatedRegions.isEmpty()); } PartitionedRegion tempColocatedWith = partitionedRegion; while (true) { @@ -315,49 +313,45 @@ public static Map getAllColocationRegions( * @return map of region name to local colocated regions * @since GemFire 5.8Beta */ - public static Map getAllColocatedLocalDataSets( - PartitionedRegion partitionedRegion, InternalRegionFunctionContext context) { + public static Map> getAllColocatedLocalDataSets( + PartitionedRegion partitionedRegion, InternalRegionFunctionContext context) { Map colocatedRegions = getAllColocationRegions(partitionedRegion); - Map colocatedLocalRegions = new HashMap<>(); - for (final Entry stringPartitionedRegionEntry : colocatedRegions - .entrySet()) { - Entry me = (Entry) stringPartitionedRegionEntry; - final Region pr = (Region) me.getValue(); - colocatedLocalRegions.put((String) me.getKey(), context.getLocalDataSet(pr)); + Map> colocatedLocalRegions = new HashMap<>(); + for (final Entry entry : colocatedRegions.entrySet()) { + final Region pr = entry.getValue(); + colocatedLocalRegions.put(entry.getKey(), context.getLocalDataSet(pr)); } return colocatedLocalRegions; } - public static Map constructAndGetAllColocatedLocalDataSet( + public static Map> constructAndGetAllColocatedLocalDataSet( PartitionedRegion region, int[] bucketArray) { - Map colocatedLocalDataSets = new HashMap<>(); + Map> colocatedLocalDataSets = new HashMap<>(); if (region.getColocatedWith() == null && (!region.isColocatedBy())) { - colocatedLocalDataSets.put(region.getFullPath(), - new LocalDataSet(region, bucketArray)); + colocatedLocalDataSets.put(region.getFullPath(), new LocalDataSet<>(region, bucketArray)); return colocatedLocalDataSets; } Map colocatedRegions = ColocationHelper.getAllColocationRegions(region); - for (Region colocatedRegion : colocatedRegions.values()) { + for (PartitionedRegion colocatedRegion : colocatedRegions.values()) { colocatedLocalDataSets.put(colocatedRegion.getFullPath(), - new LocalDataSet((PartitionedRegion) colocatedRegion, bucketArray)); + new LocalDataSet<>(colocatedRegion, bucketArray)); } - colocatedLocalDataSets.put(region.getFullPath(), - new LocalDataSet(region, bucketArray)); + colocatedLocalDataSets.put(region.getFullPath(), new LocalDataSet<>(region, bucketArray)); return colocatedLocalDataSets; } - public static Map getColocatedLocalDataSetsForBuckets( + public static Map> getColocatedLocalDataSetsForBuckets( PartitionedRegion region, Set bucketSet) { if (region.getColocatedWith() == null && (!region.isColocatedBy())) { return Collections.emptyMap(); } - Map ret = new HashMap<>(); + Map> ret = new HashMap<>(); Map colocatedRegions = ColocationHelper.getAllColocationRegions(region); - for (Region colocatedRegion : colocatedRegions.values()) { + for (PartitionedRegion colocatedRegion : colocatedRegions.values()) { 
ret.put(colocatedRegion.getFullPath(), - new LocalDataSet((PartitionedRegion) colocatedRegion, bucketSet)); + new LocalDataSet<>(colocatedRegion, bucketSet)); } return ret; } @@ -375,13 +369,13 @@ public static Map getColocatedLocalDataSetsForBuckets( * @return list of all child partitioned regions colocated with the region * @since GemFire 5.8Beta */ - public static List getColocatedChildRegions( + public static @NotNull List getColocatedChildRegions( PartitionedRegion partitionedRegion) { List colocatedChildRegions = new ArrayList<>(); - Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); + Region prRoot = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); PartitionRegionConfig prConf = null; // final List allPRNamesList = new ArrayList(prRoot.keySet()); - Iterator itr = prRoot.keySet().iterator(); + Iterator itr = prRoot.keySet().iterator(); while (itr.hasNext()) { try { String prName = (String) itr.next(); @@ -395,8 +389,6 @@ public static List getColocatedChildRegions( continue; } if (prConf == null) { - // darrel says: I'm seeing an NPE in this code after pr->rem - // merge so I added this check and continue continue; } int prID = prConf.getPRId(); @@ -425,9 +417,9 @@ public static List getColocatedChildRegions( } } - // Fix for 44484 - Make the list of colocated child regions + // Make the list of colocated child regions // is always in the same order on all nodes. - Collections.sort(colocatedChildRegions, (o1, o2) -> { + colocatedChildRegions.sort((o1, o2) -> { if (o1.isShadowPR() == o2.isShadowPR()) { return o1.getFullPath().compareTo(o2.getFullPath()); } @@ -439,15 +431,14 @@ public static List getColocatedChildRegions( return colocatedChildRegions; } - // TODO why do we have this method here? - public static Function getFunctionInstance(Serializable function) { - Function functionInstance = null; + public static Function getFunctionInstance(Serializable function) { + final Function functionInstance; if (function instanceof String) { functionInstance = FunctionService.getFunction((String) function); Assert.assertTrue(functionInstance != null, "Function " + function + " is not registered on this node "); } else { - functionInstance = (Function) function; + functionInstance = (Function) function; } return functionInstance; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java index 3b3bf40ffeda..5ea8c6b86600 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/CreateRegionProcessor.java @@ -50,6 +50,7 @@ import org.apache.geode.internal.InternalDataSerializer; import org.apache.geode.internal.cache.CacheDistributionAdvisor.CacheProfile; import org.apache.geode.internal.cache.CacheDistributionAdvisor.InitialImageAdvice; +import org.apache.geode.internal.cache.FilterProfile.OperationMessage; import org.apache.geode.internal.cache.LocalRegion.InitializationLevel; import org.apache.geode.internal.cache.event.EventSequenceNumberHolder; import org.apache.geode.internal.cache.ha.ThreadIdentifier; @@ -80,16 +81,15 @@ public CreateRegionProcessor(CacheDistributionAdvisee newRegion) { /** this method tells other members that the region is being created */ @Override public void initializeRegion() { - InternalDistributedSystem system = newRegion.getSystem(); // try 5 times, see 
CreateRegionMessage#skipDuringInitialization for (int retry = 0; retry < 5; retry++) { - Set recps = getRecipients(); + Set recipients = getRecipients(); if (logger.isDebugEnabled()) { logger.debug("Creating region {}", newRegion); } - if (recps.isEmpty()) { + if (recipients.isEmpty()) { if (logger.isDebugEnabled()) { logger.debug("CreateRegionProcessor.initializeRegion, no recipients, msg not sent"); } @@ -99,11 +99,11 @@ public void initializeRegion() { return; } - CreateRegionReplyProcessor replyProc = new CreateRegionReplyProcessor(recps); + CreateRegionReplyProcessor replyProc = new CreateRegionReplyProcessor(recipients); newRegion.registerCreateRegionReplyProcessor(replyProc); boolean useMcast = false; // multicast is disabled for this message for now - CreateRegionMessage msg = getCreateRegionMessage(recps, replyProc, useMcast); + CreateRegionMessage msg = getCreateRegionMessage(recipients, replyProc, useMcast); // since PR buckets can be created during cache entry operations, enable // severe alert processing if we're creating one of them @@ -114,7 +114,7 @@ public void initializeRegion() { newRegion.getDistributionManager().putOutgoing(msg); // this was in a while() loop, which is incorrect use of a reply processor. - // Reply procs are deregistered when they return from waitForReplies + // Reply procs are deregistered when they return from waitForReplies try { // Don't allow a region to be created if the distributed system is // disconnecting @@ -152,17 +152,16 @@ public void initializeRegion() { newRegion.getDistributionAdvisor().setInitialized(); } - protected Set getRecipients() { + protected Set getRecipients() { DistributionAdvisee parent = newRegion.getParentAdvisee(); - Set recps = null; - if (parent == null) { // root region, all recipients + if (parent == null) { + // root region, all recipients InternalDistributedSystem system = newRegion.getSystem(); - recps = system.getDistributionManager().getOtherDistributionManagerIds(); + return system.getDistributionManager().getOtherDistributionManagerIds(); } else { // get recipients that have the parent region defined as distributed.
- recps = getAdvice(); + return getAdvice(); } - return recps; } @Override @@ -170,7 +169,7 @@ public InitialImageAdvice getInitialImageAdvice(InitialImageAdvice previousAdvic return newRegion.getCacheDistributionAdvisor().adviseInitialImage(previousAdvice); } - private Set getAdvice() { + private Set getAdvice() { if (newRegion instanceof BucketRegion) { return ((Bucket) newRegion).getBucketAdvisor().adviseProfileExchange(); } else { @@ -180,7 +179,8 @@ private Set getAdvice() { } } - protected CreateRegionMessage getCreateRegionMessage(Set recps, ReplyProcessor21 proc, + protected CreateRegionMessage getCreateRegionMessage(Set recipients, + ReplyProcessor21 proc, boolean useMcast) { CreateRegionMessage msg = new CreateRegionMessage(); msg.regionPath = newRegion.getFullPath(); @@ -188,7 +188,7 @@ protected CreateRegionMessage getCreateRegionMessage(Set recps, ReplyProcessor21 msg.processorId = proc.getProcessorId(); msg.concurrencyChecksEnabled = newRegion.getAttributes().getConcurrencyChecksEnabled(); msg.setMulticast(useMcast); - msg.setRecipients(recps); + msg.setRecipients(recipients); return msg; } @@ -199,7 +199,7 @@ public void setOnline(InternalDistributedMember target) { class CreateRegionReplyProcessor extends ReplyProcessor21 { - CreateRegionReplyProcessor(Set members) { + CreateRegionReplyProcessor(Set members) { super((InternalDistributedSystem) newRegion.getCache() .getDistributedSystem(), members); } @@ -269,7 +269,8 @@ public void process(DistributionMessage msg) { FilterProfile localFP = ((LocalRegion) newRegion).filterProfile; // localFP can be null and remoteFP not null when upgrading from 7.0.1.14 to 7.0.1.15 if (localFP != null) { - List messages = localFP.getQueuedFilterProfileMsgs(reply.getSender()); + List messages = + localFP.getQueuedFilterProfileMsgs(reply.getSender()); // Thread init level is set since region is used during CQ registration. 
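// A sketch of the save/restore idiom assumed around this call (the restore in a
// finally block is an assumption; only the save is visible in this hunk):
//   final InitializationLevel oldLevel =
//       LocalRegion.setThreadInitLevelRequirement(ANY_INIT);
//   try {
//     // replay queued filter-profile messages; CQ registration may touch a
//     // region that has not finished initializing, which ANY_INIT permits
//   } finally {
//     LocalRegion.setThreadInitLevelRequirement(oldLevel);
//   }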
final InitializationLevel oldLevel = LocalRegion.setThreadInitLevelRequirement(ANY_INIT); @@ -319,8 +320,8 @@ public static class CreateRegionMessage extends HighPriorityDistributionMessage private transient boolean incompatible = false; private transient ReplyException replyException; private transient CacheProfile replyProfile; - private transient ArrayList replyBucketProfiles; - private transient Object eventState; + private transient List replyBucketProfiles; + private transient Map eventState; protected transient boolean severeAlertCompatible; private transient boolean skippedCompatibilityChecks; @@ -560,17 +561,17 @@ protected String checkCompatibility(CacheDistributionAdvisee rgn, CacheProfile p } } - Set otherAsynEventQueueIds = ((LocalRegion) rgn).getVisibleAsyncEventQueueIds(); + Set otherAsyncEventQueueIds = ((LocalRegion) rgn).getVisibleAsyncEventQueueIds(); Set myAsyncEventQueueIds = profile.asyncEventQueueIds; if (!isLocalOrRemoteAccessor(rgn, profile) - && !otherAsynEventQueueIds.equals(myAsyncEventQueueIds)) { + && !otherAsyncEventQueueIds.equals(myAsyncEventQueueIds)) { result = String.format( "Cannot create Region %s with %s async event ids because another cache has the same region defined with %s async event ids", - regionPath, myAsyncEventQueueIds, otherAsynEventQueueIds); + regionPath, myAsyncEventQueueIds, otherAsyncEventQueueIds); } - final PartitionAttributes pa = rgn.getAttributes().getPartitionAttributes(); + final PartitionAttributes pa = rgn.getAttributes().getPartitionAttributes(); if (pa == null && profile.isPartitioned) { result = String.format( @@ -627,8 +628,8 @@ protected String checkCompatibility(CacheDistributionAdvisee rgn, CacheProfile p if (cspResult == null) { if (myProfiles.size() > profile.cacheServiceProfiles.size()) { for (CacheServiceProfile localProfile : myProfiles.values()) { - if (!profile.cacheServiceProfiles.stream() - .anyMatch(remoteProfile -> remoteProfile.getId().equals(localProfile.getId()))) { + if (profile.cacheServiceProfiles.stream() + .noneMatch(remoteProfile -> remoteProfile.getId().equals(localProfile.getId()))) { cspResult = getMissingProfileMessage(localProfile, false); break; } @@ -660,7 +661,7 @@ protected String getMissingProfileMessage(CacheServiceProfile profile, * When many members are started concurrently, it is possible that an accessor or non-version * generating replicate receives CreateRegionMessage before it is initialized, thus preventing * persistent members from starting. We skip compatibilityChecks if the region is not - * initialized, and let other members check compatibility. If all members skipCompatabilit + * initialized, and let other members check compatibility. If all members skipCompatibility * checks, then the CreateRegionMessage should be retried. fixes #45186 */ private boolean skipDuringInitialization(CacheDistributionAdvisee rgn) { @@ -668,7 +669,8 @@ private boolean skipDuringInitialization(CacheDistributionAdvisee rgn) { if (rgn instanceof LocalRegion) { LocalRegion lr = (LocalRegion) rgn; if (!lr.isInitialized()) { - Set recipients = new CreateRegionProcessor(rgn).getRecipients(); + Set recipients = + new CreateRegionProcessor(rgn).getRecipients(); recipients.remove(getSender()); if (!recipients.isEmpty()) { skip = true; @@ -705,7 +707,7 @@ private boolean skipCheckForAccessor(CacheDistributionAdvisee rgn, CacheProfile } /** - * @return true if profile being exchanged or region is an accessor i.e has no storage + * @return true if profile being exchanged or region is an accessor i.e. 
has no storage */ protected static boolean isLocalOrRemoteAccessor(CacheDistributionAdvisee region, CacheProfile profile) { @@ -779,8 +781,8 @@ public String toString() { public static class CreateRegionReplyMessage extends ReplyMessage { protected CacheProfile profile; - protected ArrayList bucketProfiles; - protected Object eventState; + protected List bucketProfiles; + protected Map eventState; /** * Added to fix 42051. If the region is in the middle of being destroyed, return the destroyed * profile @@ -811,7 +813,7 @@ public void fromData(DataInput in, if (size == 0) { bucketProfiles = null; } else { - bucketProfiles = new ArrayList(size); + bucketProfiles = new ArrayList<>(size); for (int i = 0; i < size; i++) { RegionAdvisor.BucketProfileAndId bp = new RegionAdvisor.BucketProfileAndId(); InternalDataSerializer.invokeFromData(bp, in); @@ -842,9 +844,7 @@ public void toData(DataOutput out, } else { int size = bucketProfiles.size(); out.writeInt(size); - for (Object bucketProfile : bucketProfiles) { - RegionAdvisor.BucketProfileAndId bp = - (RegionAdvisor.BucketProfileAndId) bucketProfile; + for (RegionAdvisor.BucketProfileAndId bp : bucketProfiles) { InternalDataSerializer.invokeToData(bp, out); } } @@ -853,7 +853,7 @@ public void toData(DataOutput out, // The isHARegion flag is false here because // we currently only include the event state in the profile // for bucket regions. - EventStateHelper.dataSerialize(out, (Map) eventState, false, getSender()); + EventStateHelper.dataSerialize(out, eventState, false, getSender()); } else { out.writeBoolean(false); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistPeerTXStateStub.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistPeerTXStateStub.java index de1cb78778e9..a30851312649 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistPeerTXStateStub.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistPeerTXStateStub.java @@ -38,8 +38,8 @@ */ public class DistPeerTXStateStub extends PeerTXStateStub implements DistTXCoordinatorInterface { - private ArrayList primaryTransactionalOperations = null; - private ArrayList secondaryTransactionalOperations = null; + private final ArrayList primaryTransactionalOperations = new ArrayList<>(); + private final ArrayList secondaryTransactionalOperations = new ArrayList<>(); private DistTXPrecommitMessage precommitDistTxMsg = null; private DistTXCommitMessage commitDistTxMsg = null; private DistTXRollbackMessage rollbackDistTxMsg = null; @@ -48,8 +48,6 @@ public class DistPeerTXStateStub extends PeerTXStateStub implements DistTXCoordi public DistPeerTXStateStub(TXStateProxy stateProxy, DistributedMember target, InternalDistributedMember onBehalfOfClient) { super(stateProxy, target, onBehalfOfClient); - primaryTransactionalOperations = new ArrayList<>(); - secondaryTransactionalOperations = new ArrayList<>(); } @Override @@ -59,13 +57,11 @@ public void precommit() throws CommitConflictException { + " ,primaryTransactionalOperations=" + primaryTransactionalOperations + " ,secondaryTransactionalOperations=" + secondaryTransactionalOperations); } - assert target != null; - assert primaryTransactionalOperations != null || secondaryTransactionalOperations != null; // [DISTTX] TODO Handle Stats precommitDistTxMsg.setSecondaryTransactionalOperations(secondaryTransactionalOperations); - final Set recipients = Collections.singleton(target); + final Set recipients = getRecipients(); precommitDistTxMsg.setRecipients(recipients); 
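// getRecipients() is the private helper this patch adds at the bottom of the class;
// it wraps the lone transaction target in an immutable singleton set (type
// parameters restored here by assumption, the cast is taken from the patch):
//   private Set<InternalDistributedMember> getRecipients() {
//     return Collections.singleton((InternalDistributedMember) target);
//   }
// precommit(), commit() and rollback() now share this helper instead of each
// building Collections.singleton(target) inline.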
dm.putOutgoing(precommitDistTxMsg); precommitDistTxMsg.resetRecipients(); @@ -82,7 +78,7 @@ public void commit() throws CommitConflictException { // [DISTTX] TODO Handle Stats dm.getStats().incSentCommitMessages(1L); - final Set recipients = Collections.singleton(target); + final Set recipients = getRecipients(); commitDistTxMsg.setRecipients(recipients); dm.putOutgoing(commitDistTxMsg); commitDistTxMsg.resetRecipients(); @@ -94,12 +90,7 @@ public void rollback() { logger.debug("DistPeerTXStateStub.rollback target=" + target); } - // [DISTTX] TODO Handle callbacks - // if (this.internalAfterSendRollback != null) { - // this.internalAfterSendRollback.run(); - // } - - final Set recipients = Collections.singleton(target); + final Set recipients = getRecipients(); rollbackDistTxMsg.setRecipients(recipients); dm.putOutgoing(rollbackDistTxMsg); rollbackDistTxMsg.resetRecipients(); @@ -226,8 +217,6 @@ public void destroyOnRemote(EntryEventImpl event, boolean cacheWrite, Object exp @Override public void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry) { - // logger - // .debug("DistPeerTXStateStub.invalidateExistingEntry", new Throwable()); super.invalidateExistingEntry(event, invokeCallbacks, forceNewEntry); primaryTransactionalOperations.add(new DistTxEntryEvent(event)); } @@ -359,4 +348,9 @@ public boolean isCreatedOnDistTxCoordinator() { public void finalCleanup() { cleanup(); } + + private Set getRecipients() { + return Collections.singleton((InternalDistributedMember) target); + } + } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXStateProxyImplOnCoordinator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXStateProxyImplOnCoordinator.java index 9d5cf4f01b3c..2abaa589c3ae 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXStateProxyImplOnCoordinator.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistTXStateProxyImplOnCoordinator.java @@ -138,9 +138,9 @@ private HashMap getSecondariesAnd for (DistTxEntryEvent dtop : primaryTxOps) { InternalRegion internalRegion = dtop.getRegion(); // replicas or secondaries - Set otherNodes = null; + Set otherNodes = null; if (internalRegion instanceof PartitionedRegion) { - Set allNodes = ((PartitionedRegion) dtop.getRegion()) + Set allNodes = ((PartitionedRegion) dtop.getRegion()) .getRegionAdvisor().getBucketOwners(dtop.getKeyInfo().getBucketId()); allNodes.remove(originalTarget); otherNodes = allNodes; @@ -151,13 +151,13 @@ private HashMap getSecondariesAnd } if (otherNodes != null) { - for (InternalDistributedMember dm : otherNodes) { + for (DistributedMember dm : otherNodes) { // whether the target already exists due to other Tx op on the node DistTXCoordinatorInterface existingDistPeerTXStateStub = target2realDeals.get(dm); if (existingDistPeerTXStateStub == null) { existingDistPeerTXStateStub = secondaryTarget2realDeals.get(dm); if (existingDistPeerTXStateStub == null) { - DistTXCoordinatorInterface newTxStub = null; + final DistTXCoordinatorInterface newTxStub; if (currentNode.equals(dm)) { // [DISTTX] TODO add a test case for this condition? 
newTxStub = new DistTXStateOnCoordinator(this, false, getStatisticsClock()); @@ -295,9 +295,6 @@ public void rollback() { } } - /** - * {@inheritDoc} - */ @Override public TXStateInterface getRealDeal(KeyInfo key, InternalRegion r) { if (r != null) { @@ -401,7 +398,7 @@ public TXStateInterface getRealDeal(DistributedMember t) { */ private DistributedMember getRRTarget(KeyInfo key, InternalRegion r) { if (rrTargets == null) { - rrTargets = new HashMap(); + rrTargets = new HashMap<>(); } DistributedMember m = rrTargets.get(r); if (m == null) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java index 955cb5fd6bea..5243876e6e28 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedCacheOperation.java @@ -631,10 +631,10 @@ protected void _distribute() { if (isPutAll) { ((BucketRegion) region).performPutAllAdjunctMessaging((DistributedPutAllOperation) this, - recipients, adjunctRecipients, filterRouting, processor); + adjunctRecipients, processor); } else if (isRemoveAll) { ((BucketRegion) region).performRemoveAllAdjunctMessaging( - (DistributedRemoveAllOperation) this, recipients, adjunctRecipients, filterRouting, + (DistributedRemoveAllOperation) this, adjunctRecipients, processor); } else { boolean calculateDelta = diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java index 403c07c00906..d39b6a3d19fc 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedPutAllOperation.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.io.DataInput; import java.io.DataOutput; import java.io.Externalizable; @@ -59,12 +61,10 @@ import org.apache.geode.internal.offheap.annotations.Released; import org.apache.geode.internal.offheap.annotations.Retained; import org.apache.geode.internal.offheap.annotations.Unretained; -import org.apache.geode.internal.serialization.ByteArrayDataInput; import org.apache.geode.internal.serialization.DataSerializableFixedID; import org.apache.geode.internal.serialization.DeserializationContext; import org.apache.geode.internal.serialization.KnownVersion; import org.apache.geode.internal.serialization.SerializationContext; -import org.apache.geode.internal.serialization.StaticSerialization; import org.apache.geode.logging.internal.log4j.api.LogService; /** @@ -80,7 +80,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation { public int putAllDataSize; - protected boolean isBridgeOp = false; + protected boolean isBridgeOp; static final byte USED_FAKE_EVENT_ID = 0x01; static final byte NOTIFY_ONLY = 0x02; @@ -88,7 +88,7 @@ public class DistributedPutAllOperation extends AbstractUpdateOperation { static final byte VERSION_TAG = 0x08; static final byte POSDUP = 0x10; static final byte PERSISTENT_TAG = 0x20; - static final byte HAS_CALLBACKARG = 0x40; + // static final byte HAS_CALLBACKARG = 0x40; static final byte HAS_TAILKEY = (byte) 0x80; // flags for CachedDeserializable; additional flags can be combined @@ -98,7 +98,7 @@ public class 
DistributedPutAllOperation extends AbstractUpdateOperation { // private boolean containsCreate = false; - public DistributedPutAllOperation(CacheEvent event, int size, boolean isBridgeOp) { + public DistributedPutAllOperation(CacheEvent event, int size, boolean isBridgeOp) { super(event, ((EntryEventImpl) event).getEventTime(0L)); putAllData = new PutAllEntryData[size]; putAllDataSize = 0; @@ -117,9 +117,7 @@ public PutAllEntryData[] getPutAllEntryData() { } public void setPutAllEntryData(PutAllEntryData[] putAllEntryData) { - for (int i = 0; i < putAllEntryData.length; i++) { - putAllData[i] = putAllEntryData[i]; - } + System.arraycopy(putAllEntryData, 0, putAllData, 0, putAllEntryData.length); putAllDataSize = putAllEntryData.length; } @@ -182,8 +180,8 @@ public void setUseFakeEventId(boolean status) { * operation. This is cached for listener notification purposes. The iterator is guaranteed to * return events in the order they are present in putAllData[] */ - public Iterator eventIterator() { - return new Iterator() { + public Iterator eventIterator() { + return new Iterator() { int position = 0; @Override @@ -193,7 +191,7 @@ public boolean hasNext() { @Override @Unretained - public Object next() { + public EntryEventImpl next() { @Unretained EntryEventImpl ev = getEventForPosition(position); position++; @@ -236,9 +234,10 @@ public EntryEventImpl getEventForPosition(int position) { try { ev.setPossibleDuplicate(entry.isPossibleDuplicate()); if (entry.versionTag != null && region.getConcurrencyChecksEnabled()) { - VersionSource id = entry.versionTag.getMemberID(); + VersionSource id = entry.versionTag.getMemberID(); if (id != null) { - entry.versionTag.setMemberID(ev.getRegion().getVersionVector().getCanonicalId(id)); + entry.versionTag + .setMemberID(uncheckedCast(ev.getRegion().getVersionVector().getCanonicalId(id))); } ev.setVersionTag(entry.versionTag); } @@ -304,7 +303,7 @@ public static class PutAllEntryData { // parallel wan is enabled private Long tailKey = 0L; - public VersionTag versionTag; + public VersionTag> versionTag; transient boolean inhibitDistribution; @@ -534,12 +533,10 @@ public Integer getBucketId() { * change event id into fake event id The algorithm is to change the threadid into * bucketid*MAX_THREAD_PER_CLIENT+oldthreadid. So from the log, we can derive the original * thread id. - * - * @return wether current event id is fake or not new bucket id */ - public boolean setFakeEventID() { + public void setFakeEventID() { if (bucketId < 0) { - return false; + return; } if (!isUsedFakeEventId()) { @@ -550,7 +547,6 @@ public boolean setFakeEventID() { eventID = new EventID(eventID.getMembershipID(), threadId, eventID.getSequenceID()); setUsedFakeEventId(true); } - return true; } public boolean isUsedFakeEventId() { @@ -606,7 +602,7 @@ public void setCallbacksInvoked(boolean callbacksInvoked) { } } - public static class EntryVersionsList extends ArrayList + public static class EntryVersionsList extends ArrayList>> implements DataSerializableFixedID, Externalizable { public EntryVersionsList() { @@ -624,28 +620,6 @@ public static EntryVersionsList create(DataInput in) return newList; } - private boolean extractVersion(PutAllEntryData entry) { - - VersionTag versionTag = entry.versionTag; - // version tag can be null if only keys are sent in InitialImage. - if (versionTag != null) { - add(versionTag); - // Add entry without version tag in entries array. 
- entry.versionTag = null; - return true; - } - - return false; - } - - private VersionTag getVersionTag(int index) { - VersionTag tag = null; - if (size() > 0) { - tag = get(index); - } - return tag; - } - /** * replace null membership IDs in version tags with the given member ID. VersionTags received * from a server may have null IDs because they were operations performed by that server. We @@ -654,7 +628,7 @@ private VersionTag getVersionTag(int index) { * */ public void replaceNullIDs(DistributedMember sender) { - for (VersionTag versionTag : this) { + for (VersionTag> versionTag : this) { if (versionTag != null) { versionTag.replaceNullIDs((InternalDistributedMember) sender); } @@ -680,7 +654,7 @@ public void toData(DataOutput out, if (size() > 0) { flags |= 0x04; hasTags = true; - for (VersionTag tag : this) { + for (VersionTag> tag : this) { if (tag != null) { if (tag instanceof DiskVersionTag) { flags |= 0x20; @@ -699,13 +673,13 @@ public void toData(DataOutput out, if (hasTags) { InternalDataSerializer.writeUnsignedVL(size(), out); - Object2IntOpenHashMap ids = new Object2IntOpenHashMap(size()); + Object2IntOpenHashMap> ids = new Object2IntOpenHashMap<>(size()); int idCount = 0; - for (VersionTag tag : this) { + for (VersionTag> tag : this) { if (tag == null) { out.writeByte(FLAG_NULL_TAG); } else { - VersionSource id = tag.getMemberID(); + VersionSource id = tag.getMemberID(); if (id == null) { out.writeByte(FLAG_FULL_TAG); InternalDataSerializer.invokeToData(tag, out); @@ -719,7 +693,7 @@ public void toData(DataOutput out, } else { out.writeByte(FLAG_TAG_WITH_NUMBER_ID); tag.toData(out, false); - tag.setMemberID(id); + tag.setMemberID(uncheckedCast(id)); InternalDataSerializer.writeUnsignedVL(idNumber - 1, out); } } @@ -746,7 +720,7 @@ public void fromData(DataInput in, if (logger.isTraceEnabled(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE)) { logger.trace(LogMarker.INITIAL_IMAGE_VERSIONED_VERBOSE, "reading {} version tags", size); } - List ids = new ArrayList<>(size); + List> ids = new ArrayList<>(size); for (int i = 0; i < size; i++) { byte entryType = in.readByte(); switch (entryType) { @@ -757,14 +731,14 @@ public void fromData(DataInput in, add(VersionTag.create(persistent, in)); break; case FLAG_TAG_WITH_NEW_ID: - VersionTag tag = VersionTag.create(persistent, in); + VersionTag> tag = VersionTag.create(persistent, in); ids.add(tag.getMemberID()); add(tag); break; case FLAG_TAG_WITH_NUMBER_ID: tag = VersionTag.create(persistent, in); int idNumber = (int) InternalDataSerializer.readUnsignedVL(in); - tag.setMemberID(ids.get(idNumber)); + tag.setMemberID(uncheckedCast(ids.get(idNumber))); add(tag); break; } @@ -789,7 +763,8 @@ public KnownVersion[] getSerializationVersions() { } @Override - protected FilterRoutingInfo getRecipientFilterRouting(Set cacheOpRecipients) { + protected FilterRoutingInfo getRecipientFilterRouting( + Set cacheOpRecipients) { // for putAll, we need to determine the routing information for each event and // create a consolidated routing object representing all events that can be // used for distribution @@ -889,14 +864,13 @@ public PutAllPRMessage createPRMessagesNotifyOnly(int bucketId) { * * @return a HashMap contain PutAllPRMessages, key is bucket id */ - public HashMap createPRMessages() { - // getFilterRecipients(Collections.EMPTY_SET); // establish filter recipient routing information - HashMap prMsgMap = new HashMap(); + public HashMap createPRMessages() { + HashMap prMsgMap = new HashMap<>(); final EntryEventImpl event = getBaseEvent(); for (int i 
= 0; i < putAllDataSize; i++) { Integer bucketId = putAllData[i].bucketId; - PutAllPRMessage prMsg = (PutAllPRMessage) prMsgMap.get(bucketId); + PutAllPRMessage prMsg = prMsgMap.get(bucketId); if (prMsg == null) { prMsg = new PutAllPRMessage(bucketId, putAllDataSize, false, event.isPossibleDuplicate(), !event.isGenerateCallbacks(), event.getCallbackArgument()); @@ -912,8 +886,7 @@ public HashMap createPRMessages() { // Modify the event id, assign new thread id and new sequence id // We have to set fake event id here, because we cannot derive old event id from baseId+idx as - // we - // did in DR's PutAllMessage. + // we did in DR's PutAllMessage. putAllData[i].setFakeEventID(); // we only save the reference in prMsg. No duplicate copy prMsg.addEntry(putAllData[i]); @@ -932,7 +905,7 @@ protected void initMessage(CacheOperationMessage msg, DirectReplyProcessor proc) // if so, cull them out and send a 1-hop message to a replicate that // can generate a version for the operation - RegionAttributes attr = event.getRegion().getAttributes(); + RegionAttributes attr = event.getRegion().getAttributes(); if (attr.getConcurrencyChecksEnabled() && !attr.getDataPolicy().withReplication() && attr.getScope() != Scope.GLOBAL) { if (attr.getDataPolicy() == DataPolicy.EMPTY) { @@ -962,7 +935,6 @@ protected void initMessage(CacheOperationMessage msg, DirectReplyProcessor proc) boolean success = RemotePutAllMessage.distribute((EntryEventImpl) event, versionless, versionless.length); if (success) { - versionless = null; PutAllEntryData[] versioned = selectVersionedEntries(); if (logger.isTraceEnabled()) { logger.trace("Found these remaining versioned entries: {}", @@ -1123,7 +1095,7 @@ public void appendFields(StringBuilder sb) { public void doEntryPut(PutAllEntryData entry, DistributedRegion rgn) { @Released EntryEventImpl ev = PutAllMessage.createEntryEvent(entry, getSender(), context, rgn, - possibleDuplicate, needsRouting, callbackArg, true, skipCallbacks); + possibleDuplicate, callbackArg, true, skipCallbacks); // we don't need to set old value here, because the msg is from remote. 
local old value will // get from next step try { @@ -1146,7 +1118,7 @@ public void doEntryPut(PutAllEntryData entry, DistributedRegion rgn) { @Retained public static EntryEventImpl createEntryEvent(PutAllEntryData entry, InternalDistributedMember sender, ClientProxyMembershipID context, DistributedRegion rgn, - boolean possibleDuplicate, boolean needsRouting, Object callbackArg, boolean originRemote, + boolean possibleDuplicate, Object callbackArg, boolean originRemote, boolean skipCallbacks) { final Object key = entry.getKey(); EventID evId = entry.getEventID(); @@ -1165,13 +1137,6 @@ public static EntryEventImpl createEntryEvent(PutAllEntryData entry, ev.setNewValue(entryValue); ev.setPossibleDuplicate(possibleDuplicate); ev.setVersionTag(entry.versionTag); - // if (needsRouting) { - // FilterProfile fp = rgn.getFilterProfile(); - // if (fp != null) { - // FilterInfo fi = fp.getLocalFilterRouting(ev); - // ev.setLocalFilterInfo(fi); - // } - // } if (entry.filterRouting != null) { InternalDistributedMember id = rgn.getMyId(); ev.setLocalFilterInfo(entry.filterRouting.getFilterInfo(id)); @@ -1224,8 +1189,6 @@ public void fromData(DataInput in, putAllDataSize = (int) InternalDataSerializer.readUnsignedVL(in); putAllData = new PutAllEntryData[putAllDataSize]; if (putAllDataSize > 0) { - final KnownVersion version = StaticSerialization.getVersionForDataStreamOrNull(in); - final ByteArrayDataInput bytesIn = new ByteArrayDataInput(); for (int i = 0; i < putAllDataSize; i++) { putAllData[i] = new PutAllEntryData(in, context, eventId, i); } @@ -1259,7 +1222,7 @@ public void toData(DataOutput out, if (!hasTags && putAllData[i].versionTag != null) { hasTags = true; } - VersionTag tag = putAllData[i].versionTag; + VersionTag> tag = putAllData[i].versionTag; versionTags.add(tag); putAllData[i].versionTag = null; putAllData[i].toData(out, context); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRemoveAllOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRemoveAllOperation.java index 92c3fa827fa3..9e433abcdc10 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRemoveAllOperation.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/DistributedRemoveAllOperation.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -76,7 +78,7 @@ public class DistributedRemoveAllOperation extends AbstractUpdateOperation { public int removeAllDataSize; - protected boolean isBridgeOp = false; + protected boolean isBridgeOp; static final byte USED_FAKE_EVENT_ID = 0x01; static final byte NOTIFY_ONLY = 0x02; @@ -84,10 +86,10 @@ public class DistributedRemoveAllOperation extends AbstractUpdateOperation { static final byte VERSION_TAG = 0x08; static final byte POSDUP = 0x10; static final byte PERSISTENT_TAG = 0x20; - static final byte HAS_CALLBACKARG = 0x40; + // static final byte HAS_CALLBACKARG = 0x40; static final byte HAS_TAILKEY = (byte) 0x80; - public DistributedRemoveAllOperation(CacheEvent event, int size, boolean isBridgeOp) { + public DistributedRemoveAllOperation(CacheEvent event, int size, boolean isBridgeOp) { super(event, ((EntryEventImpl) event).getEventTime(0L)); removeAllData = new RemoveAllEntryData[size]; removeAllDataSize = 0; @@ -106,9 +108,7 @@ public RemoveAllEntryData[] getRemoveAllEntryData() { } public void 
setRemoveAllEntryData(RemoveAllEntryData[] removeAllEntryData) { - for (int i = 0; i < removeAllEntryData.length; i++) { - removeAllData[i] = removeAllEntryData[i]; - } + System.arraycopy(removeAllEntryData, 0, removeAllData, 0, removeAllEntryData.length); removeAllDataSize = removeAllEntryData.length; } @@ -167,8 +167,8 @@ public void setUseFakeEventId(boolean status) { * removeAll operation. This is cached for listener notification purposes. The iterator is * guaranteed to return events in the order they are present in putAllData[] */ - public Iterator eventIterator() { - return new Iterator() { + public Iterator eventIterator() { + return new Iterator() { int position = 0; @Override @@ -178,7 +178,7 @@ public boolean hasNext() { @Override @Unretained - public Object next() { + public EntryEventImpl next() { @Unretained EntryEventImpl ev = getEventForPosition(position); position++; @@ -223,9 +223,10 @@ public EntryEventImpl getEventForPosition(int position) { ev.setPossibleDuplicate(entry.isPossibleDuplicate()); ev.setIsRedestroyedEntry(entry.getRedestroyedEntry()); if (entry.versionTag != null && region.getConcurrencyChecksEnabled()) { - VersionSource id = entry.versionTag.getMemberID(); + VersionSource id = entry.versionTag.getMemberID(); if (id != null) { - entry.versionTag.setMemberID(ev.getRegion().getVersionVector().getCanonicalId(id)); + entry.versionTag + .setMemberID(uncheckedCast(ev.getRegion().getVersionVector().getCanonicalId(id))); } ev.setVersionTag(entry.versionTag); } @@ -283,7 +284,7 @@ public static class RemoveAllEntryData { // parallel wan is enabled private Long tailKey = 0L; - public VersionTag versionTag; + public VersionTag> versionTag; transient boolean inhibitDistribution; @@ -350,8 +351,8 @@ public String toString() { sb.append(", b").append(bucketId); } if (versionTag != null) { - sb.append(",v").append(versionTag.getEntryVersion()) - .append(",rv=" + versionTag.getRegionVersion()); + sb.append(",v").append(versionTag.getEntryVersion()).append(",rv=") + .append(versionTag.getRegionVersion()); } if (filterRouting != null) { sb.append(", ").append(filterRouting); @@ -479,11 +480,10 @@ public Integer getBucketId() { * bucketid*MAX_THREAD_PER_CLIENT+oldthreadid. So from the log, we can derive the original * thread id. 
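 * (Worked example, with an illustrative assumed value MAX_THREAD_PER_CLIENT = 1,000,000:
 * bucket id 12 and original thread id 345 give fake thread id
 * 12 * 1,000,000 + 345 = 12,000,345; integer division by the constant recovers the
 * bucket id, and the remainder recovers the original thread id.)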
* - * @return wether current event id is fake or not new bucket id */ - public boolean setFakeEventID() { + public void setFakeEventID() { if (bucketId < 0) { - return false; + return; } if (!isUsedFakeEventId()) { @@ -494,7 +494,6 @@ public boolean setFakeEventID() { eventID = new EventID(eventID.getMembershipID(), threadId, eventID.getSequenceID()); setUsedFakeEventId(true); } - return true; } public boolean isUsedFakeEventId() { @@ -559,7 +558,8 @@ public void setCallbacksInvoked(boolean callbacksInvoked) { } @Override - protected FilterRoutingInfo getRecipientFilterRouting(Set cacheOpRecipients) { + protected FilterRoutingInfo getRecipientFilterRouting( + Set cacheOpRecipients) { // for removeAll, we need to determine the routing information for each event and // create a consolidated routing object representing all events that can be // used for distribution @@ -699,7 +699,7 @@ protected void initMessage(CacheOperationMessage msg, DirectReplyProcessor proc) // if so, cull them out and send a 1-hop message to a replicate that // can generate a version for the operation - RegionAttributes attr = event.getRegion().getAttributes(); + RegionAttributes attr = event.getRegion().getAttributes(); if (attr.getConcurrencyChecksEnabled() && !attr.getDataPolicy().withReplication() && attr.getScope() != Scope.GLOBAL) { if (attr.getDataPolicy() == DataPolicy.EMPTY) { @@ -729,7 +729,6 @@ protected void initMessage(CacheOperationMessage msg, DirectReplyProcessor proc) boolean success = RemoteRemoveAllMessage.distribute((EntryEventImpl) event, versionless, versionless.length); if (success) { - versionless = null; RemoveAllEntryData[] versioned = selectVersionedEntries(); if (logger.isTraceEnabled()) { logger.trace("Found these remaining versioned entries: {}", @@ -893,7 +892,7 @@ public void appendFields(StringBuilder sb) { public void doEntryRemove(RemoveAllEntryData entry, DistributedRegion rgn) { @Released EntryEventImpl ev = RemoveAllMessage.createEntryEvent(entry, getSender(), context, rgn, - possibleDuplicate, needsRouting, callbackArg, true, skipCallbacks); + possibleDuplicate, callbackArg, true, skipCallbacks); // rgn.getLogWriterI18n().info(String.format("%s", "RemoveAllMessage.doEntryRemove // sender=" + getSender() + // " event="+ev)); @@ -929,7 +928,7 @@ public void doEntryRemove(RemoveAllEntryData entry, DistributedRegion rgn) { @Retained public static EntryEventImpl createEntryEvent(RemoveAllEntryData entry, InternalDistributedMember sender, ClientProxyMembershipID context, DistributedRegion rgn, - boolean possibleDuplicate, boolean needsRouting, Object callbackArg, boolean originRemote, + boolean possibleDuplicate, Object callbackArg, boolean originRemote, boolean skipCallbacks) { final Object key = entry.getKey(); EventID evId = entry.getEventID(); @@ -943,13 +942,6 @@ public static EntryEventImpl createEntryEvent(RemoveAllEntryData entry, } ev.setPossibleDuplicate(possibleDuplicate); ev.setVersionTag(entry.versionTag); - // if (needsRouting) { - // FilterProfile fp = rgn.getFilterProfile(); - // if (fp != null) { - // FilterInfo fi = fp.getLocalFilterRouting(ev); - // ev.setLocalFilterInfo(fi); - // } - // } if (entry.filterRouting != null) { InternalDistributedMember id = rgn.getMyId(); ev.setLocalFilterInfo(entry.filterRouting.getFilterInfo(id)); @@ -1035,7 +1027,7 @@ public void toData(DataOutput out, if (!hasTags && removeAllData[i].versionTag != null) { hasTags = true; } - VersionTag tag = removeAllData[i].versionTag; + VersionTag> tag = removeAllData[i].versionTag; 
versionTags.add(tag); removeAllData[i].versionTag = null; removeAllData[i].serializeTo(out, context); @@ -1068,9 +1060,5 @@ public ClientProxyMembershipID getContext() { return context; } - public RemoveAllEntryData[] getRemoveAllEntryData() { - return removeAllData; - } - } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EntriesSet.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EntriesSet.java index 526dd1aaad84..b139205fdf04 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/EntriesSet.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EntriesSet.java @@ -15,6 +15,8 @@ package org.apache.geode.internal.cache; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.AbstractSet; import java.util.ArrayList; import java.util.Collection; @@ -22,6 +24,8 @@ import java.util.List; import java.util.NoSuchElementException; +import org.jetbrains.annotations.NotNull; + import org.apache.geode.cache.EntryDestroyedException; import org.apache.geode.cache.Region; import org.apache.geode.internal.cache.LocalRegion.IteratorType; @@ -105,11 +109,10 @@ private class EntriesIterator implements Iterator { /** reusable KeyInfo */ protected final KeyInfo keyInfo = new KeyInfo(null, null, null); - @SuppressWarnings("unchecked") protected EntriesIterator() { if (recursive) { // FIFO queue of regions - regions = new ArrayList<>(topRegion.subregions(true)); + regions = new ArrayList<>(uncheckedCast(topRegion.subregions(true))); numSubRegions = regions.size(); } else { regions = null; @@ -170,7 +173,7 @@ private Object moveNext() { return result; } } else { - Region.Entry re = (Region.Entry) view.getEntryForIterator(keyInfo, currRgn, + Region.Entry re = (Region.Entry) view.getEntryForIterator(keyInfo, currRgn, rememberReads, allowTombstones); if (re != null) { try { @@ -242,22 +245,18 @@ public int size() { @Override public Object[] toArray() { - return toArray(null); + return toArray(new Object[0]); } @Override - public Object[] toArray(final Object[] array) { + public Object[] toArray(final Object @NotNull [] array) { checkTX(); final ArrayList temp = new ArrayList<>(size()); final Iterator iter = new EntriesIterator(); while (iter.hasNext()) { temp.add(iter.next()); } - if (array == null) { - return temp.toArray(); - } else { - return temp.toArray(array); - } + return temp.toArray(array); } @@ -274,8 +273,4 @@ public void setIgnoreCopyOnReadForQuery(boolean ignoreCopyOnReadForQuery) { this.ignoreCopyOnReadForQuery = ignoreCopyOnReadForQuery; } - public boolean isIgnoreCopyOnReadForQuery() { - return ignoreCopyOnReadForQuery; - } - } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryOperationImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryOperationImpl.java index 8b01055c4869..9fa420ebeccf 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/EntryOperationImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/EntryOperationImpl.java @@ -25,19 +25,19 @@ * @since GemFire 5.8 * */ -public class EntryOperationImpl implements EntryOperation { +public class EntryOperationImpl implements EntryOperation { - private final Region region; + private final Region region; private final Operation operation; - private final Object value; + private final V value; - private final Object key; + private final K key; private final Object callbackArgument; - public EntryOperationImpl(Region region, Operation operation, Object key, 
Object value, + public EntryOperationImpl(Region region, Operation operation, K key, V value, Object callbackArgument) { this.region = region; this.operation = operation; @@ -53,7 +53,7 @@ public EntryOperationImpl(Region region, Operation operation, Object key, Object * @return the region associated with this object or the region that raised this event. */ @Override - public Region getRegion() { + public Region getRegion() { return region; } @@ -74,7 +74,7 @@ public Operation getOperation() { * @return the key */ @Override - public Object getKey() { + public K getKey() { return key; } @@ -97,7 +97,7 @@ public boolean isCallbackArgumentAvailable() { } @Override - public Object getNewValue() { + public V getNewValue() { return value; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java b/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java index ed52e2419016..75efa6d3b640 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/FilterProfile.java @@ -1743,10 +1743,10 @@ public boolean isLocalProfile() { * @param member whose messages are returned. * @return filter profile messages that are queued for the member. */ - public List getQueuedFilterProfileMsgs(InternalDistributedMember member) { - synchronized (filterProfileMsgQueue) { - if (filterProfileMsgQueue.containsKey(member)) { - return new LinkedList(filterProfileMsgQueue.get(member)); + public List getQueuedFilterProfileMsgs(InternalDistributedMember member) { + synchronized (this.filterProfileMsgQueue) { + if (this.filterProfileMsgQueue.containsKey(member)) { + return new LinkedList<>(this.filterProfileMsgQueue.get(member)); } } return Collections.emptyList(); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java index e385581fbf1c..ea21b38d9550 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/GemFireCacheImpl.java @@ -114,6 +114,7 @@ import io.micrometer.core.instrument.MeterRegistry; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.CancelCriterion; import org.apache.geode.CancelException; @@ -403,7 +404,7 @@ public class GemFireCacheImpl implements InternalCache, InternalClientCache, Has private final DistributionManager dm; - private final ConcurrentMap rootRegions; + private final ConcurrentMap rootRegions; /** * True if this cache is being created by a ClientCacheFactory. 
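// Sketch of the retyped field in the hunk above (the type parameters are an
// assumption, with regions keyed by name):
//   private final ConcurrentMap<String, InternalRegion> rootRegions;
// With the value type declared, the loops in getAllRegions() and
// getApplicationRegions() below can consume InternalRegion values directly instead
// of testing each entry with instanceof and casting.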
@@ -2315,7 +2316,7 @@ boolean doClose(String reason, Throwable systemFailureCause, boolean keepAlive, if (isDebugEnabled) { logger.debug("{}: finishing partitioned region close...", this); } - PartitionedRegion.afterRegionsClosedByCacheClose(this); + PartitionedRegion.afterRegionsClosedByCacheClose(); if (prRoot != null) { // do the PR meta root region last prRoot.handleCacheClose(op); @@ -3192,7 +3193,7 @@ public Region getRegion(String path) { public Set getAllRegions() { Set result = new HashSet<>(); - for (Region region : rootRegions.values()) { + for (InternalRegion region : rootRegions.values()) { if (region instanceof PartitionedRegion) { PartitionedRegion partitionedRegion = (PartitionedRegion) region; PartitionedRegionDataStore dataStore = partitionedRegion.getDataStore(); @@ -3203,10 +3204,9 @@ public Set getAllRegions() { result.add(entry.getValue()); } } - } else if (region instanceof InternalRegion) { - InternalRegion internalRegion = (InternalRegion) region; - result.add(internalRegion); - result.addAll(internalRegion.basicSubregions(true)); + } else { + result.add(region); + result.addAll(region.basicSubregions(true)); } } @@ -3217,14 +3217,13 @@ public Set getAllRegions() { public Set getApplicationRegions() { Set result = new HashSet<>(); - for (Object region : rootRegions.values()) { - InternalRegion internalRegion = (InternalRegion) region; - if (internalRegion.isInternalRegion()) { + for (InternalRegion region : rootRegions.values()) { + if (region.isInternalRegion()) { // Skip internal regions continue; } - result.add(internalRegion); - result.addAll(internalRegion.basicSubregions(true)); + result.add(region); + result.addAll(region.basicSubregions(true)); } return result; @@ -3715,7 +3714,7 @@ public CacheServer addCacheServer() { @Override @VisibleForTesting public boolean removeCacheServer(CacheServer cacheServer) { - boolean removed = allCacheServers.remove(cacheServer); + boolean removed = allCacheServers.remove((InternalCacheServer) cacheServer); sendRemoveCacheServerProfileMessage(); return removed; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java index 42f746088a7d..3ab24a8cd78d 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InitialImageOperation.java @@ -358,7 +358,7 @@ GIIStatus getFromOne(Set recipientSet, boolean target // remote_rvv will be filled with the versions of unfinished keys // then if recoveredRVV is still newer than the filled remote_rvv, do fullGII remote_rvv = received_rvv.getCloneForTransmission(); - keysOfUnfinishedOps = processReceivedRVV(remote_rvv, recoveredRVV, received_rvv); + keysOfUnfinishedOps = processReceivedRVV(remote_rvv, recoveredRVV); if (internalAfterCalculatedUnfinishedOps != null && internalAfterCalculatedUnfinishedOps.getRegionName().equals(region.getName())) { internalAfterCalculatedUnfinishedOps.run(); @@ -820,9 +820,13 @@ boolean processChunk(List entries, InternalDistributedMember sender) // only once during GII life cycle & so it does not matter if the HTree ref changes after the // clear // whenever a conflict is detected in DiskRegion it is Ok to abort the operation + Set foundIds; final DiskRegion diskRegion = region.getDiskRegion(); if (diskRegion != null) { diskRegion.setClearCountReference(); + foundIds = Collections.emptySet(); + } else { + foundIds = new HashSet<>(); 
} try { int entryCount = entries.size(); @@ -979,6 +983,9 @@ record = (tmpValue != Token.TOMBSTONE); } this.entries.initialImagePut(entry.key, lastModified, tmpValue, wasRecovered, false, tag, sender, isSynchronizing); + if (diskRegion == null && tag != null) { + foundIds.add(tag.getMemberID()); + } if (isSynchronizing) { entriesToSynchronize.add(entry); } @@ -998,6 +1005,10 @@ record = (tmpValue != Token.TOMBSTONE); logger.debug("processed these initial image keys: {}", keys); } } + if (diskRegion == null && region.getVersionVector() != null + && !region.getVersionVector().getDepartedMembersSet().isEmpty()) { + region.getVersionVector().removeOldMembers(foundIds); + } if (internalBeforeCleanExpiredTombstones != null && internalBeforeCleanExpiredTombstones.getRegionName().equals(region.getName())) { internalBeforeCleanExpiredTombstones.run(); @@ -1054,29 +1065,19 @@ protected RegionVersionVector getRVVFromProvider(final ClusterDistributionManage * * @param remoteRVV RVV from provider to be filled with unfinished operations * @param localRVV RVV recovered from disk - * @param receivedRVV original RVV from provider to remove departed members * @return set for keys of unfinished operations. */ protected Set processReceivedRVV(RegionVersionVector remoteRVV, - RegionVersionVector localRVV, RegionVersionVector receivedRVV) { + RegionVersionVector localRVV) { if (remoteRVV == null) { return null; } // calculate keys for unfinished ops HashSet keys = new HashSet<>(); - Set departedMemberSet = receivedRVV.getDepartedMembersSet(); boolean isPersistentRegion = region.getDataPolicy().withPersistence(); - Set foundIds; - if (!isPersistentRegion) { - foundIds = new HashSet<>(); - } else { - foundIds = Collections.emptySet(); - } - if ((isPersistentRegion && localRVV.isNewerThanOrCanFillExceptionsFor(remoteRVV)) - || !departedMemberSet.isEmpty()) { + if (isPersistentRegion && localRVV.isNewerThanOrCanFillExceptionsFor(remoteRVV)) { // Only search for unfinished keys when localRVV has something newer // and the region is persistent region. 
- // Search for departed members if region is not persistent region Iterator it = region.getBestIterator(false); int count = 0; VersionSource myId = region.getVersionMember(); @@ -1087,9 +1088,7 @@ protected Set processReceivedRVV(RegionVersionVector remoteRVV, if (id == null) { id = myId; } - if (!isPersistentRegion) { - foundIds.add(id); - } else if (!remoteRVV.contains(id, stamp.getRegionVersion())) { + if (!remoteRVV.contains(id, stamp.getRegionVersion())) { // found an unfinished operation keys.add(mapEntry.getKey()); remoteRVV.recordVersion(id, stamp.getRegionVersion()); @@ -1112,13 +1111,6 @@ protected Set processReceivedRVV(RegionVersionVector remoteRVV, } } } - if (!departedMemberSet.isEmpty()) { - if (localRVV != null) { - localRVV.removeOldMembers(foundIds); - } - receivedRVV.removeOldMembers(foundIds); - remoteRVV.removeOldMembers(foundIds); - } return keys; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalDataView.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalDataView.java index be9665f64e44..07be30949942 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalDataView.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalDataView.java @@ -73,14 +73,14 @@ void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallbacks, boolean containsValueForKey(KeyInfo keyInfo, LocalRegion localRegion); - Entry getEntry(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones); + Entry getEntry(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones); /** * get entry for the key. Called only on farside. * * @return the entry on the remote data store */ - Entry getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones) + Entry getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones) throws DataLocationException; boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue, @@ -156,7 +156,7 @@ Object getEntryForIterator(KeyInfo key, LocalRegion currRgn, boolean rememberRea Object getKeyForIterator(KeyInfo keyInfo, LocalRegion currRgn, boolean rememberReads, boolean allowTombstones); - Set getAdditionalKeysForIterator(LocalRegion currRgn); + Set getAdditionalKeysForIterator(LocalRegion currRgn); /** * @@ -185,7 +185,7 @@ Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLoc * @param allowTombstones whether to include destroyed entries in the result * @return Set of keys in the given bucket */ - Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones); + Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones); void postPutAll(DistributedPutAllOperation putallOp, VersionedObjectList successfulPuts, InternalRegion reg); @@ -193,7 +193,7 @@ void postPutAll(DistributedPutAllOperation putallOp, VersionedObjectList success void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList successfulOps, InternalRegion reg); - Entry accessEntry(KeyInfo keyInfo, LocalRegion localRegion); + Entry accessEntry(KeyInfo keyInfo, LocalRegion localRegion); void updateEntryVersion(EntryEventImpl event) throws EntryNotFoundException; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java index a929ad3ac9c7..3fefa0d3d3c0 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java +++ 
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
index a929ad3ac9c7..3fefa0d3d3c0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/InternalRegion.java
@@ -283,7 +283,8 @@ long basicPutPart2(EntryEventImpl event, RegionEntry re, boolean isInitialized,
 
   int calculateValueSize(Object v);
 
-  void cacheWriteBeforePut(EntryEventImpl event, Set netWriteRecipients, CacheWriter cacheWriter,
+  void cacheWriteBeforePut(EntryEventImpl event, Set<InternalDistributedMember> netWriteRecipients,
+      CacheWriter cacheWriter,
       boolean requireOldValue, Object expectedOldValue);
 
   void updateSizeOnPut(Object key, int oldSize, int newBucketSize);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/KeyInfo.java b/geode-core/src/main/java/org/apache/geode/internal/cache/KeyInfo.java
index ee074be9c9f7..7d90bf50f696 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/KeyInfo.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/KeyInfo.java
@@ -22,9 +22,6 @@
 public class KeyInfo {
 
-  // Rahul: This class should actually be renamed as RoutingInfo or BucketIdInfo
-  // since that is exactly what an instance of this class is.
-
   public static final int UNKNOWN_BUCKET = -1;
 
   private Object key;
@@ -37,17 +34,18 @@ public class KeyInfo {
   private final Object value;
 
   public KeyInfo(Object key, Object value, Object callbackArg) {
-    this.key = key;
-    this.callbackArg = callbackArg;
-    bucketId = UNKNOWN_BUCKET;
-    this.value = value;
+    this(key, value, callbackArg, UNKNOWN_BUCKET);
   }
 
   public KeyInfo(Object key, Object callbackArg, int bucketId) {
+    this(key, null, callbackArg, bucketId);
+  }
+
+  private KeyInfo(Object key, Object value, Object callbackArg, int bucketId) {
     this.key = key;
     this.callbackArg = callbackArg;
     this.bucketId = bucketId;
-    value = null;
+    this.value = value;
   }
 
   public KeyInfo(KeyInfo keyInfo) {
@@ -95,9 +93,6 @@ public String toString() {
    */
   public boolean isCheckPrimary() throws UnsupportedOperationInTransactionException {
     return true;
-    // throw new UnsupportedOperationInTransactionException(
-    //     String.format("precommit() operation %s meant for Dist Tx is not supported",
-    //         "isCheckPrimary"));
   }
 
   /*
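The KeyInfo hunk above is the classic telescoping-constructor cleanup: the public constructors now delegate to a single private canonical constructor, so every field is assigned in exactly one place. A minimal sketch of the same pattern (the class and names below are hypothetical, not Geode code):

    public class RoutingInfo {
      private static final int UNKNOWN_BUCKET = -1;

      private final Object key;
      private final Object value;
      private final int bucketId;

      public RoutingInfo(Object key, Object value) {
        this(key, value, UNKNOWN_BUCKET); // no bucket known yet
      }

      public RoutingInfo(Object key, int bucketId) {
        this(key, null, bucketId); // no value supplied
      }

      // Canonical constructor: fields are assigned exactly once, here.
      private RoutingInfo(Object key, Object value, int bucketId) {
        this.key = key;
        this.value = value;
        this.bucketId = bucketId;
      }
    }

The payoff is that an inconsistency like the old `value = null` versus `this.value = value` mix-up cannot recur, because only one constructor touches the fields.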
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java
index fc0b2c2c5a1f..9503bf218433 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalDataSet.java
@@ -14,6 +14,8 @@
  */
 package org.apache.geode.internal.cache;
 
+import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -64,58 +66,62 @@ import org.apache.geode.internal.cache.snapshot.RegionSnapshotServiceImpl;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 
-public class LocalDataSet implements Region, QueryExecutor {
+public class LocalDataSet<K, V> implements Region<K, V>, QueryExecutor {
 
   private static final Logger logger = LogService.getLogger();
 
   private final PartitionedRegion proxy;
   private final Set<Integer> buckets;
-  private InternalRegionFunctionContext rfContext;
+  private InternalRegionFunctionContext<?> rfContext;
 
-  public LocalDataSet(PartitionedRegion pr, int[] buckets) {
-    proxy = pr;
-    this.buckets = BucketSetHelper.toSet(buckets);
+  public LocalDataSet(PartitionedRegion proxy, int[] buckets) {
+    this(proxy, BucketSetHelper.toSet(buckets));
   }
 
-  public LocalDataSet(PartitionedRegion pr, Set buckets) {
-    proxy = pr;
+  public LocalDataSet(PartitionedRegion proxy, Set<Integer> buckets) {
+    this.proxy = proxy;
     this.buckets = buckets;
   }
 
   @Override
-  public Set entrySet(boolean recursive) {
+  public Set<Region.Entry<?, ?>> entrySet(boolean recursive) {
     return proxy.entrySet(getBucketSet());
   }
 
   @Override
-  public Set entrySet() {
-    return entrySet(false);
+  public Set<Map.Entry<K, V>> entrySet() {
+    return uncheckedCast(entrySet(false));
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Collection values() {
+  public Collection<V> values() {
     proxy.checkReadiness();
     return proxy.new ValuesSet(getBucketSet());
   }
 
-  public Set keys() {
-    return proxy.keySet(getBucketSet());
+  @SuppressWarnings("unchecked")
+  public Set<K> keys() {
+    return (Set<K>) proxy.keySet(getBucketSet());
  }
 
   @Override
-  public Set keySet() {
+  public Set<K> keySet() {
     return keys();
   }
 
-  public Collection localValues() {
+  @SuppressWarnings("unchecked")
+  public Collection<V> localValues() {
    return new LocalEntriesSet(IteratorType.VALUES);
   }
 
-  public Set localEntrySet() {
+  @SuppressWarnings("unchecked")
+  public Set<Region.Entry<K, V>> localEntrySet() {
     return new LocalEntriesSet(IteratorType.ENTRIES);
   }
 
-  public Set localKeys() {
+  @SuppressWarnings("unchecked")
+  public Set<K> localKeys() {
     return new LocalEntriesSet(IteratorType.KEYS);
   }
 
@@ -127,33 +133,33 @@ int getHashKey(Operation op, Object key, Object value, Object callbackArg) {
   }
 
   private boolean isInDataSet(Object key, Object callbackArgument) {
-    Integer bucketIdInt = getHashKey(Operation.CONTAINS_KEY, key, null, callbackArgument);
-    return buckets.contains(bucketIdInt);
+    int bucketId = getHashKey(Operation.CONTAINS_KEY, key, null, callbackArgument);
+    return buckets.contains(bucketId);
   }
 
-  public InternalRegionFunctionContext getFunctionContext() {
+  public InternalRegionFunctionContext<?> getFunctionContext() {
     return rfContext;
   }
 
-  public void setFunctionContext(InternalRegionFunctionContext fContext) {
+  public void setFunctionContext(InternalRegionFunctionContext<?> fContext) {
     rfContext = fContext;
   }
 
   @Override
-  public SelectResults query(String queryPredicate) throws FunctionDomainException,
+  public <E> SelectResults<E> query(String queryPredicate) throws FunctionDomainException,
       TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
     QueryService qs = getCache().getLocalQueryService();
     DefaultQuery query = (DefaultQuery) qs
         .newQuery("select * from " + getFullPath() + " this where " + queryPredicate);
     final ExecutionContext executionContext = new QueryExecutionContext(null, getCache(), query);
     Object[] params = null;
-    return (SelectResults) executeQuery(query, executionContext, params, getBucketSet());
+    return uncheckedCast(executeQuery(query, executionContext, params, getBucketSet()));
   }
 
   @Override
   public Object selectValue(String queryPredicate) throws FunctionDomainException,
       TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
-    SelectResults result = query(queryPredicate);
+    SelectResults<?> result = query(queryPredicate);
     if (result.isEmpty()) {
       return null;
     }
@@ -177,7 +183,7 @@ public Object selectValue(String queryPredicate) throws FunctionDomainException,
 
   @Override
   public Object executeQuery(DefaultQuery query, final ExecutionContext executionContext,
-      Object[] parameters, Set buckets)
+      Object[] parameters, Set<Integer> buckets)
       throws FunctionDomainException, TypeMismatchException, NameResolutionException,
       QueryInvocationTargetException {
     long startTime = 0L;
@@ -245,21 +251,23 @@ public void create(Object key, Object value, Object callbackArgument)
   }
 
   @Override
-  public Region createSubregion(String subregionName, RegionAttributes regionAttributes)
+  public <SK, SV> Region<SK, SV> createSubregion(String subregionName,
+      RegionAttributes<SK, SV> regionAttributes)
       throws RegionExistsException, TimeoutException {
     throw new UnsupportedOperationException();
   }
 
   @Override
-  public Object destroy(Object key)
+  public V destroy(Object key)
       throws TimeoutException, EntryNotFoundException, CacheWriterException {
     return destroy(key, null);
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Object destroy(Object key, Object callbackArgument)
+  public V destroy(Object key, Object callbackArgument)
       throws TimeoutException, EntryNotFoundException, CacheWriterException {
-    return proxy.destroy(key, callbackArgument);
+    return (V) proxy.destroy(key, callbackArgument);
   }
 
   @Override
@@ -293,13 +301,14 @@ public String getFullPath() {
     return proxy.getFullPath();
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public List getInterestList() throws CacheWriterException {
-    return proxy.getInterestList();
+  public List<K> getInterestList() throws CacheWriterException {
+    return (List<K>) proxy.getInterestList();
   }
 
   @Override
-  public List getInterestListRegex() throws CacheWriterException {
+  public List<String> getInterestListRegex() throws CacheWriterException {
     return proxy.getInterestListRegex();
   }
 
@@ -308,8 +317,9 @@ public String getName() {
     return proxy.getName();
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Region getParentRegion() {
+  public <PK, PV> Region<PK, PV> getParentRegion() {
     return proxy.getParentRegion();
   }
 
@@ -323,18 +333,21 @@ public CacheStatistics getStatistics() throws StatisticsDisabledException {
     throw new UnsupportedOperationException();
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Region getSubregion(String path) {
+  public <SK, SV> Region<SK, SV> getSubregion(String path) {
     return proxy.getSubregion(path);
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public RegionAttributes getAttributes() {
+  public RegionAttributes<K, V> getAttributes() {
     return proxy.getAttributes();
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public AttributesMutator getAttributesMutator() {
+  public AttributesMutator<K, V> getAttributesMutator() {
     return proxy.getAttributesMutator();
   }
 
@@ -384,7 +397,7 @@ public int[] getDiskDirSizes() {
   }
 
   @Override
-  public Set subregions(boolean recursive) {
+  public Set<Region<?, ?>> subregions(boolean recursive) {
     return proxy.subregions(recursive);
   }
 
@@ -408,9 +421,10 @@ public void setUserAttribute(Object value) {
     proxy.setUserAttribute(value);
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Object remove(Object key) {
-    return proxy.remove(key);
+  public V remove(Object key) {
+    return (V) proxy.remove(key);
   }
 
   @Override
@@ -457,8 +471,9 @@ public void registerInterestRegex(String regex, InterestResultPolicy policy, boo
     throw new UnsupportedOperationException();
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Set keySetOnServer() {
+  public Set<K> keySetOnServer() {
     return proxy.keySetOnServer();
   }
 
@@ -479,20 +494,18 @@ public void loadSnapshot(InputStream inputStream)
   }
 
   @Override
-  public Map getAll(Collection keys) {
-    return getAll(keys, null);
+  public Map<K, V> getAll(Collection<?> keys) {
+    return getAll(uncheckedCast(keys), null);
   }
 
   @Override
-  public Map getAll(Collection keys, Object callback) {
-    HashMap result = new HashMap();
-    for (Object key : keys) {
+  public <T extends K> Map<T, V> getAll(Collection<T> keys, Object callback) {
+    Map<T, V> result = new HashMap<>();
+    for (T key : keys) {
       try {
         result.put(key, get(key, callback));
       } catch (Exception e) {
-        logger.warn(String.format("The following exception occurred attempting to get key=%s",
-            key),
-            e);
+        logger.warn("The following exception occurred attempting to get key={}", key, e);
       }
     }
     return result;
@@ -557,12 +570,12 @@ public Object put(Object key, Object value, Object callbackArgument)
   }
 
   @Override
-  public void putAll(Map map) {
+  public void putAll(Map<? extends K, ? extends V> map) {
     proxy.putAll(map);
   }
 
   @Override
-  public void putAll(Map map, Object callbackArg) {
+  public void putAll(Map<? extends K, ? extends V> map, Object callbackArg) {
     proxy.putAll(map, callbackArg);
   }
 
@@ -588,8 +601,9 @@ public boolean containsValueForKey(Object key) {
     }
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Entry getEntry(Object key) {
+  public Entry<K, V> getEntry(Object key) {
     if (isInDataSet(key, null)) {
       return proxy.getEntry(key);
     } else {
@@ -603,15 +617,16 @@ public int size() {
   }
 
   @Override
-  public Object get(Object key) throws CacheLoaderException, TimeoutException {
+  public V get(Object key) throws CacheLoaderException, TimeoutException {
     return get(key, null);
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  public Object get(Object key, Object aCallbackArgument)
+  public V get(Object key, Object aCallbackArgument)
       throws TimeoutException, CacheLoaderException {
     if (isInDataSet(key, aCallbackArgument)) {
-      return proxy.get(key, aCallbackArgument);
+      return (V) proxy.get(key, aCallbackArgument);
     } else {
       return null;
     }
@@ -657,42 +672,23 @@ public void registerInterestRegex(String regex, InterestResultPolicy policy, boo
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see java.util.concurrent.ConcurrentMap#putIfAbsent(java.lang.Object, java.lang.Object)
-   */
+  @SuppressWarnings("unchecked")
   @Override
-  public Object putIfAbsent(Object key, Object value) {
-    return proxy.putIfAbsent(key, value);
+  public V putIfAbsent(K key, V value) {
+    return (V) proxy.putIfAbsent(key, value);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see java.util.concurrent.ConcurrentMap#remove(java.lang.Object, java.lang.Object)
-   */
   @Override
   public boolean remove(Object key, Object value) {
     return proxy.remove(key, value);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see java.util.concurrent.ConcurrentMap#replace(java.lang.Object, java.lang.Object)
-   */
+  @SuppressWarnings("unchecked")
   @Override
-  public Object replace(Object key, Object value) {
-    return proxy.replace(key, value);
+  public V replace(K key, V value) {
+    return (V) proxy.replace(key, value);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see java.util.concurrent.ConcurrentMap#replace(java.lang.Object, java.lang.Object,
-   * java.lang.Object)
-   */
   @Override
   public boolean replace(Object key, Object oldValue, Object newValue) {
     return proxy.replace(key, oldValue, newValue);
@@ -704,8 +700,8 @@ public RegionService getRegionService() {
   }
 
   @Override
-  public RegionSnapshotService getSnapshotService() {
-    return new RegionSnapshotServiceImpl(this);
+  public RegionSnapshotService<K, V> getSnapshotService() {
+    return new RegionSnapshotServiceImpl<>(this);
   }
 
   protected class LocalEntriesSet extends EntriesSet {
@@ -714,23 +710,19 @@ public LocalEntriesSet(IteratorType type) {
       super(proxy, false, type, false);
     }
 
-    public LocalEntriesSet() {
-      this(IteratorType.ENTRIES);
-    }
-
     @Override
-    public Iterator iterator() {
+    public Iterator<Object> iterator() {
       return new LocalEntriesSetIterator();
     }
 
     protected class LocalEntriesSetIterator implements Iterator {
-      Iterator curBucketIter = null;
+      Iterator<Map.Entry<?, ?>> curBucketIter = null;
       Integer curBucketId;
      List<Integer> localBuckets = new ArrayList<>(buckets);
       int index = 0;
       int localBucketsSize = localBuckets.size();
       boolean hasNext = false;
-      Object next = null;
+      Object next;
 
       LocalEntriesSetIterator() {
         next = moveNext();
@@ -756,8 +748,7 @@ private Object moveNext() {
         proxy.checkReadiness();
         try {
           for (;;) { // Loop till we get valid value
-            while (curBucketIter == null || !(hasNext = curBucketIter.hasNext())) { // Loop all the
-              // buckets.
+            while (curBucketIter == null || !(hasNext = curBucketIter.hasNext())) {
              if (index >= localBucketsSize) {
                 return null;
               }
@@ -768,12 +759,12 @@ private Object moveNext() {
                     "The Bucket region with id " + curBucketId + " is moved/destroyed.");
               }
               br.waitForData();
-              curBucketIter = br.entrySet().iterator();
+              curBucketIter = uncheckedCast(br.entrySet().iterator());
             }
 
             // Check if there is a valid value.
             if (hasNext) {
-              Map.Entry e = (Map.Entry) curBucketIter.next();
+              Map.Entry<?, ?> e = curBucketIter.next();
               try {
                 if (iterType == IteratorType.VALUES) {
                   if (isKeepSerialized()) {
@@ -828,12 +819,12 @@ public int size() {
   }
 
   @Override
-  public void removeAll(Collection keys) {
+  public void removeAll(Collection<? extends K> keys) {
     proxy.removeAll(keys);
   }
 
   @Override
-  public void removeAll(Collection keys, Object aCallbackArgument) {
+  public void removeAll(Collection<? extends K> keys, Object aCallbackArgument) {
     proxy.removeAll(keys, aCallbackArgument);
   }
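A recurring idiom in the LocalDataSet diff is funnelling unavoidable unchecked casts through `UncheckedUtils.uncheckedCast` instead of scattering `(V)`-style casts and per-site suppressions. A minimal sketch of such a helper, written here only to illustrate the pattern (not the Geode implementation):

    import java.util.ArrayList;
    import java.util.List;

    final class Unchecked {
      private Unchecked() {}

      // One audited suppression point instead of many ad-hoc @SuppressWarnings sites.
      @SuppressWarnings("unchecked")
      static <T> T uncheckedCast(Object o) {
        return (T) o;
      }

      public static void main(String[] args) {
        List<Object> raw = new ArrayList<>();
        raw.add("one");
        // The target type is inferred from the assignment context:
        List<String> strings = uncheckedCast(raw);
        System.out.println(strings.get(0));
      }
    }

The cast is still unchecked at runtime; the gain is that every such hole in the type system is greppable and documented in one place.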
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
index dbc6006088b7..aa569b4a227c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegion.java
@@ -1910,7 +1910,7 @@ public Set basicSubregions(boolean recursive) {
   }
 
   @Override
-  public Set subregions(boolean recursive) {
+  public Set<Region<?, ?>> subregions(boolean recursive) {
     checkReadiness();
     return new SubregionsSet(recursive);
   }
@@ -3165,7 +3165,8 @@ private void cacheWriteBeforeRegionClear(RegionEventImpl event)
   }
 
   @Override
-  public void cacheWriteBeforePut(EntryEventImpl event, Set netWriteRecipients,
+  public void cacheWriteBeforePut(EntryEventImpl event,
+      Set<InternalDistributedMember> netWriteRecipients,
       CacheWriter localWriter, boolean requireOldValue, Object expectedOldValue)
       throws CacheWriterException, TimeoutException {
     Assert.assertTrue(netWriteRecipients == null);
@@ -3980,7 +3981,7 @@ public void unregisterInterestRegex(String regex) {
   }
 
   @Override
-  public List getInterestList() {
+  public List<Object> getInterestList() {
     ServerRegionProxy proxy = getServerProxy();
     if (proxy != null) {
       return proxy.getInterestList(InterestType.KEY);
@@ -3998,14 +3999,15 @@ public List getInterestList() {
    * @param allowTombstones whether to return destroyed entries
    * @return a set of the keys matching the given criterion
    */
-  public Set getKeysWithInterest(final @NotNull InterestType interestType, Object interestArg,
+  public Set<Object> getKeysWithInterest(final @NotNull InterestType interestType,
+      Object interestArg,
       boolean allowTombstones) {
-    Set ret;
+    Set<Object> ret;
     if (interestType == InterestType.REGULAR_EXPRESSION) {
       if (interestArg == null || ".*".equals(interestArg)) {
-        ret = new HashSet(keySet(allowTombstones));
+        ret = new HashSet<>(keySet(allowTombstones));
       } else {
-        ret = new HashSet();
+        ret = new HashSet<>();
         // Handle the regex pattern
         if (!(interestArg instanceof String)) {
           throw new IllegalArgumentException(
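One detail worth noting in the `subregions` signature: the element type is `Region<?, ?>`, so a single set can hold regions with different key/value parameterizations while each element remains a usable, type-checked reference. A small self-contained illustration using `Map` as a stand-in for `Region` (hypothetical demo code):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class SubregionsDemo {
      // Mirrors Set<Region<?, ?>>: elements of any parameterization.
      static int totalSize(Set<Map<?, ?>> subregions) {
        int total = 0;
        for (Map<?, ?> m : subregions) {
          total += m.size(); // type-agnostic reads are fine
        }
        return total;
      }

      public static void main(String[] args) {
        Set<Map<?, ?>> subs = new HashSet<>();
        subs.add(new HashMap<String, Integer>()); // different parameterizations
        subs.add(new HashMap<Long, String>());    // can coexist in one set
        System.out.println(totalSize(subs));      // prints 0
      }
    }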
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegionDataView.java b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegionDataView.java
index 5f45425f9118..72df40da3e70 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegionDataView.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/LocalRegionDataView.java
@@ -32,12 +32,7 @@
  * @since GemFire 6.0tx
  */
 public class LocalRegionDataView implements InternalDataView {
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#getDeserializedValue(java.lang.Object,
-   * org.apache.geode.internal.cache.LocalRegion, boolean)
-   */
+  @Override
   public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boolean updateStats,
       boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent,
@@ -46,12 +41,6 @@ public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boo
         clientEvent, returnTombstones, retainResult);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#txDestroyExistingEntry(org.apache.geode.
-   * internal.cache.EntryEventImpl, boolean)
-   */
   @Override
   public void destroyExistingEntry(EntryEventImpl event, boolean cacheWrite,
       Object expectedOldValue) {
@@ -60,13 +49,6 @@ public void destroyExistingEntry(EntryEventImpl event, boolean cacheWrite,
         expectedOldValue);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#txInvalidateExistingEntry(org.apache.geode.
-   * internal.cache.EntryEventImpl, boolean, boolean)
-   */
   @Override
   public void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallbacks,
       boolean forceNewEntry) {
@@ -88,68 +70,36 @@ public void updateEntryVersion(EntryEventImpl event) throws EntryNotFoundExcepti
     }
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#entryCount(org.apache.geode.internal.cache.
-   * LocalRegion)
-   */
   @Override
   public int entryCount(LocalRegion localRegion) {
     return localRegion.getRegionSize();
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#getValueInVM(java.lang.Object,
-   * org.apache.geode.internal.cache.LocalRegion, boolean)
-   */
   @Override
   public Object getValueInVM(KeyInfo keyInfo, LocalRegion localRegion, boolean rememberRead) {
     return localRegion.nonTXbasicGetValueInVM(keyInfo);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#containsKey(java.lang.Object,
-   * org.apache.geode.internal.cache.LocalRegion)
-   */
   @Override
   public boolean containsKey(KeyInfo keyInfo, LocalRegion localRegion) {
     return localRegion.nonTXContainsKey(keyInfo);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#containsValueForKey(java.lang.Object,
-   * org.apache.geode.internal.cache.LocalRegion)
-   */
   @Override
   public boolean containsValueForKey(KeyInfo keyInfo, LocalRegion localRegion) {
     return localRegion.nonTXContainsValueForKey(keyInfo);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#getEntry(java.lang.Object,
-   * org.apache.geode.internal.cache.LocalRegion)
-   */
   @Override
-  public Entry getEntry(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones) {
+  public Entry<?, ?> getEntry(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones) {
     return localRegion.nonTXGetEntry(keyInfo, false, allowTombstones);
   }
 
   @Override
-  public Entry accessEntry(KeyInfo keyInfo, LocalRegion localRegion) {
+  public Entry<?, ?> accessEntry(KeyInfo keyInfo, LocalRegion localRegion) {
     return localRegion.nonTXGetEntry(keyInfo, true, false);
   }
 
-
   @Override
   public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld,
       Object expectedOldValue, boolean requireOldValue, long lastModified,
@@ -158,12 +108,6 @@ public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld,
         lastModified, overwriteDestroyed);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#putEntry(org.apache.geode.internal.cache.
-   * EntryEventImpl, boolean, boolean, java.lang.Object, boolean, long, boolean)
-   */
   @Override
   public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld,
       Object expectedOldValue, boolean requireOldValue, long lastModified,
@@ -172,23 +116,11 @@ public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld,
         lastModified, overwriteDestroyed, invokeCallbacks, throwsConcurrentModification);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#isStatsDeferred()
-   */
   @Override
   public boolean isDeferredStats() {
     return false;
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#findObject(org.apache.geode.internal.cache.
-   * LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object)
-   */
   @Override
   public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate,
       boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD,
@@ -198,13 +130,6 @@ public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate,
         preferCD, requestingClient, clientEvent, returnTombstones);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#getEntryForIterator(org.apache.geode.internal.
-   * cache.LocalRegion, java.lang.Object, boolean)
-   */
   @Override
   public Region.Entry getEntryForIterator(final KeyInfo keyInfo, final LocalRegion currRgn,
       boolean rememberReads, boolean allowTombstones) {
@@ -215,12 +140,6 @@ public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate,
     return null;
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#getKeyForIterator(java.lang.Object,
-   * org.apache.geode.internal.cache.LocalRegion, boolean)
-   */
   @Override
   public Object getKeyForIterator(final KeyInfo keyInfo, final LocalRegion currRgn,
       boolean rememberReads, boolean allowTombstones) {
@@ -241,25 +160,11 @@ public Object getKeyForIterator(final KeyInfo keyInfo, final LocalRegion currRgn
     return null;
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#getAdditionalKeysForIterator(org.apache.geode.
-   * internal.cache.LocalRegion)
-   */
   @Override
-  public Set getAdditionalKeysForIterator(LocalRegion currRgn) {
+  public Set<?> getAdditionalKeysForIterator(LocalRegion currRgn) {
     return null;
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#getSerializedValue(org.apache.geode.internal.
-   * cache.BucketRegion, java.lang.Object, java.lang.Object)
-   */
   @Override
   public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry,
       ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent,
@@ -280,37 +185,19 @@ public void destroyOnRemote(EntryEventImpl event, boolean cacheWrite, Object exp
     destroyExistingEntry(event, cacheWrite, expectedOldValue);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#invalidateOnRemote(org.apache.geode.internal.
-   * cache.EntryEventImpl, boolean, boolean)
-   */
   @Override
   public void invalidateOnRemote(EntryEventImpl event, boolean invokeCallbacks,
       boolean forceNewEntry) throws DataLocationException {
     invalidateExistingEntry(event, invokeCallbacks, forceNewEntry);
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#getBucketKeys(int)
-   */
   @Override
-  public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) {
+  public Set<?> getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) {
     throw new IllegalStateException();
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.geode.internal.cache.InternalDataView#getEntryOnRemote(java.lang.Object,
-   * org.apache.geode.internal.cache.LocalRegion)
-   */
   @Override
-  public Entry getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones)
+  public Entry<?, ?> getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones)
       throws DataLocationException {
     throw new IllegalStateException();
   }
@@ -330,13 +217,6 @@ public void checkSupportsRegionClear() throws UnsupportedOperationInTransactionE
     // do nothing - this view supports it
   }
 
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.geode.internal.cache.InternalDataView#getRegionKeysForIteration(org.apache.geode.
-   * internal.cache.LocalRegion)
-   */
   @Override
   public Collection getRegionKeysForIteration(LocalRegion currRegion) {
     // return currRegion.getRegionKeysForIteration();
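The bulk of this file's diff deletes IDE-generated `(non-Javadoc)` blocks. Nothing is lost by this: when an overriding method has no doc comment, the javadoc tool inherits the documentation from the overridden interface method. A tiny illustration with hypothetical types:

    interface DataView {
      /**
       * Counts the entries visible to this view.
       *
       * @param regionName the region to count
       * @return the number of entries
       */
      int entryCount(String regionName);
    }

    class LocalView implements DataView {
      // No doc comment needed: javadoc pulls in the interface's
      // description automatically for overriding methods
      // (explicit {@inheritDoc} is only needed to splice text).
      @Override
      public int entryCount(String regionName) {
        return 0; // stub
      }
    }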
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
index 59a0b84bd31d..1f79cb034f32 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PRQueryProcessor.java
@@ -15,6 +15,7 @@
 package org.apache.geode.internal.cache;
 
 import static java.lang.Integer.getInteger;
+import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -32,6 +33,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.logging.log4j.Logger;
+import org.jetbrains.annotations.NotNull;
 
 import org.apache.geode.InternalGemFireException;
 import org.apache.geode.annotations.internal.MakeNotStatic;
@@ -78,34 +80,37 @@ public class PRQueryProcessor {
   @MutableForTesting
   public static int TEST_NUM_THREADS = 0;
 
-  private PartitionedRegionDataStore _prds;
-  private PartitionedRegion pr;
+  private PartitionedRegionDataStore partitionedRegionDataStore;
+  private PartitionedRegion partitionedRegion;
   private final DefaultQuery query;
   private final Object[] parameters;
-  private final List _bucketsToQuery;
-  private final int numBucketsProcessed = 0;
+  private final List<Integer> buckets;
   private volatile ObjectType resultType = null;
 
   private boolean isIndexUsedForLocalQuery = false;
 
-  public PRQueryProcessor(PartitionedRegionDataStore prDS, DefaultQuery query, Object[] parameters,
+  public PRQueryProcessor(PartitionedRegionDataStore partitionedRegionDataStore, DefaultQuery query,
+      Object[] parameters,
       List<Integer> buckets) {
     Assert.assertTrue(!buckets.isEmpty(), "bucket list can not be empty. ");
-    _prds = prDS;
-    _bucketsToQuery = buckets;
-    prDS.partitionedRegion.getCache().getLocalQueryService();
+    this.partitionedRegionDataStore = partitionedRegionDataStore;
+    this.buckets = buckets;
     this.query = query;
     this.parameters = parameters;
+
+    partitionedRegionDataStore.partitionedRegion.getCache().getLocalQueryService();
     PRQueryExecutor.initializeExecutorService();
   }
 
-  public PRQueryProcessor(PartitionedRegion pr, DefaultQuery query, Object[] parameters,
-      List buckets) {
+  public PRQueryProcessor(PartitionedRegion partitionedRegion, DefaultQuery query,
+      Object[] parameters,
+      List<Integer> buckets) {
     Assert.assertTrue(!buckets.isEmpty(), "bucket list can not be empty. ");
-    this.pr = pr;
-    _bucketsToQuery = buckets;
+    this.partitionedRegion = partitionedRegion;
+    this.buckets = buckets;
     this.query = query;
     this.parameters = parameters;
+
     PRQueryExecutor.initializeExecutorService();
   }
 
@@ -115,76 +120,74 @@ public PRQueryProcessor(PartitionedRegion pr, DefaultQuery query, Object[] param
    * @return boolean true if the result is a struct type
    * @throws ForceReattemptException if query should be tried again
   */
-  public boolean executeQuery(Collection resultCollector)
+  public boolean executeQuery(Collection<Collection<Object>> resultCollector)
       throws QueryException, InterruptedException, ForceReattemptException {
     if (NUM_THREADS > 1 || TEST_NUM_THREADS > 1) {
       executeWithThreadPool(resultCollector);
     } else {
-      executeSequentially(resultCollector, _bucketsToQuery);
+      executeSequentially(resultCollector, buckets);
     }
     return resultType.isStructType();
   }
 
-  private void executeWithThreadPool(Collection resultCollector)
+  private void executeWithThreadPool(Collection<Collection<Object>> resultCollector)
      throws QueryException, InterruptedException, ForceReattemptException {
     if (Thread.interrupted()) {
       throw new InterruptedException();
     }
 
-    java.util.List callableTasks = buildCallableTaskList(resultCollector);
+    List<QueryTask> callableTasks = buildCallableTaskList(resultCollector);
     ExecutorService execService = PRQueryExecutor.getExecutorService();
 
     boolean reattemptNeeded = false;
     ForceReattemptException fre = null;
 
-    if (callableTasks != null && !callableTasks.isEmpty()) {
-      List futures = null;
-      futures = execService.invokeAll(callableTasks, 300, TimeUnit.SECONDS);
-
-      if (futures != null) {
-        Iterator itr = futures.iterator();
-        while (itr.hasNext() && !execService.isShutdown() && !execService.isTerminated()) {
-          Future fut = (Future) itr.next();
-          QueryTask.BucketQueryResult bqr = null;
+    if (!callableTasks.isEmpty()) {
+      List<Future<QueryTask.BucketQueryResult>> futures =
+          execService.invokeAll(callableTasks, 300, TimeUnit.SECONDS);
 
-          try {
-            bqr = (QueryTask.BucketQueryResult) fut.get(BUCKET_QUERY_TIMEOUT, TimeUnit.SECONDS);
-            bqr.handleAndThrowException();
-            if (bqr.retry) {
-              reattemptNeeded = true;
-            }
+      Iterator<Future<QueryTask.BucketQueryResult>> itr = futures.iterator();
+      while (itr.hasNext() && !execService.isShutdown() && !execService.isTerminated()) {
+        Future<QueryTask.BucketQueryResult> fut = itr.next();
 
-          } catch (TimeoutException e) {
+        final QueryTask.BucketQueryResult bqr;
+        try {
+          bqr = fut.get(BUCKET_QUERY_TIMEOUT, TimeUnit.SECONDS);
+          bqr.handleAndThrowException();
+          if (bqr.retry) {
+            reattemptNeeded = true;
+          }
+        } catch (TimeoutException e) {
+          throw new InternalGemFireException(
+              String.format("Timed out while executing query, time exceeded %s",
+                  BUCKET_QUERY_TIMEOUT),
+              e);
+        } catch (ExecutionException ee) {
+          Throwable cause = ee.getCause();
+          if (cause instanceof QueryException) {
+            throw (QueryException) cause;
+          } else {
             throw new InternalGemFireException(
-                String.format("Timed out while executing query, time exceeded %s",
-                    BUCKET_QUERY_TIMEOUT),
-                e);
-          } catch (ExecutionException ee) {
-            Throwable cause = ee.getCause();
-            if (cause instanceof QueryException) {
-              throw (QueryException) cause;
-            } else {
-              throw new InternalGemFireException(
-                  "Got unexpected exception while executing query on partitioned region bucket",
-                  cause);
-            }
+                "Got unexpected exception while executing query on partitioned region bucket",
+                cause);
           }
         }
+      }
 
-        CompiledSelect cs = query.getSimpleSelect();
+      CompiledSelect cs = query.getSimpleSelect();
 
-        if (cs != null && (cs.isOrderBy() || cs.isGroupBy())) {
-          ExecutionContext context = new QueryExecutionContext(parameters, pr.getCache());
-          int limit = query.getLimit(parameters);
-          Collection mergedResults = coalesceOrderedResults(resultCollector, context, cs, limit);
-          resultCollector.clear();
-          resultCollector.add(mergedResults);
-        }
+      if (cs != null && (cs.isOrderBy() || cs.isGroupBy())) {
+        ExecutionContext context =
+            new QueryExecutionContext(parameters, partitionedRegion.getCache());
+        int limit = query.getLimit(parameters);
+        Collection<Object> mergedResults = coalesceOrderedResults(resultCollector, context, cs, limit);
+        resultCollector.clear();
+        resultCollector.add(mergedResults);
       }
     }
 
     if (execService == null || execService.isShutdown() || execService.isTerminated()) {
-      _prds.partitionedRegion.checkReadiness();
+      partitionedRegionDataStore.partitionedRegion.checkReadiness();
     }
 
     if (reattemptNeeded) {
@@ -192,20 +195,20 @@ private void executeWithThreadPool(Collection resultCollector)
     }
   }
 
-  private void executeSequentially(Collection resultCollector, List buckets)
-      throws QueryException, InterruptedException, ForceReattemptException {
+  private void executeSequentially(Collection<Collection<Object>> resultCollector,
+      List<Integer> buckets)
+      throws QueryException, ForceReattemptException {
     ExecutionContext context =
-        new QueryExecutionContext(parameters, pr.getCache(), query);
+        new QueryExecutionContext(parameters, partitionedRegion.getCache(), query);
 
     CompiledSelect cs = query.getSimpleSelect();
     int limit = query.getLimit(parameters);
     if (cs != null && cs.isOrderBy()) {
-      for (Integer bucketID : _bucketsToQuery) {
+      for (Integer bucketID : buckets) {
        List<Integer> singleBucket = Collections.singletonList(bucketID);
        context.setBucketList(singleBucket);
        executeQueryOnBuckets(resultCollector, context);
      }
-      Collection mergedResults = coalesceOrderedResults(resultCollector, context, cs, limit);
+      Collection<Object> mergedResults = coalesceOrderedResults(resultCollector, context, cs, limit);
       resultCollector.clear();
       resultCollector.add(mergedResults);
 
@@ -215,22 +218,14 @@ private void executeSequentially(Collection resultCollector, List bu
     }
   }
 
-  private Collection coalesceOrderedResults(Collection results,
+  private Collection<Object> coalesceOrderedResults(Collection<Collection<Object>> results,
       ExecutionContext context, CompiledSelect cs, int limit) {
-    List sortedResults = new ArrayList<>(results.size());
-    // TODO :Asif : Deal with UNDEFINED
-    for (Object o : results) {
-      if (o instanceof Collection) {
-        sortedResults.add((Collection) o);
-      }
-    }
-
-    return new NWayMergeResults(sortedResults, cs.isDistinct(), limit, cs.getOrderByAttrs(),
+    List<Collection<Object>> sortedResults = new ArrayList<>(uncheckedCast(results));
+    return new NWayMergeResults<>(sortedResults, cs.isDistinct(), limit, cs.getOrderByAttrs(),
         context, cs.getElementTypeForOrderByQueries());
   }
 
-  private void executeQueryOnBuckets(Collection resultCollector,
+  private void executeQueryOnBuckets(Collection<Collection<Object>> resultCollector,
       ExecutionContext context)
      throws ForceReattemptException, QueryException {
     // Check if QueryMonitor is enabled, if so add query to be monitored.
@@ -248,8 +243,8 @@ private void executeQueryOnBuckets(Collection resultCollector,
       Object results = query.executeUsingContext(context);
 
       synchronized (resultCollector) {
-        resultType = ((SelectResults) results).getCollectionType().getElementType();
-        resultCollector.add((Collection) results);
+        resultType = ((SelectResults<?>) results).getCollectionType().getElementType();
+        resultCollector.add((Collection<Object>) results);
       }
       isIndexUsedForLocalQuery = ((QueryExecutionContext) context).isIndexUsed();
 
@@ -264,7 +259,7 @@ private void executeQueryOnBuckets(Collection resultCollector,
           "The Region on which query is executed may have been destroyed." + rde.getMessage(),
           rde);
     } catch (QueryException qe) {
       // Check if PR is locally destroyed.
-      if (pr.isLocallyDestroyed || pr.isClosed) {
+      if (partitionedRegion.isLocallyDestroyed || partitionedRegion.isClosed) {
         throw new ForceReattemptException(
             "Local Partition Region or the targeted bucket has been moved");
       }
@@ -276,10 +271,10 @@ private void executeQueryOnBuckets(Collection resultCollector,
     }
   }
 
-  private List buildCallableTaskList(Collection resultsColl) {
+  private @NotNull List<QueryTask> buildCallableTaskList(Collection<Collection<Object>> resultsColl) {
     List<QueryTask> callableTasks = new ArrayList<>();
-    for (Integer bId : _bucketsToQuery) {
-      callableTasks.add(new QueryTask(query, parameters, _prds, bId, resultsColl));
+    for (Integer bId : buckets) {
+      callableTasks.add(new QueryTask(query, parameters, bId, resultsColl));
     }
     return callableTasks;
   }
@@ -307,7 +302,7 @@ static class PRQueryExecutor {
 
     /**
      * Closes the executor service. This is called from
-     * {@link PartitionedRegion#afterRegionsClosedByCacheClose(InternalCache)}
+     * {@link PartitionedRegion#afterRegionsClosedByCacheClose()}
     */
     static synchronized void shutdown() {
       if (execService != null) {
@@ -392,35 +387,32 @@ public KnownVersion[] getSerializationVersions() {
   *
   */
  @SuppressWarnings("synthetic-access")
-  private class QueryTask implements Callable {
+  private class QueryTask implements Callable<QueryTask.BucketQueryResult> {
     private final DefaultQuery query;
     private final Object[] parameters;
-    private final PartitionedRegionDataStore _prDs;
-    private final Integer _bucketId;
-    private final Collection resultColl;
+    private final Integer bucketId;
+    private final Collection<Collection<Object>> results;
 
-    public QueryTask(DefaultQuery query, Object[] parameters, PartitionedRegionDataStore prDS,
-        Integer bucketId, final Collection rColl) {
+    public QueryTask(DefaultQuery query, Object[] parameters, Integer bucketId,
+        final Collection<Collection<Object>> results) {
       this.query = query;
-      _prDs = prDS;
-      _bucketId = bucketId;
-      resultColl = rColl;
+      this.bucketId = bucketId;
+      this.results = results;
       this.parameters = parameters;
     }
 
     @Override
-    public Object call() throws Exception {
-      BucketQueryResult bukResult = new BucketQueryResult(_bucketId);
+    public BucketQueryResult call() throws Exception {
+      BucketQueryResult bukResult = new BucketQueryResult(bucketId);
       try {
-        List bucketList = Collections.singletonList(_bucketId);
+        List<Integer> bucketList = Collections.singletonList(bucketId);
         ExecutionContext context =
-            new QueryExecutionContext(parameters, pr.getCache(), query);
+            new QueryExecutionContext(parameters, partitionedRegion.getCache(), query);
         context.setBucketList(bucketList);
-        executeQueryOnBuckets(resultColl, context);
+        executeQueryOnBuckets(results, context);
       } catch (ForceReattemptException | QueryException | CacheRuntimeException fre) {
         bukResult.setException(fre);
       }
-      // Exception
       return bukResult;
     }
 
@@ -430,40 +422,40 @@ public Object call() throws Exception {
      */
     private class BucketQueryResult {
 
-      private final int _buk;
-      private Exception _ex = null;
+      private final int bucketId;
+      private Exception exception = null;
       public boolean retry = false;
 
       public BucketQueryResult(int bukId) {
-        _buk = bukId;
+        bucketId = bukId;
       }
 
       public Exception getException() {
-        return _ex;
+        return exception;
       }
 
       public boolean exceptionOccurred() {
-        return _ex != null;
+        return exception != null;
      }
 
       public void setException(Exception e) {
-        _ex = e;
+        exception = e;
       }
 
       public Integer getBucketId() {
-        return _buk;
+        return bucketId;
       }
 
       public boolean isReattemptNeeded() {
-        return _ex instanceof ForceReattemptException;
+        return exception instanceof ForceReattemptException;
      }
 
       public void handleAndThrowException() throws QueryException {
-        if (_ex != null) {
-          if (_ex instanceof QueryException) {
-            throw (QueryException) _ex;
-          } else if (_ex instanceof CacheRuntimeException) {
-            throw (CacheRuntimeException) _ex;
+        if (exception != null) {
+          if (exception instanceof QueryException) {
+            throw (QueryException) exception;
+          } else if (exception instanceof CacheRuntimeException) {
+            throw (CacheRuntimeException) exception;
           }
         }
       }
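The `executeWithThreadPool` rewrite leans on two `ExecutorService` guarantees: `invokeAll(tasks, timeout, unit)` never returns null, and typing the task as `Callable<T>` makes each `Future<T>.get()` cast-free. A self-contained sketch of the same fan-out-and-collect shape (all names here are hypothetical, not Geode's):

    import java.util.List;
    import java.util.concurrent.*;

    public class FanOutDemo {
      record BucketResult(int bucketId, boolean retry) {}

      public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
          // Typed tasks: no (BucketResult) casts needed on the way out.
          List<Callable<BucketResult>> tasks = List.of(
              () -> new BucketResult(0, false),
              () -> new BucketResult(1, true));

          // invokeAll waits for completion and returns a non-null list.
          List<Future<BucketResult>> futures = pool.invokeAll(tasks, 300, TimeUnit.SECONDS);

          boolean reattemptNeeded = false;
          for (Future<BucketResult> fut : futures) {
            try {
              BucketResult r = fut.get(60, TimeUnit.SECONDS);
              reattemptNeeded |= r.retry();
            } catch (ExecutionException ee) {
              // Unwrap the task's own failure before rethrowing.
              throw new RuntimeException(ee.getCause());
            }
          }
          System.out.println("reattempt needed: " + reattemptNeeded);
        } finally {
          pool.shutdown();
        }
      }
    }

Dropping the null checks on `callableTasks` and `futures` is therefore a behavior-preserving simplification, not a loosening of error handling.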
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
index 03dd3de0271f..8f836de27f3f 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegion.java
@@ -14,9 +14,13 @@
  */
 package org.apache.geode.internal.cache;
 
+import static java.lang.String.format;
 import static java.lang.System.lineSeparator;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptySet;
 import static java.util.Objects.requireNonNull;
 import static java.util.stream.Collectors.toSet;
+import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -27,7 +31,6 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Hashtable;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -106,7 +109,6 @@
 import org.apache.geode.cache.partition.PartitionNotAvailableException;
 import org.apache.geode.cache.partition.PartitionRegionHelper;
 import org.apache.geode.cache.persistence.PartitionOfflineException;
-import org.apache.geode.cache.persistence.PersistentID;
 import org.apache.geode.cache.query.FunctionDomainException;
 import org.apache.geode.cache.query.Index;
 import org.apache.geode.cache.query.IndexCreationException;
@@ -120,7 +122,6 @@
 import org.apache.geode.cache.query.QueryInvocationTargetException;
 import org.apache.geode.cache.query.SelectResults;
 import org.apache.geode.cache.query.TypeMismatchException;
-import org.apache.geode.cache.query.internal.Bag;
 import org.apache.geode.cache.query.internal.CompiledSelect;
 import org.apache.geode.cache.query.internal.DefaultQuery;
 import org.apache.geode.cache.query.internal.ExecutionContext;
@@ -180,7 +181,6 @@
 import org.apache.geode.internal.cache.execute.PartitionedRegionFunctionResultWaiter;
 import org.apache.geode.internal.cache.execute.RegionFunctionContextImpl;
 import org.apache.geode.internal.cache.execute.ServerToClientFunctionResultSender;
-import org.apache.geode.internal.cache.ha.ThreadIdentifier;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage;
 import org.apache.geode.internal.cache.partitioned.ContainsKeyValueMessage.ContainsKeyValueResponse;
 import org.apache.geode.internal.cache.partitioned.DestroyMessage;
@@ -228,6 +228,8 @@
 import org.apache.geode.internal.cache.partitioned.colocation.ColocationLogger;
 import org.apache.geode.internal.cache.partitioned.colocation.ColocationLoggerFactory;
 import org.apache.geode.internal.cache.persistence.PRPersistentConfig;
+import org.apache.geode.internal.cache.persistence.PersistentMemberID;
+import org.apache.geode.internal.cache.persistence.PersistentMemberPattern;
 import org.apache.geode.internal.cache.tier.InterestType;
 import org.apache.geode.internal.cache.tier.sockets.BaseCommand;
 import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID;
@@ -235,6 +237,7 @@
 import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList;
 import org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException;
 import org.apache.geode.internal.cache.versions.RegionVersionVector;
+import org.apache.geode.internal.cache.versions.VersionSource;
 import org.apache.geode.internal.cache.versions.VersionStamp;
 import org.apache.geode.internal.cache.versions.VersionTag;
 import org.apache.geode.internal.cache.wan.AbstractGatewaySender;
@@ -261,6 +264,7 @@
  * configurable level (for high availability) and placed on multiple VMs for improved performance
  * and increased storage capacity.
  */
+@SuppressWarnings({"unchecked", "deprecation"})
 public class PartitionedRegion extends LocalRegion
     implements CacheDistributionAdvisee, QueryExecutor {
 
@@ -305,20 +309,6 @@ public class PartitionedRegion extends LocalRegion
   @MutableForTesting
   public static boolean BEFORE_CALCULATE_STARTING_BUCKET_FLAG = false;
 
-  /**
-   * Thread specific random number
-   */
-  private static final ThreadLocal threadRandom = new ThreadLocal() {
-    @Override
-    protected Object initialValue() {
-      int i = RANDOM.nextInt();
-      if (i < 0) {
-        i = -1 * i;
-      }
-      return i;
-    }
-  };
-
   /**
    * Global Region for storing PR config ( PRName->PRConfig). This region would be used to resolve
    * PR name conflict.*
@@ -433,11 +423,13 @@ protected Object initialValue() {
   private final AdvisorListener advisorListener = new AdvisorListener();
 
   /*
-   * Map containing <IndexTask, FutureTask<Index> or Index>. IndexTask represents an index thats
-   * completely created or one thats in create phase. This is done in order to avoid synchronization
+   * Map containing <IndexTask, FutureTask<Index> | Index>. IndexTask represents an
+   * index that's
+   * completely created or one that's in create phase. This is done in order to avoid
+   * synchronization
    * on the indexes.
   */
-  private final ConcurrentMap indexes = new ConcurrentHashMap();
+  private final ConcurrentMap<Object, Object> indexes = new ConcurrentHashMap<>();
 
   private volatile boolean recoveredFromDisk;
 
@@ -503,12 +495,8 @@ public static PRIdMap getPrIdToPR() {
    * Byte 0 = no NWHOP Byte 1 = NWHOP to servers in same server-grp Byte 2 = NWHOP tp servers in
    * other server-grp
    */
-  private static final ThreadLocal networkHopType = new ThreadLocal() {
-    @Override
-    protected Byte initialValue() {
-      return (byte) NETWORK_HOP_NONE;
-    }
-  };
+  private static final ThreadLocal<Byte> networkHopType =
+      ThreadLocal.withInitial(() -> (byte) NETWORK_HOP_NONE);
 
   public void clearNetworkHopData() {
     networkHopType.remove();
@@ -529,12 +517,8 @@ public byte getNetworkHopType() {
     return networkHopType.get();
   }
 
-  private static final ThreadLocal metadataVersion = new ThreadLocal() {
-    @Override
-    protected Byte initialValue() {
-      return ClientMetadataService.INITIAL_VERSION;
-    }
-  };
+  private static final ThreadLocal<Byte> metadataVersion =
+      ThreadLocal.withInitial(() -> (byte) ClientMetadataService.INITIAL_VERSION);
 
   private void setMetadataVersion(Byte value) {
     metadataVersion.set(value);
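`ThreadLocal.withInitial`, used in the two hunks above, replaces a multi-line anonymous subclass with a supplier lambda; behavior is identical, including lazy per-thread initialization. A quick standalone illustration:

    import java.util.concurrent.atomic.AtomicInteger;

    public class ThreadLocalDemo {
      private static final AtomicInteger initCalls = new AtomicInteger();

      // Each thread lazily gets its own value on its first get().
      private static final ThreadLocal<Byte> networkHop =
          ThreadLocal.withInitial(() -> {
            initCalls.incrementAndGet();
            return (byte) 0;
          });

      public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(() -> networkHop.set((byte) 2));
        t.start();
        t.join();
        // The other thread's set() never touches this thread's copy:
        System.out.println(networkHop.get()); // 0
        System.out.println(initCalls.get());  // 1 (set() bypasses the initializer)
      }
    }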
@@ -634,7 +618,7 @@ public T computeWithPrimaryLocked(Object key, Callable callable) throws E
   }
 
-  public static class PRIdMap extends HashMap {
+  public static class PRIdMap extends HashMap<Integer, Object> {
     private static final long serialVersionUID = 3667357372967498179L;
     public static final String DESTROYED = "Partitioned Region Destroyed";
 
@@ -653,23 +637,21 @@ public Object get(Object key) {
     }
 
     public Object getRegion(Object key) throws PRLocallyDestroyedException {
-      Assert.assertTrue(key instanceof Integer);
-
       Object o = super.get(key);
       if (o == DESTROYED) {
         throw new RegionDestroyedException(
-            String.format("Region for prId= %s is destroyed",
+            format("Region for prId= %s is destroyed",
                 key),
             NO_PATH_FOUND);
       }
       if (o == LOCALLY_DESTROYED) {
         throw new PRLocallyDestroyedException(
-            String.format("Region with prId= %s is locally destroyed on this node",
+            format("Region with prId= %s is locally destroyed on this node",
                 key));
       }
       if (o == FAILED_REGISTRATION) {
         throw new PRLocallyDestroyedException(
-            String.format("Region with prId= %s failed initialization on this node",
+            format("Region with prId= %s failed initialization on this node",
                 key));
       }
       return o;
@@ -677,15 +659,15 @@ public Object getRegion(Object key) throws PRLocallyDestroyedException {
 
     @Override
     public Object remove(final Object key) {
-      return put(key, DESTROYED, true);
+      return put((Integer) key, DESTROYED, true);
     }
 
     @Override
-    public Object put(final Object key, final Object value) {
+    public Object put(final Integer key, final Object value) {
       return put(key, value, true);
     }
 
-    public Object put(final Object key, final Object value, boolean sendIdentityRequestMessage) {
+    public Object put(final Integer key, final Object value, boolean sendIdentityRequestMessage) {
       if (cleared) {
         cleared = false;
       }
@@ -698,13 +680,12 @@ public Object put(final Object key, final Object value, boolean sendIdentityRequ
         throw new NullPointerException(
             "null value not allowed for prIdToPR Map");
       }
-      Assert.assertTrue(key instanceof Integer);
       if (sendIdentityRequestMessage) {
-        IdentityRequestMessage.setLatestId((Integer) key);
+        IdentityRequestMessage.setLatestId(key);
       }
       if ((super.get(key) == DESTROYED) && (value instanceof PartitionedRegion)) {
         throw new PartitionedRegionException(
-            String.format("Can NOT reuse old Partitioned Region Id= %s",
+            format("Can NOT reuse old Partitioned Region Id= %s",
                 key));
       }
       return super.put(key, value);
@@ -718,10 +699,10 @@ public void clear() {
 
     public synchronized String dump() {
       StringBuilder sb = new StringBuilder("prIdToPR Map@");
-      sb.append(System.identityHashCode(prIdToPR)).append(':').append(lineSeparator());
-      Map.Entry mapEntry;
-      for (Iterator iterator = prIdToPR.entrySet().iterator(); iterator.hasNext();) {
-        mapEntry = (Map.Entry) iterator.next();
+      sb.append(System.identityHashCode(this)).append(':').append(lineSeparator());
+      for (final Iterator<Entry<Integer, Object>> iterator = entrySet().iterator(); iterator
+          .hasNext();) {
+        final Entry<Integer, Object> mapEntry = iterator.next();
         sb.append(mapEntry.getKey()).append("=>").append(mapEntry.getValue());
         if (iterator.hasNext()) {
           sb.append(lineSeparator());
@@ -758,8 +739,7 @@ public synchronized String dump() {
 
   private byte fixedPASet;
 
-  private final List colocatedByList =
-      new CopyOnWriteArrayList<>();
+  private final List<PartitionedRegion> colocatedByList = new CopyOnWriteArrayList<>();
 
   private final PartitionListener[] partitionListeners;
 
@@ -782,7 +762,7 @@ public interface RegionAdvisorFactory {
    * and also by invoking Cache.createRegion(). (Cache.xml etc to be added)
    */
   public PartitionedRegion(String regionName,
-      RegionAttributes regionAttributes,
+      RegionAttributes<?, ?> regionAttributes,
       LocalRegion parentRegion,
       InternalCache cache,
       InternalRegionArguments internalRegionArgs,
@@ -959,7 +939,7 @@ public AbstractGatewaySender getParallelGatewaySender() {
   public Set<String> getParallelGatewaySenderIds() {
     Set<String> regionGatewaySenderIds = getAllGatewaySenderIds();
     if (regionGatewaySenderIds.isEmpty()) {
-      return Collections.emptySet();
+      return emptySet();
     }
     Set<GatewaySender> cacheGatewaySenders = getCache().getAllGatewaySenders();
     Set<String> parallelGatewaySenderIds = new HashSet<>();
@@ -996,7 +976,7 @@ && supportsConcurrencyChecks()) {
         Object[] prms = new Object[] {getFullPath(), getTotalNumberOfBuckets(),
             config.getTotalNumBuckets()};
         throw new IllegalStateException(
-            String.format(
+            format(
                 "For partition region %s,total-num-buckets %s should not be changed. Previous configured number is %s.",
                 prms));
       }
@@ -1007,13 +987,13 @@ && supportsConcurrencyChecks()) {
         Object[] prms = new Object[] {getFullPath(), colocatedWith, config.getColocatedWith()};
         DiskAccessException dae = new DiskAccessException(
-            String.format(
+            format(
                "A DiskAccessException has occurred while writing to the disk for region %s. The region will be closed.",
                getFullPath()),
            null, diskStore);
         diskStore.handleDiskAccessException(dae);
         throw new IllegalStateException(
-            String.format(
+            format(
                "For partition region %s, cannot change colocated-with to \"%s\" because there is persistent data with different colocation. Previous configured value is \"%s\".",
                prms));
       }
@@ -1165,15 +1145,14 @@ private void markRecoveredRecursively(PartitionedRegion region) {
   @Override
   public void postCreateRegion() {
     super.postCreateRegion();
-    CacheListener[] listeners = fetchCacheListenersField();
+    CacheListener<?, ?>[] listeners = fetchCacheListenersField();
     if (listeners != null && listeners.length > 0) {
-      Set others = getRegionAdvisor().adviseGeneric();
-      for (final CacheListener listener : listeners) {
+      Set<InternalDistributedMember> others = getRegionAdvisor().adviseGeneric();
+      for (final CacheListener<?, ?> listener : listeners) {
         if (listener instanceof RegionMembershipListener) {
-          RegionMembershipListener rml = (RegionMembershipListener) listener;
+          RegionMembershipListener<Object, Object> rml =
+              (RegionMembershipListener<Object, Object>) listener;
           try {
-            DistributedMember[] otherDms = new DistributedMember[others.size()];
-            others.toArray(otherDms);
+            final DistributedMember[] otherDms = others.toArray(new DistributedMember[0]);
             rml.initialMembers(this, otherDms);
           } catch (VirtualMachineError err) {
             SystemFailure.initiateFailure(err);
@@ -1293,9 +1272,8 @@ public void addGatewaySenderId(String gatewaySenderId) {
 
   public void updatePRConfigWithNewSetOfAsynchronousEventDispatchers(
       Set<String> asynchronousEventDispatchers) {
-    updatePartitionRegionConfig(prConfig -> {
-      prConfig.setGatewaySenderIds(asynchronousEventDispatchers);
-    });
+    updatePartitionRegionConfig(
+        prConfig -> prConfig.setGatewaySenderIds(asynchronousEventDispatchers));
   }
 
   public void updatePRConfigWithNewGatewaySenderAfterAssigningBuckets(String aeqId) {
@@ -1388,7 +1366,7 @@ public void remoteRegionInitialized(CacheProfile profile) {
    *
    * @param ra Region attributes
    */
-  private void initializeDataStore(RegionAttributes ra) {
+  private void initializeDataStore(RegionAttributes<?, ?> ra) {
     setDataStore(
         PartitionedRegionDataStore.createDataStore(cache, this, ra.getPartitionAttributes(),
             getStatisticsClock()));
@@ -1414,8 +1392,7 @@ protected DistributedLockService getPartitionedRegionLockService() {
   private void registerPartitionedRegion(boolean storesData) {
     // Register this ParitionedRegion. First check if the ParitionedRegion
     // entry already exists globally.
-    PartitionRegionConfig prConfig = null;
-    PartitionAttributes prAttribs = getAttributes().getPartitionAttributes();
+    final PartitionAttributes prAttribs = getAttributes().getPartitionAttributes();
     if (storesData) {
       if (fixedPAttrs != null) {
         node.setPRType(Node.FIXED_PR_DATASTORE);
@@ -1440,7 +1417,7 @@ private void registerPartitionedRegion(boolean storesData) {
       rl.lock();
       checkReadiness();
 
-      prConfig = getPRRoot().get(getRegionIdentifier());
+      PartitionRegionConfig prConfig = getPRRoot().get(getRegionIdentifier());
 
       if (prConfig == null) {
         validateParallelAsynchronousEventDispatcherIds();
@@ -1520,7 +1497,7 @@ prAttribs, getScope(), getAttributes().getEvictionAttributes(),
       // is still usable:
       SystemFailure.checkFailure();
       String registerErrMsg =
-          String.format(
+          format(
              "An exception was caught while registering PartitionedRegion %s. dumpPRId: %s",
              getFullPath(), prIdToPR.dump());
       try {
@@ -1616,11 +1593,11 @@ public void validateParallelAsynchronousEventDispatcherIds(
       // partitioned region.
       if (getDataPolicy().withPersistence()) {
         if ((sender != null) && (!sender.isPersistenceEnabled())) {
-          throw new GatewaySenderConfigurationException(String.format(
+          throw new GatewaySenderConfigurationException(format(
              "Non persistent gateway sender %s can not be attached to persistent region %s",
              dispatcherId, getFullPath()));
         } else if ((asyncEventQueue != null) && (!asyncEventQueue.isPersistent())) {
-          throw new AsyncEventQueueConfigurationException(String.format(
+          throw new AsyncEventQueueConfigurationException(format(
              "Non persistent asynchronous event queue %s can not be attached to persistent region %s",
              dispatcherId, getFullPath()));
         }
@@ -1646,7 +1623,7 @@ public void validateParallelAsynchronousEventDispatcherIds(
           if (colocationMap.containsValue(leader)) {
             continue;
           } else {
-            throw new IllegalStateException(String.format(
+            throw new IllegalStateException(format(
                "Non colocated regions %s, %s cannot have the same parallel %s id %s configured.",
                getFullPath(), config.getFullPath(),
                (asyncEventQueue != null ? "async event queue" : "gateway sender"),
@@ -1654,7 +1631,7 @@ public void validateParallelAsynchronousEventDispatcherIds(
           }
         }
       } else {
-        throw new IllegalStateException(String.format(
+        throw new IllegalStateException(format(
            "Non colocated regions %s, %s cannot have the same parallel %s id %s configured.",
            getFullPath(), config.getFullPath(),
            (asyncEventQueue != null ? "async event queue" : "gateway sender"), dispatcherId));
@@ -1689,11 +1666,12 @@ void setRecoveredFromDisk() {
    */
   public void checkPROffline() throws PartitionOfflineException {
     if (getDataPolicy().withPersistence() && !recoveredFromDisk) {
-      Set persistIds =
-          new HashSet(getRegionAdvisor().advisePersistentMembers().values());
+      final Set<PersistentMemberID> persistIds =
+          new HashSet<>(getRegionAdvisor().advisePersistentMembers().values());
       persistIds.removeAll(getRegionAdvisor().adviseInitializedPersistentMembers().values());
-      throw new PartitionOfflineException(persistIds,
-          String.format("Partitioned Region %s is offline due to unrecovered persistent data, %s",
+      throw new PartitionOfflineException(
+          persistIds.stream().map(PersistentMemberPattern::new).collect(toSet()),
+          format("Partitioned Region %s is offline due to unrecovered persistent data, %s",
              getFullPath(), persistIds));
     }
   }
@@ -1748,7 +1726,7 @@ void executeColocationCallbacks() {
    * @param allowTombstones - whether a tombstone can be returned
    */
   @Override
-  protected Region.Entry nonTXGetEntry(KeyInfo keyInfo, boolean access,
+  protected Region.Entry<?, ?> nonTXGetEntry(KeyInfo keyInfo, boolean access,
       boolean allowTombstones) {
     final long startTime = prStats.getTime();
     final Object key = keyInfo.getKey();
@@ -1772,7 +1750,6 @@ protected EntrySnapshot getEntryInBucket(final DistributedMember targetNode, fin
       logger.trace("getEntryInBucket: " + "Key key={} ({}) from: {} bucketId={}", key,
          key.hashCode(), targetNode, bucketStringForLogs(bucketId));
     }
-    EntrySnapshot ret = null;
     int count = 0;
     RetryTimeKeeper retryTime = null;
     InternalDistributedMember retryNode = (InternalDistributedMember) targetNode;
@@ -1797,10 +1774,11 @@ protected EntrySnapshot getEntryInBucket(final DistributedMember targetNode, fin
       }
       try {
         final boolean loc = (localMaxMemory > 0) && retryNode.equals(getMyId());
+        final EntrySnapshot ret;
         if (loc) {
          ret = dataStore.getEntryLocally(bucketId, key, access, allowTombstones);
         } else {
-          ret = getEntryRemotely(retryNode, bucketId, key, access, allowTombstones);
+          ret = getEntryRemotely(retryNode, key, access, allowTombstones);
           // TODO:Suranjan&Yogesh : there should be better way than this one
           String name = Thread.currentThread().getName();
           if (name.startsWith("ServerConnection") && !getMyId().equals(targetNode)) {
@@ -1860,10 +1838,10 @@ protected EntrySnapshot getEntryInBucket(final DistributedMember targetNode, fin
     PartitionedRegionDistributionException e = null; // Fix for bug 36014
     if (logger.isDebugEnabled()) {
       e = new PartitionedRegionDistributionException(
-          String.format("No VM available for getEntry in %s attempts",
+          format("No VM available for getEntry in %s attempts",
              count));
     }
-    logger.warn(String.format("No VM available for getEntry in %s attempts", count), e);
+    logger.warn(format("No VM available for getEntry in %s attempts", count), e);
     return null;
   }
 
@@ -1881,7 +1859,6 @@ private void checkShutdown() {
    * Checks if a key is contained remotely.
    *
    * @param targetNode the node where bucket region for the key exists.
-   * @param bucketId the bucket id for the key.
    * @param key the key, whose value needs to be checks
    * @param access true if caller wants last access time updated
    * @param allowTombstones whether tombstones should be returned
@@ -1889,7 +1866,7 @@ private void checkShutdown() {
    * @throws ForceReattemptException if the peer is no longer available
    * @return true if the passed key is contained remotely.
    */
-  public EntrySnapshot getEntryRemotely(InternalDistributedMember targetNode, Integer bucketId,
+  public EntrySnapshot getEntryRemotely(InternalDistributedMember targetNode,
       Object key, boolean access, boolean allowTombstones)
       throws EntryNotFoundException, PrimaryBucketException, ForceReattemptException {
     FetchEntryResponse r = FetchEntryMessage.send(targetNode, this, key, access);
@@ -1984,14 +1961,6 @@ public void resetCounts() {
     }
   }
 
-  /**
-   * @since GemFire 5.0
-   * @throws UnsupportedOperationException OVERRIDES
-   */
-  public Region getSubregion() {
-    throw new UnsupportedOperationException();
-  }
-
   /**
    * @since GemFire 5.0
    * @throws UnsupportedOperationException OVERRIDES
@@ -2053,7 +2022,7 @@ public void localInvalidateRegion(Object aCallbackArgument) {
   public Object executeQuery(final DefaultQuery query,
       final ExecutionContext executionContext,
       final Object[] parameters,
-      final Set buckets)
+      final Set<Integer> buckets)
      throws FunctionDomainException, TypeMismatchException, NameResolutionException,
      QueryInvocationTargetException {
     for (;;) {
@@ -2073,7 +2042,7 @@ public Object executeQuery(final DefaultQuery query,
   private Object doExecuteQuery(final DefaultQuery query,
       final ExecutionContext executionContext,
       final Object[] parameters,
-      final Set buckets)
+      final Set<Integer> buckets)
      throws FunctionDomainException, TypeMismatchException, NameResolutionException,
      QueryInvocationTargetException, ForceReattemptException {
     if (logger.isDebugEnabled()) {
@@ -2083,15 +2052,15 @@ private Object doExecuteQuery(final DefaultQuery query,
     HashSet<Integer> allBuckets = new HashSet<>();
 
     if (buckets == null) { // remote buckets
-      final Iterator remoteIter = getRegionAdvisor().getBucketSet().iterator();
+      final Iterator<Integer> remoteIter = getRegionAdvisor().getBucketSet().iterator();
       try {
         while (remoteIter.hasNext()) {
-          allBuckets.add((Integer) remoteIter.next());
+          allBuckets.add(remoteIter.next());
         }
       } catch (NoSuchElementException ignore) {
       }
     } else { // local buckets
-      Iterator localIter = null;
+      final Iterator<Integer> localIter;
       if (dataStore != null) {
         localIter = buckets.iterator();
       } else {
@@ -2099,7 +2068,7 @@ private Object doExecuteQuery(final DefaultQuery query,
       }
       try {
         while (localIter.hasNext()) {
-          allBuckets.add((Integer) localIter.next());
+          allBuckets.add(localIter.next());
         }
       } catch (NoSuchElementException ignore) {
       }
@@ -2123,7 +2092,7 @@ private Object doExecuteQuery(final DefaultQuery query,
 
     // this can return a BAG even if it's a DISTINCT select expression,
     // since the expectation is that the duplicates will be removed at the end
-    SelectResults results = selectExpr.getEmptyResultSet(parameters, getCache(), query);
+    SelectResults<Object> results = selectExpr.getEmptyResultSet(parameters, getCache(), query);
 
     PartitionedRegionQueryEvaluator prqe = new PartitionedRegionQueryEvaluator(getSystem(), this,
         query, executionContext, parameters, results, allBuckets);
@@ -2135,17 +2104,12 @@ private Object doExecuteQuery(final DefaultQuery query,
         break;
       } catch (InterruptedException ignore) {
         interrupted = true;
-      } catch (FunctionDomainException e) {
-        throw e;
-      } catch (TypeMismatchException e) {
-        throw e;
-      } catch (NameResolutionException e) {
-        throw e;
-      } catch (QueryInvocationTargetException e) {
+      } catch (FunctionDomainException | QueryInvocationTargetException | NameResolutionException
+          | TypeMismatchException e) {
         throw e;
       } catch (QueryException qe) {
         throw new QueryInvocationTargetException(
-            String.format("Unexpected query exception occurred during query execution %s",
+            format("Unexpected query exception occurred during query execution %s",
                qe.getMessage()),
            qe);
       } finally {
@@ -2177,9 +2141,9 @@ private Object doExecuteQuery(final DefaultQuery query,
     }
     if (selectExpr.isCount() && (results.isEmpty() || selectExpr.isDistinct())) {
       // Constructor with elementType not visible.
-      SelectResults resultCount = new ResultsBag(getCachePerfStats());
+      ResultsBag resultCount = new ResultsBag(getCachePerfStats());
       resultCount.setElementType(new ObjectTypeImpl(Integer.class));
-      ((Bag) resultCount).addAndGetOccurence(results.size());
+      resultCount.addAndGetOccurence(results.size());
       return resultCount;
     }
   }
@@ -2401,18 +2365,17 @@ public long postPutAllSend(DistributedPutAllOperation putAllOp,
     final long startTime = prStats.getTime();
     // build all the msgs by bucketid
-    HashMap prMsgMap = putAllOp.createPRMessages();
+    HashMap<Integer, PutAllPRMessage> prMsgMap = putAllOp.createPRMessages();
     PutAllPartialResult partialKeys = new PutAllPartialResult(putAllOp.putAllDataSize);
 
     // clear the successfulPuts list since we're actually doing the puts here
     // and the basicPutAll work was just a way to build the DPAO object
-    Map keyToVersionMap =
+    Map<Object, VersionTag<VersionSource<?>>> keyToVersionMap =
         new HashMap<>(successfulPuts.size());
     successfulPuts.clearVersions();
-    for (final Object value : prMsgMap.entrySet()) {
-      Map.Entry mapEntry = (Map.Entry) value;
-      Integer bucketId = (Integer) mapEntry.getKey();
-      PutAllPRMessage prMsg = (PutAllPRMessage) mapEntry.getValue();
+    for (final Map.Entry<Integer, PutAllPRMessage> mapEntry : prMsgMap.entrySet()) {
+      final Integer bucketId = mapEntry.getKey();
+      final PutAllPRMessage prMsg = mapEntry.getValue();
       checkReadiness();
       long then = 0;
       if (isDebugEnabled) {
@@ -2424,7 +2387,7 @@ public long postPutAllSend(DistributedPutAllOperation putAllOp,
          partialKeys.addKeysAndVersions(versions);
          versions.saveVersions(keyToVersionMap);
         } else if (!getConcurrencyChecksEnabled()) { // no keys returned if not versioned
-          Set keys = prMsg.getKeys();
+          Set<Object> keys = prMsg.getKeys();
           partialKeys.addKeys(keys);
         }
       } catch (PutAllPartialResultException pre) {
@@ -2497,7 +2460,8 @@ public long postRemoveAllSend(DistributedRemoveAllOperation op,
     // clear the successfulOps list since we're actually doing the removes here
     // and the basicRemoveAll work was just a way to build the
"op" object - Map keyToVersionMap = new HashMap<>(successfulOps.size()); + Map>> keyToVersionMap = + new HashMap<>(successfulOps.size()); successfulOps.clearVersions(); for (final Map.Entry mapEntry : prMsgMap.entrySet()) { Integer bucketId = mapEntry.getKey(); @@ -2513,7 +2477,7 @@ public long postRemoveAllSend(DistributedRemoveAllOperation op, partialKeys.addKeysAndVersions(versions); versions.saveVersions(keyToVersionMap); } else if (!getConcurrencyChecksEnabled()) { // no keys returned if not versioned - Set keys = prMsg.getKeys(); + Set keys = prMsg.getKeys(); partialKeys.addKeys(keys); } } catch (PutAllPartialResultException pre) { @@ -2852,7 +2816,7 @@ private VersionedObjectList sendMsgByBucket(final Integer bucketId, RemoveAllPRM public VersionedObjectList tryToSendOnePutAllMessage(PutAllPRMessage prMsg, InternalDistributedMember currentTarget) throws DataLocationException { - boolean putResult = false; + final boolean putResult; VersionedObjectList versions = null; final boolean isLocal = (localMaxMemory > 0) && currentTarget.equals(getMyId()); if (isLocal) { // local @@ -2865,11 +2829,10 @@ public VersionedObjectList tryToSendOnePutAllMessage(PutAllPRMessage prMsg, } else { PutAllPRMessage.PutAllResponse response = (PutAllPRMessage.PutAllResponse) prMsg.send(currentTarget, this); - PutAllPRMessage.PutAllResult pr = null; if (response != null) { prStats.incPartitionMessagesSent(); try { - pr = response.waitForResult(); + final PutAllPRMessage.PutAllResult pr = response.waitForResult(); putResult = pr.returnValue; versions = pr.versions; } catch (RegionDestroyedException rde) { @@ -2899,7 +2862,7 @@ public VersionedObjectList tryToSendOnePutAllMessage(PutAllPRMessage prMsg, public VersionedObjectList tryToSendOneRemoveAllMessage(RemoveAllPRMessage prMsg, InternalDistributedMember currentTarget) throws DataLocationException { - boolean putResult = false; + final boolean putResult; VersionedObjectList versions = null; final boolean isLocal = (localMaxMemory > 0) && currentTarget.equals(getMyId()); if (isLocal) { // local @@ -2912,11 +2875,10 @@ public VersionedObjectList tryToSendOneRemoveAllMessage(RemoveAllPRMessage prMsg } else { RemoveAllPRMessage.RemoveAllResponse response = (RemoveAllPRMessage.RemoveAllResponse) prMsg.send(currentTarget, this); - RemoveAllPRMessage.RemoveAllResult pr = null; if (response != null) { prStats.incPartitionMessagesSent(); try { - pr = response.waitForResult(); + final RemoveAllPRMessage.RemoveAllResult pr = response.waitForResult(); putResult = pr.returnValue; versions = pr.versions; } catch (RegionDestroyedException rde) { @@ -3064,7 +3026,7 @@ private boolean putInBucket(final InternalDistributedMember targetNode, final In long start = prStats.startPutRemote(); try { if (ifNew) { - result = createRemotely(currentTarget, bucketId, event, requireOldValue); + result = createRemotely(currentTarget, event, requireOldValue); } else { result = putRemotely(currentTarget, event, ifNew, ifOld, expectedOldValue, requireOldValue); @@ -3268,7 +3230,7 @@ && getRegionAdvisor().getBucketRedundancy(bucketId) < minimumWriteRedundancy) { } else { int red = getRegionAdvisor().getBucketRedundancy(bucketId); final TimeoutException noTime = new TimeoutException( - String.format( + format( "Attempt to acquire primary node for write on bucket %s timed out in %s ms. 
Current redundancy [ %s ] does not satisfy minimum [ %s ]", bucketStringForLogs(bucketId), localSnoozer.getRetryTime(), red, minimumWriteRedundancy)); @@ -3333,14 +3295,9 @@ public Object get(Object key, Object aCallbackArgument, boolean generateCallback checkReadiness(); checkForNoAccess(); discoverJTA(); - boolean miss = true; - Object value = getDataView().findObject(getKeyInfo(key, aCallbackArgument), this, + return getDataView().findObject(getKeyInfo(key, aCallbackArgument), this, true/* isCreate */, generateCallbacks, null /* no local value */, disableCopyOnRead, preferCD, requestingClient, clientEvent, returnTombstones); - if (value != null && !Token.isInvalid(value)) { - miss = false; - } - return value; } @Override @@ -3431,33 +3388,27 @@ public boolean putRemotely(final DistributedMember recipient, final EntryEventIm long eventTime = event.getEventTime(0L); PutMessage.PutResponse response = (PutMessage.PutResponse) PutMessage.send(recipient, this, event, eventTime, ifNew, ifOld, expectedOldValue, requireOldValue); - PutResult pr = null; - if (response != null) { - prStats.incPartitionMessagesSent(); - try { - pr = response.waitForResult(); - event.setOperation(pr.op); - event.setVersionTag(pr.versionTag); - if (requireOldValue) { - event.setOldValue(pr.oldValue, true); - } - return pr.returnValue; - } catch (RegionDestroyedException rde) { - if (logger.isDebugEnabled()) { - logger.debug("putRemotely: caught RegionDestroyedException", rde); - } - throw new RegionDestroyedException(toString(), getFullPath()); - } catch (TransactionException te) { - throw te; - } catch (CacheException ce) { - // Fix for bug 36014 - throw new PartitionedRegionDistributionException( - String.format("Putting entry on %s failed", - recipient), - ce); + prStats.incPartitionMessagesSent(); + try { + final PutResult pr = response.waitForResult(); + event.setOperation(pr.op); + event.setVersionTag(pr.versionTag); + if (requireOldValue) { + event.setOldValue(pr.oldValue, true); } + return pr.returnValue; + } catch (RegionDestroyedException rde) { + if (logger.isDebugEnabled()) { + logger.debug("putRemotely: caught RegionDestroyedException", rde); + } + throw new RegionDestroyedException(toString(), getFullPath()); + } catch (TransactionException te) { + throw te; + } catch (CacheException ce) { + // Fix for bug 36014 + throw new PartitionedRegionDistributionException( + format("Putting entry on %s failed", recipient), ce); } - return true;// ???:ezoerner:20080728 why return true if response was null? 
} /** @@ -3491,7 +3442,7 @@ public InternalDistributedMember createBucket(int bucketId, int size, int startBucketId = fpa.getStartingBucketID(); if (startBucketId == -1) { throw new PartitionNotAvailableException( - String.format( + format( "For FixedPartitionedRegion %s, Partition %s is not yet initialized on datastore", getName(), partitionName)); } @@ -3501,7 +3452,6 @@ public InternalDistributedMember createBucket(int bucketId, int size, } // Potentially no storage assigned, start bucket creation, be careful of race // conditions - final long startTime = prStats.getTime(); if (isDataStore()) { ret = redundancyProvider.createBucketAtomically(bucketId, size, false, partitionName); @@ -3516,7 +3466,7 @@ Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate, TXStateInterface tx boolean generateCallbacks, Object localValue, boolean disableCopyOnRead, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, boolean returnTombstones) throws CacheLoaderException, TimeoutException { - Object obj = null; + final Object obj; final Object key = keyInfo.getKey(); final Object aCallbackArgument = keyInfo.getCallbackArg(); final long startTime = prStats.getTime(); @@ -3527,9 +3477,9 @@ Object findObjectInSystem(KeyInfo keyInfo, boolean isCreate, TXStateInterface tx null, aCallbackArgument); keyInfo.setBucketId(bucketId); } - InternalDistributedMember targetNode = null; - TXStateProxy txState = getTXState(); - boolean allowRetry; + final InternalDistributedMember targetNode; + final TXStateProxy txState = getTXState(); + final boolean allowRetry; if (txState != null) { if (txState.isRealDealLocal()) { targetNode = getMyId(); @@ -3576,24 +3526,22 @@ InternalDistributedMember getBucketNodeForReadOrWrite(int bucketId, * * @since GemFire 6.0 */ - public ResultCollector executeFunction(final Function function, - final PartitionedRegionFunctionExecutor execution, ResultCollector rc, + public ResultCollector executeFunction(final Function function, + final PartitionedRegionFunctionExecutor execution, ResultCollector rc, boolean executeOnBucketSet) { if (execution.isPrSingleHop()) { if (!executeOnBucketSet) { - switch (execution.getFilter().size()) { - case 1: - if (logger.isDebugEnabled()) { - logger.debug("Executing Function: (Single Hop) {} on single node.", function.getId()); - } - return executeOnSingleNode(function, execution, rc, true, false); - default: - if (logger.isDebugEnabled()) { - logger.debug("Executing Function: (Single Hop) {} on multiple nodes.", - function.getId()); - } - return executeOnMultipleNodes(function, execution, rc, true, false); + if (execution.getFilter().size() == 1) { + if (logger.isDebugEnabled()) { + logger.debug("Executing Function: (Single Hop) {} on single node.", function.getId()); + } + return executeOnSingleNode(function, execution, rc, true, false); + } + if (logger.isDebugEnabled()) { + logger.debug("Executing Function: (Single Hop) {} on multiple nodes.", + function.getId()); } + return executeOnMultipleNodes(function, execution, rc, true, false); } else { if (logger.isDebugEnabled()) { logger.debug("Executing Function: (Single Hop) {} on a set of buckets nodes.", @@ -3608,7 +3556,7 @@ public ResultCollector executeFunction(final Function function, logger.debug("Executing Function: {} setArguments={} on all buckets.", function.getId(), execution.getArguments()); } - return executeOnAllBuckets(function, execution, rc, false); + return executeOnAllBuckets(function, execution, rc); case 1: if (logger.isDebugEnabled()) { 
logger.debug("Executing Function: {} setArguments={} on single node.", function.getId(), @@ -3628,21 +3576,22 @@ public ResultCollector executeFunction(final Function function, /** * Executes function on multiple nodes */ - private ResultCollector executeOnMultipleNodes(final Function function, - final PartitionedRegionFunctionExecutor execution, ResultCollector rc, boolean isPRSingleHop, + private ResultCollector executeOnMultipleNodes( + final Function function, + final PartitionedRegionFunctionExecutor execution, ResultCollector rc, + boolean isPRSingleHop, boolean isBucketSetAsFilter) { - final Set routingKeys = execution.getFilter(); + final Set routingKeys = execution.getFilter(); final boolean primaryMembersNeeded = function.optimizeForWrite(); - Map bucketToKeysMap = FunctionExecutionNodePruner.groupByBucket(this, - routingKeys, primaryMembersNeeded, false, isBucketSetAsFilter); - HashMap memberToKeysMap = - new HashMap<>(); - HashMap memberToBuckets = + final Map> bucketToKeysMap = + FunctionExecutionNodePruner.groupByBucket(this, + routingKeys, primaryMembersNeeded, false, isBucketSetAsFilter); + final Map> memberToKeysMap = new HashMap<>(); + Map memberToBuckets = FunctionExecutionNodePruner.groupByMemberToBuckets(this, bucketToKeysMap.keySet(), primaryMembersNeeded); if (isPRSingleHop && (memberToBuckets.size() > 1)) { - // memberToBuckets.remove(getMyId()); // don't remove for (InternalDistributedMember targetNode : memberToBuckets.keySet()) { if (!targetNode.equals(getMyId())) { int[] bucketArray = memberToBuckets.get(targetNode); @@ -3671,9 +3620,9 @@ private ResultCollector executeOnMultipleNodes(final Function function, } while (!execution.getFailedNodes().isEmpty()) { - Set memberKeySet = memberToBuckets.keySet(); - RetryTimeKeeper retryTime = new RetryTimeKeeper(retryTimeout); - Iterator iterator = memberKeySet.iterator(); + final Set memberKeySet = memberToBuckets.keySet(); + final RetryTimeKeeper retryTime = new RetryTimeKeeper(retryTimeout); + final Iterator iterator = memberKeySet.iterator(); boolean hasRemovedNode = false; @@ -3698,27 +3647,22 @@ private ResultCollector executeOnMultipleNodes(final Function function, } } - for (Map.Entry entry : memberToBuckets.entrySet()) { - InternalDistributedMember member = (InternalDistributedMember) entry.getKey(); - int[] buckets = (int[]) entry.getValue(); + for (Map.Entry entry : memberToBuckets.entrySet()) { + InternalDistributedMember member = entry.getKey(); + int[] buckets = entry.getValue(); int length = BucketSetHelper.length(buckets); if (length == 0) { continue; } - int bucket; for (int i = 0; i < length; i++) { - bucket = BucketSetHelper.get(buckets, i); - HashSet keys = memberToKeysMap.get(member); - if (keys == null) { - keys = new HashSet(); - } - keys.addAll(bucketToKeysMap.get(bucket)); - memberToKeysMap.put(member, keys); + final int bucket = BucketSetHelper.get(buckets, i); + memberToKeysMap.computeIfAbsent(member, k -> new HashSet<>()) + .addAll(bucketToKeysMap.get(bucket)); } } // memberToKeysMap.keySet().retainAll(memberToBuckets.keySet()); if (memberToKeysMap.isEmpty()) { - throw new FunctionException(String.format("No target node found for KEY, %s", + throw new FunctionException(format("No target node found for KEY, %s", routingKeys)); } Set dest = memberToKeysMap.keySet(); @@ -3727,10 +3671,11 @@ private ResultCollector executeOnMultipleNodes(final Function function, execution.setExecutionNodes(dest); // end - final HashSet localKeys = memberToKeysMap.remove(getMyId()); - int[] localBucketSet = null; - 
boolean remoteOnly = false; + final Set localKeys = memberToKeysMap.remove(getMyId()); + final int[] localBucketSet; + final boolean remoteOnly; if (localKeys == null) { + localBucketSet = null; remoteOnly = true; } else { localBucketSet = FunctionExecutionNodePruner.getBucketSet(this, localKeys, @@ -3738,17 +3683,17 @@ private ResultCollector executeOnMultipleNodes(final Function function, remoteOnly = false; } - final LocalResultCollector localResultCollector = + final LocalResultCollector localResultCollector = execution.getLocalResultCollector(function, rc); final DistributionManager dm = getDistributionManager(); - final PartitionedRegionFunctionResultSender resultSender = - new PartitionedRegionFunctionResultSender(dm, this, 0L, localResultCollector, + final PartitionedRegionFunctionResultSender resultSender = + new PartitionedRegionFunctionResultSender<>(dm, this, 0L, localResultCollector, execution.getServerResultSender(), memberToKeysMap.isEmpty(), remoteOnly, execution.isForwardExceptions(), function, localBucketSet); if (localKeys != null) { - final RegionFunctionContextImpl prContext = - new RegionFunctionContextImpl(cache, function.getId(), this, + final RegionFunctionContextImpl prContext = + new RegionFunctionContextImpl<>(cache, function.getId(), this, execution.getArgumentsForMember(getMyId().getId()), localKeys, ColocationHelper .constructAndGetAllColocatedLocalDataSet(this, localBucketSet), @@ -3762,13 +3707,13 @@ private ResultCollector executeOnMultipleNodes(final Function function, if (!memberToKeysMap.isEmpty()) { HashMap recipMap = new HashMap<>(); - for (Map.Entry me : memberToKeysMap.entrySet()) { - InternalDistributedMember recip = (InternalDistributedMember) me.getKey(); - HashSet memKeys = (HashSet) me.getValue(); + for (Map.Entry> me : memberToKeysMap.entrySet()) { + InternalDistributedMember recip = me.getKey(); + Set memKeys = me.getValue(); FunctionRemoteContext context = new FunctionRemoteContext(function, execution.getArgumentsForMember(recip.getId()), memKeys, FunctionExecutionNodePruner.getBucketSet(this, memKeys, false, isBucketSetAsFilter), - execution.isReExecute(), execution.isFnSerializationReqd(), getPrincipal()); + execution.isReExecute(), execution.isFunctionSerializationRequired(), getPrincipal()); recipMap.put(recip, context); } if (logger.isDebugEnabled()) { @@ -3792,10 +3737,11 @@ private Object getPrincipal() { * * @since GemFire 6.0 */ - private ResultCollector executeOnSingleNode(final Function function, - final PartitionedRegionFunctionExecutor execution, ResultCollector rc, boolean isPRSingleHop, + private ResultCollector executeOnSingleNode(final Function function, + final PartitionedRegionFunctionExecutor execution, ResultCollector rc, + boolean isPRSingleHop, boolean isBucketSetAsFilter) { - final Set routingKeys = execution.getFilter(); + final Set routingKeys = execution.getFilter(); final Object key = routingKeys.iterator().next(); final Integer bucketId; if (isBucketSetAsFilter) { @@ -3804,7 +3750,7 @@ private ResultCollector executeOnSingleNode(final Function function, bucketId = PartitionedRegionHelper.getHashKey(this, Operation.FUNCTION_EXECUTION, key, null, null); } - InternalDistributedMember targetNode = null; + InternalDistributedMember targetNode; if (function.optimizeForWrite()) { targetNode = createBucket(bucketId, 0, null /* retryTimeKeeper */); cache.getInternalResourceManager().getHeapMonitor().checkForLowMemory(function, targetNode); @@ -3829,7 +3775,7 @@ private ResultCollector executeOnSingleNode(final Function 
function, if (targetNode == null) { throw new FunctionException( - String.format("No target node found for KEY, %s", key)); + format("No target node found for KEY, %s", key)); } if (logger.isDebugEnabled()) { @@ -3839,11 +3785,6 @@ private ResultCollector executeOnSingleNode(final Function function, while (!execution.getFailedNodes().isEmpty()) { RetryTimeKeeper retryTime = new RetryTimeKeeper(retryTimeout); if (execution.getFailedNodes().contains(targetNode.getId())) { - /* - * if (retryTime.overMaximum()) { PRHARedundancyProvider.timedOut(this, null, null, - * "doing function execution", this.retryTimeout); // NOTREACHED } - */ - // Fix for Bug # 40083 targetNode = null; while (targetNode == null) { if (retryTime.overMaximum()) { @@ -3864,20 +3805,19 @@ private ResultCollector executeOnSingleNode(final Function function, } final int[] buckets = new int[2]; - buckets[0] = 0; BucketSetHelper.add(buckets, bucketId); final Set singleMember = Collections.singleton(targetNode); execution.validateExecution(function, singleMember); execution.setExecutionNodes(singleMember); - LocalResultCollector localRC = execution.getLocalResultCollector(function, rc); + LocalResultCollector localRC = execution.getLocalResultCollector(function, rc); if (targetNode.equals(localVm)) { final DistributionManager dm = getDistributionManager(); - PartitionedRegionFunctionResultSender resultSender = - new PartitionedRegionFunctionResultSender(dm, this, 0, localRC, + PartitionedRegionFunctionResultSender resultSender = + new PartitionedRegionFunctionResultSender<>(dm, this, 0, localRC, execution.getServerResultSender(), true, false, execution.isForwardExceptions(), function, buckets); - final FunctionContext context = - new RegionFunctionContextImpl(cache, function.getId(), this, + final FunctionContext context = + new RegionFunctionContextImpl<>(cache, function.getId(), this, execution.getArgumentsForMember(localVm.getId()), routingKeys, ColocationHelper .constructAndGetAllColocatedLocalDataSet(this, buckets), buckets, resultSender, execution.isReExecute()); @@ -3890,8 +3830,9 @@ private ResultCollector executeOnSingleNode(final Function function, } } - public ResultCollector executeOnBucketSet(final Function function, - PartitionedRegionFunctionExecutor execution, ResultCollector rc, Set bucketSet) { + public ResultCollector executeOnBucketSet(final Function function, + PartitionedRegionFunctionExecutor execution, ResultCollector rc, + Set bucketSet) { Set actualBucketSet = getRegionAdvisor().getBucketSet(); try { bucketSet.retainAll(actualBucketSet); @@ -3948,15 +3889,15 @@ public ResultCollector executeOnBucketSet(final Function function, } } - execution = (PartitionedRegionFunctionExecutor) execution.withFilter(new HashSet()); + execution = (PartitionedRegionFunctionExecutor) execution.withFilter(emptySet()); while (!execution.getFailedNodes().isEmpty()) { - Set memberKeySet = memberToBuckets.keySet(); - RetryTimeKeeper retryTime = new RetryTimeKeeper(retryTimeout); - Iterator iterator = memberKeySet.iterator(); + final Set memberKeySet = memberToBuckets.keySet(); + final RetryTimeKeeper retryTime = new RetryTimeKeeper(retryTimeout); boolean hasRemovedNode = false; - while (iterator.hasNext()) { - if (execution.getFailedNodes().contains(((DistributedMember) iterator.next()).getId())) { + for (final InternalDistributedMember internalDistributedMember : memberKeySet) { + if (execution.getFailedNodes() + .contains(((DistributedMember) internalDistributedMember).getId())) { hasRemovedNode = true; } } @@ -3990,21 
+3931,21 @@ public ResultCollector executeOnBucketSet(final Function function, for (InternalDistributedMember recip : dest) { FunctionRemoteContext context = new FunctionRemoteContext(function, execution.getArgumentsForMember(recip.getId()), null, memberToBuckets.get(recip), - execution.isReExecute(), execution.isFnSerializationReqd(), getPrincipal()); + execution.isReExecute(), execution.isFunctionSerializationRequired(), getPrincipal()); recipMap.put(recip, context); } - final LocalResultCollector localRC = execution.getLocalResultCollector(function, rc); + final LocalResultCollector localRC = execution.getLocalResultCollector(function, rc); final DistributionManager dm = getDistributionManager(); - final PartitionedRegionFunctionResultSender resultSender = - new PartitionedRegionFunctionResultSender(dm, this, 0L, localRC, + final PartitionedRegionFunctionResultSender resultSender = + new PartitionedRegionFunctionResultSender<>(dm, this, 0L, localRC, execution.getServerResultSender(), recipMap.isEmpty(), !isSelf, execution.isForwardExceptions(), function, localBucketSet); // execute locally and collect the result if (isSelf && dataStore != null) { - final RegionFunctionContextImpl prContext = - new RegionFunctionContextImpl(cache, function.getId(), this, + final RegionFunctionContextImpl prContext = + new RegionFunctionContextImpl<>(cache, function.getId(), this, execution.getArgumentsForMember(getMyId().getId()), null, ColocationHelper .constructAndGetAllColocatedLocalDataSet(this, localBucketSet), localBucketSet, resultSender, execution.isReExecute()); @@ -4022,9 +3963,9 @@ public ResultCollector executeOnBucketSet(final Function function, * * @since GemFire 6.0 */ - private ResultCollector executeOnAllBuckets(final Function function, - final PartitionedRegionFunctionExecutor execution, ResultCollector rc, - boolean isPRSingleHop) { + private ResultCollector executeOnAllBuckets(final Function function, + final PartitionedRegionFunctionExecutor execution, + ResultCollector rc) { Set bucketSet = new HashSet<>(); Iterator itr = getRegionAdvisor().getBucketSet().iterator(); while (itr.hasNext()) { @@ -4043,15 +3984,13 @@ private ResultCollector executeOnAllBuckets(final Function function, } while (!execution.getFailedNodes().isEmpty()) { - Set memberKeySet = memberToBuckets.keySet(); - RetryTimeKeeper retryTime = new RetryTimeKeeper(retryTimeout); - - Iterator iterator = memberKeySet.iterator(); - + final Set memberKeySet = memberToBuckets.keySet(); + final RetryTimeKeeper retryTime = new RetryTimeKeeper(retryTimeout); boolean hasRemovedNode = false; - while (iterator.hasNext()) { - if (execution.getFailedNodes().contains(((DistributedMember) iterator.next()).getId())) { + for (final InternalDistributedMember internalDistributedMember : memberKeySet) { + if (execution.getFailedNodes() + .contains(((DistributedMember) internalDistributedMember).getId())) { hasRemovedNode = true; } } @@ -4084,21 +4023,21 @@ private ResultCollector executeOnAllBuckets(final Function function, for (InternalDistributedMember recip : memberToBuckets.keySet()) { FunctionRemoteContext context = new FunctionRemoteContext(function, execution.getArgumentsForMember(recip.getId()), null, memberToBuckets.get(recip), - execution.isReExecute(), execution.isFnSerializationReqd(), getPrincipal()); + execution.isReExecute(), execution.isFunctionSerializationRequired(), getPrincipal()); recipMap.put(recip, context); } - final LocalResultCollector localResultCollector = + final LocalResultCollector localResultCollector = 
execution.getLocalResultCollector(function, rc); final DistributionManager dm = getDistributionManager(); - final PartitionedRegionFunctionResultSender resultSender = - new PartitionedRegionFunctionResultSender(dm, this, 0L, localResultCollector, + final PartitionedRegionFunctionResultSender resultSender = + new PartitionedRegionFunctionResultSender<>(dm, this, 0L, localResultCollector, execution.getServerResultSender(), recipMap.isEmpty(), !isSelf, execution.isForwardExceptions(), function, localBucketSet); // execute locally and collect the result if (isSelf && dataStore != null) { - final RegionFunctionContextImpl prContext = - new RegionFunctionContextImpl(cache, function.getId(), this, + final RegionFunctionContextImpl prContext = + new RegionFunctionContextImpl<>(cache, function.getId(), this, execution.getArgumentsForMember(getMyId().getId()), null, ColocationHelper .constructAndGetAllColocatedLocalDataSet(this, localBucketSet), localBucketSet, resultSender, execution.isReExecute()); @@ -4183,7 +4122,7 @@ private Object getFromBucket(final InternalDistributedMember targetNode, int buc // Only transactions set allowRetry to false, // fail the transaction here as region is destroyed. Throwable cause = pde.getCause(); - if (cause != null && cause instanceof RegionDestroyedException) { + if (cause instanceof RegionDestroyedException) { throw (RegionDestroyedException) cause; } else { // Should not see it currently, all current constructors of PRLocallyDestroyedException @@ -4251,10 +4190,10 @@ private Object getFromBucket(final InternalDistributedMember targetNode, int buc PartitionedRegionDistributionException e = null; // Fix for bug 36014 if (logger.isDebugEnabled()) { e = new PartitionedRegionDistributionException( - String.format("No VM available for get in %s attempts", + format("No VM available for get in %s attempts", count)); } - logger.warn(String.format("No VM available for get in %s attempts", count), e); + logger.warn(format("No VM available for get in %s attempts", count), e); return null; } @@ -4313,8 +4252,9 @@ boolean cacheWriteBeforeRegionDestroy(RegionEventImpl event) if (event.getOperation().isDistributed()) { serverRegionDestroy(event); - CacheWriter localWriter = basicGetWriter(); - Set netWriteRecipients = localWriter == null ? distAdvisor.adviseNetWrite() : null; + CacheWriter localWriter = basicGetWriter(); + Set netWriteRecipients = + localWriter == null ? 
distAdvisor.adviseNetWrite() : null; if (localWriter == null && (netWriteRecipients == null || netWriteRecipients.isEmpty())) { return false; @@ -4380,7 +4320,8 @@ public Object localCacheGet(Object key) { * @return A set of keys * @see LocalRegion#keys() */ - public Set localCacheKeySet() { + @VisibleForTesting + public Set localCacheKeySet() { return super.keys(); } @@ -4394,7 +4335,7 @@ public Set localCacheKeySet() { */ public List getAllBucketEntries(final int bucketId) throws ForceReattemptException { if (bucketId >= getTotalNumberOfBuckets()) { - return Collections.emptyList(); + return emptyList(); } ArrayList ret = new ArrayList<>(); HashSet collected = new HashSet<>(); @@ -4428,14 +4369,14 @@ public String toString() { } }; - Map versions = new HashMap<>(); + Map> versions = new HashMap<>(); - for (final Map.Entry o : (Iterable) br.entrySet()) { + for (final Map.Entry o : (Iterable>) br.entrySet()) { NonTXEntry entry = (NonTXEntry) o; RegionEntry re = entry.getRegionEntry(); Object value = re.getValue(br); // OFFHEAP: incrc, deserialize, decrc - VersionStamp versionStamp = re.getVersionStamp(); - VersionTag versionTag = versionStamp != null ? versionStamp.asVersionTag() : null; + VersionStamp versionStamp = re.getVersionStamp(); + VersionTag versionTag = versionStamp != null ? versionStamp.asVersionTag() : null; if (versionTag != null) { versionTag.replaceNullIDs(br.getVersionMember()); } @@ -4449,7 +4390,7 @@ public String toString() { m.put(re.getKey(), value); versions.put(re.getKey(), versionTag); } - RegionVersionVector rvv = br.getVersionVector(); + RegionVersionVector> rvv = br.getVersionVector(); rvv = rvv != null ? rvv.getCloneForTransmission() : null; ret.add(new BucketDump(bucketId, owner, rvv, m, versions)); continue; @@ -4473,7 +4414,8 @@ public String toString() { * * @return A set of keys from bucketNum or {@link Collections#EMPTY_SET}if no keys can be found. */ - public Set getBucketKeys(int bucketNum) { + @VisibleForTesting + public Set getBucketKeys(int bucketNum) { return getBucketKeys(bucketNum, false); } @@ -4484,9 +4426,8 @@ public Set getBucketKeys(int bucketNum) { * @param allowTombstones whether to include destroyed entries in the result * @return A set of keys from bucketNum or {@link Collections#EMPTY_SET}if no keys can be found. 
*/ - public Set getBucketKeys(int bucketNum, boolean allowTombstones) { + public Set getBucketKeys(int bucketNum, boolean allowTombstones) { final int retryAttempts = calcRetry(); - Set ret = null; int count = 0; InternalDistributedMember nod = getOrCreateNodeForBucketRead(bucketNum); RetryTimeKeeper snoozer = null; @@ -4514,11 +4455,12 @@ public Set getBucketKeys(int bucketNum, boolean allowTombstones) { } try { + final Set ret; if (nod.equals(getMyId())) { ret = dataStore.getKeysLocally(bucketNum, allowTombstones); } else { FetchKeysResponse r = FetchKeysMessage.send(nod, this, bucketNum, allowTombstones); - ret = r.waitForKeys(); + ret = uncheckedCast(r.waitForKeys()); } if (ret != null) { return ret; @@ -4542,7 +4484,7 @@ public Set getBucketKeys(int bucketNum, boolean allowTombstones) { if (snoozer.overMaximum()) { checkReadiness(); throw new TimeoutException( - String.format( + format( "Attempt to acquire primary node for read on bucket %s timed out in %s ms", getBucketName(bucketNum), snoozer.getRetryTime())); } @@ -4555,7 +4497,7 @@ public Set getBucketKeys(int bucketNum, boolean allowTombstones) { if (logger.isDebugEnabled()) { logger.debug("getBucketKeys: no keys found returning empty set"); } - return Collections.emptySet(); + return emptySet(); } /** @@ -4565,18 +4507,17 @@ public void fetchEntries(HashMap> bucketKeys, Versioned ServerConnection servConn) throws IOException { int retryAttempts = calcRetry(); RetryTimeKeeper retryTime = null; - HashMap failures = new HashMap<>(bucketKeys); - HashMap> nodeToBuckets = - new HashMap<>(); + Map> failures = new HashMap<>(bucketKeys); + Map>> nodeToBuckets = new HashMap<>(); while (--retryAttempts >= 0 && !failures.isEmpty()) { nodeToBuckets.clear(); updateNodeToBucketMap(nodeToBuckets, failures); failures.clear(); - HashMap localBuckets = nodeToBuckets.remove(getMyId()); + Map> localBuckets = nodeToBuckets.remove(getMyId()); if (localBuckets != null && !localBuckets.isEmpty()) { - Set keys = new HashSet(); + Set keys = new HashSet<>(); for (Integer id : localBuckets.keySet()) { keys.addAll(localBuckets.get(id)); } @@ -4603,14 +4544,14 @@ public void fetchEntries(HashMap> bucketKeys, Versioned } void updateNodeToBucketMap( - HashMap> nodeToBuckets, - HashMap bucketKeys) { + Map>> nodeToBuckets, + Map> bucketKeys) { for (int id : bucketKeys.keySet()) { InternalDistributedMember node = getOrCreateNodeForBucketWrite(id, null); if (nodeToBuckets.containsKey(node)) { nodeToBuckets.get(node).put(id, bucketKeys.get(id)); } else { - HashMap map = new HashMap<>(); + Map> map = new HashMap<>(); map.put(id, bucketKeys.get(id)); nodeToBuckets.put(node, map); } @@ -4638,7 +4579,7 @@ public void fetchEntries(String regex, VersionedObjectList values, ServerConnect if (localBuckets != null && !localBuckets.isEmpty()) { for (Integer id : localBuckets) { - Set keys = fetchAllLocalKeys(id, failures, regex); + Set keys = fetchAllLocalKeys(id, failures, regex); if (!keys.isEmpty()) { BaseCommand.appendNewRegisterInterestResponseChunkFromLocal(this, values, regex != null ? 
regex : "ALL_KEYS", keys, servConn); @@ -4698,19 +4639,17 @@ private boolean waitForFetchRemoteEntriesRetry(RetryTimeKeeper retryTime) { return true; } - public Set fetchAllLocalKeys(Integer id, Set failures, String regex) { - Set result = new HashSet(); + private Set fetchAllLocalKeys(Integer id, Set failures, String regex) { + final Set result = new HashSet<>(); try { - Set keys = null; + final Set keys; if (regex != null) { keys = dataStore.handleRemoteGetKeys(id, InterestType.REGULAR_EXPRESSION, regex, true); } else { keys = dataStore.getKeysLocally(id, true); } result.addAll(keys); - } catch (ForceReattemptException ignore) { - failures.add(id); - } catch (PRLocallyDestroyedException ignore) { + } catch (ForceReattemptException | PRLocallyDestroyedException ignore) { failures.add(id); } return result; @@ -4721,29 +4660,27 @@ public Set fetchAllLocalKeys(Integer id, Set failures, String regex) { * older than 8.0 */ public void fetchRemoteEntries( - HashMap> nodeToBuckets, - HashMap failures, VersionedObjectList values, ServerConnection servConn) + Map>> nodeToBuckets, + Map> failures, VersionedObjectList values, + ServerConnection servConn) throws IOException { - Set result = null; - HashMap oneBucketKeys = new HashMap<>(); + Map> oneBucketKeys = new HashMap<>(); - for (Map.Entry> entry : nodeToBuckets + for (final Map.Entry>> entry : nodeToBuckets .entrySet()) { - HashMap bucketKeys = entry.getValue(); - FetchBulkEntriesResponse fber = null; - result = new HashSet(); + final Map> bucketKeys = entry.getValue(); + final Set>> result = new HashSet<>(); // Fetch one bucket-data at a time to avoid this VM running out of memory. - // See #50647 - for (Map.Entry e : bucketKeys.entrySet()) { + for (final Map.Entry> e : bucketKeys.entrySet()) { result.clear(); oneBucketKeys.clear(); oneBucketKeys.put(e.getKey(), e.getValue()); try { - fber = + final FetchBulkEntriesResponse fber = FetchBulkEntriesMessage.send(entry.getKey(), this, oneBucketKeys, null, null, true); - BucketDump[] bds = fber.waitForEntries(); + final BucketDump[] bds = fber.waitForEntries(); if (fber.getFailedBucketIds() != null && !fber.getFailedBucketIds().isEmpty()) { for (int id : fber.getFailedBucketIds()) { failures.put(id, nodeToBuckets.get(entry.getKey()).get(id)); @@ -4770,25 +4707,24 @@ public void fetchRemoteEntries( public void fetchAllRemoteEntries( HashMap> nodeToBuckets, HashSet failures, String regex, VersionedObjectList values, ServerConnection servConn) throws IOException { - Set result = null; - HashSet bucketId = new HashSet<>(); + final HashSet bucketId = new HashSet<>(); - for (Map.Entry> entry : nodeToBuckets.entrySet()) { - HashSet buckets = new HashSet<>(entry.getValue()); // Is it needed to copy the + for (final Map.Entry> entry : nodeToBuckets + .entrySet()) { + final HashSet buckets = new HashSet<>(entry.getValue()); // Is it needed to copy the // set here? - FetchBulkEntriesResponse fber = null; - result = new HashSet(); + final Set>> result = new HashSet<>(); // Fetch one bucket-data at a time to avoid this VM running out of memory. 
- // See #50647 - for (int bucket : buckets) { + for (final int bucket : buckets) { result.clear(); bucketId.clear(); bucketId.add(bucket); try { - fber = FetchBulkEntriesMessage.send(entry.getKey(), this, null, bucketId, regex, true); + final FetchBulkEntriesResponse fber = + FetchBulkEntriesMessage.send(entry.getKey(), this, null, bucketId, regex, true); - BucketDump[] bds = fber.waitForEntries(); + final BucketDump[] bds = fber.waitForEntries(); if (fber.getFailedBucketIds() != null) { failures.addAll(fber.getFailedBucketIds()); } @@ -4872,11 +4808,14 @@ public Object getRemotely(InternalDistributedMember targetNode, int bucketId, fi return value; } - private ResultCollector executeFunctionOnRemoteNode(InternalDistributedMember targetNode, - final Function function, final Object object, final Set routingKeys, ResultCollector rc, - int[] bucketArray, ServerToClientFunctionResultSender sender, AbstractExecution execution) { - PartitionedRegionFunctionResultSender resultSender = - new PartitionedRegionFunctionResultSender(null, this, 0, rc, sender, false, true, + private ResultCollector executeFunctionOnRemoteNode( + InternalDistributedMember targetNode, + final Function function, final Object object, final Set routingKeys, + ResultCollector rc, + int[] bucketArray, ServerToClientFunctionResultSender sender, + AbstractExecution execution) { + PartitionedRegionFunctionResultSender resultSender = + new PartitionedRegionFunctionResultSender<>(null, this, 0, rc, sender, false, true, execution.isForwardExceptions(), function, bucketArray); PartitionedRegionFunctionResultWaiter resultReceiver = @@ -4884,7 +4823,8 @@ private ResultCollector executeFunctionOnRemoteNode(InternalDistributedMember ta resultSender); FunctionRemoteContext context = new FunctionRemoteContext(function, object, routingKeys, - bucketArray, execution.isReExecute(), execution.isFnSerializationReqd(), getPrincipal()); + bucketArray, execution.isReExecute(), execution.isFunctionSerializationRequired(), + getPrincipal()); HashMap recipMap = new HashMap<>(); @@ -4937,7 +4877,7 @@ private static void releasePRIDLock(final DistributedLockService lockService) { PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID); } } catch (Exception es) { - logger.warn(String.format("releasePRIDLock: unlocking %s caught an exception", + logger.warn(format("releasePRIDLock: unlocking %s caught an exception", PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID), es); } } @@ -4963,12 +4903,10 @@ private static int _generatePRId(InternalDistributedSystem sys, PartitionedRegionHelper.MAX_PARTITIONED_REGION_ID); } - Set parMembers = sys.getDistributionManager().getOtherDistributionManagerIds(); - - Integer currentPRID; - - IdentityResponse pir = IdentityRequestMessage.send(parMembers, sys); - currentPRID = pir.waitForId(); + final Set parMembers = + sys.getDistributionManager().getOtherDistributionManagerIds(); + final IdentityResponse pir = IdentityRequestMessage.send(parMembers, sys); + Integer currentPRID = pir.waitForId(); if (currentPRID == null) { currentPRID = 0; @@ -5033,7 +4971,7 @@ public void fillInProfile(Profile p) { // reusing this boolean to indicate that this member has finished disk recovery. 
profile.regionInitialized = recoveredFromDisk; - profile.hasCacheServer = (cache.getCacheServers().size() > 0); + profile.hasCacheServer = !cache.getCacheServers().isEmpty(); profile.filterProfile = getFilterProfile(); profile.gatewaySenderIds = getGatewaySenderIds(); profile.asyncEventQueueIds = getVisibleAsyncEventQueueIds(); @@ -5125,7 +5063,7 @@ public static PartitionedRegion getPRFromId(int prid) throws PRLocallyDestroyedE */ public static void validatePRID(InternalDistributedMember sender, int prId, String regionId) { try { - PartitionedRegion pr = null; + final PartitionedRegion pr; synchronized (prIdToPR) { // first do a quick probe pr = (PartitionedRegion) prIdToPR.getRegion(prId); @@ -5166,9 +5104,8 @@ public static String dumpPRId() { public String dumpAllPartitionedRegions() { StringBuilder sb = new StringBuilder(getPRRoot().getFullPath()); sb.append("\n"); - Object key = null; - for (Iterator i = getPRRoot().keySet().iterator(); i.hasNext();) { - key = i.next(); + for (final Iterator i = getPRRoot().keySet().iterator(); i.hasNext();) { + final Object key = i.next(); sb.append(key).append("=>").append(getPRRoot().get(key)); if (i.hasNext()) { sb.append("\n"); @@ -5185,23 +5122,11 @@ public int getPRId() { return partitionedRegionId; } - /** - * Updates local cache with a new value. - * - * @param key the key - * @param value the value - * @param newVersion the new version of the key - */ - void updateLocalCache(Object key, Object value, long newVersion) { - - } - /** * This method returns total number of buckets for this PR * */ public int getTotalNumberOfBuckets() { - return totalNumberOfBuckets; } @@ -5235,8 +5160,7 @@ public void basicDestroy(final EntryEventImpl event, final boolean cacheWrite, public void destroyInBucket(final EntryEventImpl event, Object expectedOldValue) throws EntryNotFoundException, CacheWriterException { // Get the bucket id for the key - final Integer bucketId = event.getKeyInfo().getBucketId(); - assert bucketId != KeyInfo.UNKNOWN_BUCKET; + final int bucketId = event.getKeyInfo().getBucketId(); // check in bucket2Node region final InternalDistributedMember targetNode = getOrCreateNodeForBucketWrite(bucketId, null); @@ -5302,11 +5226,11 @@ public void destroyInBucket(final EntryEventImpl event, Object expectedOldValue) if (getRegionAdvisor().getBucket(bucketId).getBucketAdvisor() .basicGetPrimaryMember() == null) { throw new EntryNotFoundException( - String.format("Entry not found for key %s", + format("Entry not found for key %s", event.getKey())); } TimeoutException e = new TimeoutException( - String.format("Time out looking for target node for destroy; waited %s ms", + format("Time out looking for target node for destroy; waited %s ms", retryTime.getRetryTime())); if (logger.isDebugEnabled()) { logger.debug(e.getMessage(), e); @@ -5340,7 +5264,7 @@ public void destroyInBucket(final EntryEventImpl event, Object expectedOldValue) if (event.isBridgeEvent()) { setNetworkHopType(bucketId, currentTarget); } - destroyRemotely(currentTarget, bucketId, event, expectedOldValue); + destroyRemotely(currentTarget, event, expectedOldValue); } return; @@ -5459,17 +5383,14 @@ public boolean isProfileFromSameGroup(ServerBucketProfile profile) { } public Set getLocalServerGroups() { - Set localServerGroups = new HashSet(); - InternalCache cache = getCache(); - List servers; - - servers = cache.getCacheServers(); + final Set localServerGroups = new HashSet<>(); + final InternalCache cache = getCache(); Collections.addAll(localServerGroups, 
MemberDataBuilder.parseGroups(null, cache.getInternalDistributedSystem().getConfig().getGroups())); - for (Object object : servers) { - CacheServerImpl server = (CacheServerImpl) object; + final List servers = uncheckedCast(cache.getCacheServers()); + for (CacheServerImpl server : servers) { if (server.isRunning() && (server.getExternalAddress() != null)) { Collections.addAll(localServerGroups, server.getGroups()); } @@ -5481,7 +5402,6 @@ public Set getLocalServerGroups() { * Destroy the entry on the remote node. * * @param recipient the member id of the receiver of the message - * @param bucketId the idenity of the bucket * @param event the event prompting this request * @param expectedOldValue if not null, then destroy only if entry exists and current value is * equal to expectedOldValue @@ -5490,29 +5410,24 @@ public Set getLocalServerGroups() { * @throws PrimaryBucketException if the bucket on that node is not the primary copy * @throws ForceReattemptException if the peer is no longer available */ - public void destroyRemotely(DistributedMember recipient, Integer bucketId, EntryEventImpl event, + public void destroyRemotely(DistributedMember recipient, EntryEventImpl event, Object expectedOldValue) throws EntryNotFoundException, PrimaryBucketException, ForceReattemptException { DestroyResponse response = DestroyMessage.send(recipient, this, event, expectedOldValue); - if (response != null) { - prStats.incPartitionMessagesSent(); - try { - response.waitForCacheException(); - event.setVersionTag(response.getVersionTag()); - } catch (EntryNotFoundException enfe) { - throw enfe; - } catch (TransactionDataNotColocatedException enfe) { - throw enfe; - } catch (TransactionDataRebalancedException e) { - throw e; - } catch (CacheException ce) { - throw new PartitionedRegionException( - String.format("Destroy of entry on %s failed", - recipient), - ce); - } catch (RegionDestroyedException ignore) { - throw new RegionDestroyedException(toString(), getFullPath()); - } + prStats.incPartitionMessagesSent(); + try { + response.waitForCacheException(); + event.setVersionTag(response.getVersionTag()); + } catch (EntryNotFoundException | TransactionDataRebalancedException + | TransactionDataNotColocatedException enfe) { + throw enfe; + } catch (CacheException ce) { + throw new PartitionedRegionException( + format("Destroy of entry on %s failed", + recipient), + ce); + } catch (RegionDestroyedException ignore) { + throw new RegionDestroyedException(toString(), getFullPath()); } } @@ -5539,7 +5454,7 @@ public VersionTag findVersionTagForEvent(EventID eventId) { if (dataStore != null) { Set> bucketMap = dataStore.getAllLocalBuckets(); for (Map.Entry entry : bucketMap) { - VersionTag result = entry.getValue().findVersionTagForEvent(eventId); + VersionTag result = entry.getValue().findVersionTagForEvent(eventId); if (result != null) { return result; } @@ -5550,11 +5465,9 @@ public VersionTag findVersionTagForEvent(EventID eventId) { @Override public VersionTag findVersionTagForClientBulkOp(EventID eventId) { - Map results = new HashMap<>(); if (dataStore != null) { - Set> bucketMap = dataStore.getAllLocalBuckets(); - for (Map.Entry entry : bucketMap) { - VersionTag bucketResult = entry.getValue().findVersionTagForClientBulkOp(eventId); + for (final Map.Entry entry : dataStore.getAllLocalBuckets()) { + final VersionTag bucketResult = entry.getValue().findVersionTagForClientBulkOp(eventId); if (bucketResult != null) { return bucketResult; } @@ -5576,7 +5489,7 @@ public void cleanupFailedInitialization() { 
redundancyProvider.waitForPersistentBucketRecovery(); cache.removePartitionedRegion(this); cache.getInternalResourceManager(false).removeResourceListener(this); - redundancyProvider.shutdown(); // see bug 41094 + redundancyProvider.shutdown(); int[] serials = getRegionAdvisor().getBucketSerials(); RegionEventImpl event = new RegionEventImpl(this, Operation.REGION_CLOSE, null, false, getMyId(), generateEventID()/* generate EventID */); @@ -5633,8 +5546,7 @@ public void cleanupFailedInitialization() { if (logger.isDebugEnabled()) { logger.debug("cleanupFailedInitialization: end of {}", getName()); } - if (savedFirstRuntimeException != null - && savedFirstRuntimeException instanceof DistributedSystemDisconnectedException) { + if (savedFirstRuntimeException instanceof DistributedSystemDisconnectedException) { logger.warn("cleanupFailedInitialization originally failed with:", savedFirstRuntimeException); throw (DistributedSystemDisconnectedException) savedFirstRuntimeException; @@ -5646,7 +5558,7 @@ public void cleanupFailedInitialization() { * * @since GemFire 5.0 */ - static void afterRegionsClosedByCacheClose(InternalCache cache) { + static void afterRegionsClosedByCacheClose() { PRQueryProcessor.shutdown(); clearPRIdMap(); } @@ -5677,7 +5589,7 @@ private void sendInvalidateRegionMessage(RegionEventImpl event) { while (count <= retryAttempts) { try { count++; - Set recipients = getRegionAdvisor().adviseDataStore(); + Set recipients = getRegionAdvisor().adviseDataStore(); ReplyProcessor21 response = InvalidatePartitionedRegionMessage.send(recipients, this, event); response.waitForReplies(); @@ -5701,7 +5613,7 @@ private void sendInvalidateRegionMessage(RegionEventImpl event) { } if (thr != null) { PartitionedRegionDistributionException e = new PartitionedRegionDistributionException( - String.format("Invalidating partitioned region caught exception %s", + format("Invalidating partitioned region caught exception %s", count)); if (logger.isDebugEnabled()) { logger.debug(e.getMessage(), e); @@ -5727,7 +5639,6 @@ public void basicInvalidate(EntryEventImpl event) throws EntryNotFoundException } finally { prStats.endInvalidate(startTime); } - return; } @Override @@ -5744,7 +5655,6 @@ void basicUpdateEntryVersion(EntryEventImpl event) throws EntryNotFoundException throw new RegionDestroyedException(toString(), getFullPath(), rde); } } - return; } /** @@ -5780,7 +5690,7 @@ void invalidateInBucket(final EntryEventImpl event) throws EntryNotFoundExceptio if (getRegionAdvisor().isStorageAssignedForBucket(bucketId)) { // bucket no longer exists throw new EntryNotFoundException( - String.format("Entry not found for key %s", + format("Entry not found for key %s", event.getKey())); } break; // fall out to failed exception @@ -5797,7 +5707,7 @@ void invalidateInBucket(final EntryEventImpl event) throws EntryNotFoundExceptio event.setInvokePRCallbacks(true); dataStore.invalidateLocally(bucketId, event); } else { - invalidateRemotely(retryNode, bucketId, event); + invalidateRemotely(retryNode, event); } return; } catch (ConcurrentCacheModificationException e) { @@ -5854,7 +5764,7 @@ void invalidateInBucket(final EntryEventImpl event) throws EntryNotFoundExceptio // No target was found PartitionedRegionDistributionException e = new PartitionedRegionDistributionException( - String.format("No VM available for invalidate in %s attempts.", + format("No VM available for invalidate in %s attempts.", count)); // Fix for bug 36014 if (!isDebugEnabled) { logger.warn("No VM available for invalidate in {} attempts.", 
count); @@ -5868,33 +5778,25 @@ void invalidateInBucket(final EntryEventImpl event) throws EntryNotFoundExceptio * invalidates the remote object with the given key. * * @param recipient the member id of the recipient of the operation - * @param bucketId the id of the bucket the key hashed into * @throws EntryNotFoundException if the entry does not exist in this region * @throws PrimaryBucketException if the bucket on that node is not the primary copy * @throws ForceReattemptException if the peer is no longer available */ - public void invalidateRemotely(DistributedMember recipient, Integer bucketId, - EntryEventImpl event) + public void invalidateRemotely(DistributedMember recipient, EntryEventImpl event) throws EntryNotFoundException, PrimaryBucketException, ForceReattemptException { InvalidateResponse response = InvalidateMessage.send(recipient, this, event); - if (response != null) { - prStats.incPartitionMessagesSent(); - try { - response.waitForResult(); - event.setVersionTag(response.versionTag); - return; - } catch (EntryNotFoundException ex) { - throw ex; - } catch (TransactionDataNotColocatedException ex) { - throw ex; - } catch (TransactionDataRebalancedException e) { - throw e; - } catch (CacheException ce) { - throw new PartitionedRegionException( - String.format("Invalidation of entry on %s failed", - recipient), - ce); - } + prStats.incPartitionMessagesSent(); + try { + response.waitForResult(); + event.setVersionTag(response.versionTag); + } catch (EntryNotFoundException | TransactionDataRebalancedException + | TransactionDataNotColocatedException ex) { + throw ex; + } catch (CacheException ce) { + throw new PartitionedRegionException( + format("Invalidation of entry on %s failed", + recipient), + ce); } } @@ -5912,50 +5814,38 @@ private int calcRetry() { * Creates the key/value pair into the remote target that is managing the key's bucket. 
* * @param recipient member id of the recipient of the operation - * @param bucketId the id of the bucket that the key hashed to * @param event the event prompting this request * @throws PrimaryBucketException if the bucket on that node is not the primary copy * @throws ForceReattemptException if the peer is no longer available */ - private boolean createRemotely(DistributedMember recipient, Integer bucketId, + private boolean createRemotely(DistributedMember recipient, EntryEventImpl event, boolean requireOldValue) throws PrimaryBucketException, ForceReattemptException { - boolean ret = false; - long eventTime = event.getEventTime(0L); + final long eventTime = event.getEventTime(0L); PutMessage.PutResponse reply = (PutMessage.PutResponse) PutMessage.send(recipient, this, event, eventTime, true, false, null, // expectedOldValue requireOldValue); - PutResult pr = null; - if (reply != null) { - prStats.incPartitionMessagesSent(); - try { - pr = reply.waitForResult(); - event.setOperation(pr.op); - event.setVersionTag(pr.versionTag); - if (requireOldValue) { - event.setOldValue(pr.oldValue, true); - } - ret = pr.returnValue; - } catch (EntryExistsException ignore) { - // This might not be necessary and is here for safety sake - ret = false; - } catch (TransactionDataNotColocatedException tdnce) { - throw tdnce; - } catch (TransactionDataRebalancedException e) { - throw e; - } catch (CacheException ce) { - throw new PartitionedRegionException( - String.format("Create of entry on %s failed", - recipient), - ce); - } catch (RegionDestroyedException rde) { - if (logger.isDebugEnabled()) { - logger.debug("createRemotely: caught exception", rde); - } - throw new RegionDestroyedException(toString(), getFullPath()); + prStats.incPartitionMessagesSent(); + try { + final PutResult pr = reply.waitForResult(); + event.setOperation(pr.op); + event.setVersionTag(pr.versionTag); + if (requireOldValue) { + event.setOldValue(pr.oldValue, true); + } + return pr.returnValue; + } catch (EntryExistsException ignore) { + } catch (TransactionDataNotColocatedException | TransactionDataRebalancedException tdnce) { + throw tdnce; + } catch (CacheException ce) { + throw new PartitionedRegionException(format("Create of entry on %s failed", recipient), ce); + } catch (RegionDestroyedException rde) { + if (logger.isDebugEnabled()) { + logger.debug("createRemotely: caught exception", rde); } + throw new RegionDestroyedException(toString(), getFullPath()); } - return ret; + return false; } // //////////////////////////////// @@ -5980,7 +5870,7 @@ public Set entrySet(boolean recursive) { return Collections.unmodifiableSet(new PREntriesSet()); } - public Set entrySet(Set bucketIds) { + public Set> entrySet(Set bucketIds) { return new PREntriesSet(bucketIds); } @@ -5999,7 +5889,7 @@ private class EntriesSetIterator extends KeysSetIterator { /** reusable KeyInfo */ private final KeyInfo key = new KeyInfo(null, null, null); - public EntriesSetIterator(Set bucketSet, boolean allowTombstones) { + public EntriesSetIterator(Set bucketSet, boolean allowTombstones) { super(bucketSet, allowTombstones); PREntriesSet.this.allowTombstones = allowTombstones; } @@ -6055,11 +5945,12 @@ public Set keySet(boolean allowTombstones) { /** * Get a keyset of the given buckets */ - public Set keySet(Set bucketSet) { + public Set keySet(Set bucketSet) { return new KeysSet(bucketSet); } - public Set keysWithoutCreatesForTests() { + @VisibleForTesting + public Set keysWithoutCreatesForTests() { checkReadiness(); Set availableBuckets = new HashSet<>(); 
for (int i = 0; i < getTotalNumberOfBuckets(); i++) { @@ -6074,7 +5965,7 @@ public Set keysWithoutCreatesForTests() { protected class KeysSet extends EntriesSet { class KeysSetIterator implements PREntriesIterator { final Iterator bucketSetI; - volatile Iterator currentBucketI = null; + volatile Iterator currentBucketI; int currentBucketId = -1; volatile Object currentKey = null; protected final Set bucketSet; @@ -6129,7 +6020,7 @@ public Object next() { return currentKey; } - protected Iterator getNextBucketIter(boolean canThrow) { + protected Iterator getNextBucketIter(boolean canThrow) { try { currentBucketId = bucketSetI.next(); // TODO: optimize this code by implementing getBucketKeysIterator. @@ -6202,24 +6093,20 @@ public int size() { @Override public Object[] toArray() { - return toArray(null); + return toArray(new Object[0]); } @Override - public Object[] toArray(Object[] array) { - List temp = new ArrayList(size()); + public Object[] toArray(Object @NotNull [] array) { + List temp = new ArrayList<>(size()); for (final Object o : this) { temp.add(o); } - if (array == null) { - return temp.toArray(); - } else { - return temp.toArray(array); - } + return temp.toArray(array); } @Override - public Iterator iterator() { + public Iterator iterator() { checkTX(); return new KeysSetIterator(bucketSet, allowTombstones); } @@ -6255,7 +6142,7 @@ private class ValuesSetIterator extends KeysSetIterator { /** reusable KeyInfo */ private final KeyInfo key = new KeyInfo(null, null, null); - public ValuesSetIterator(Set bucketSet) { + public ValuesSetIterator(Set bucketSet) { super(bucketSet, false); } @@ -6270,8 +6157,9 @@ public boolean hasNext() { } key.setKey(super.next()); key.setBucketId(currentBucketId); - Region.Entry re = (Region.Entry) view.getEntryForIterator(key, PartitionedRegion.this, - rememberReads, allowTombstones); + Region.Entry re = + (Region.Entry) view.getEntryForIterator(key, PartitionedRegion.this, + rememberReads, allowTombstones); if (re != null) { nextValue = re.getValue(); } @@ -6306,7 +6194,7 @@ public ValuesSet(Set bucketSet) { } @Override - public Iterator iterator() { + public Iterator iterator() { checkTX(); return new ValuesSetIterator(bucketSet); } @@ -6332,11 +6220,11 @@ public boolean containsValue(final Object value) { } } - ResultCollector rc = null; try { - rc = FunctionService.onRegion(this).setArguments(value) - .execute(PRContainsValueFunction.class.getName()); - List results = ((List) rc.getResult()); + final ResultCollector> rc = + FunctionService.onRegion(this).setArguments(value) + .execute(PRContainsValueFunction.class.getName()); + final List results = rc.getResult(); for (Boolean r : results) { if (r) { return true; @@ -6489,9 +6377,9 @@ boolean containsKeyInBucket(final InternalDistributedMember targetNode, final In Integer countInteger = count; PartitionedRegionDistributionException e = null; // Fix for bug 36014 if (logger.isDebugEnabled()) { - e = new PartitionedRegionDistributionException(String.format(msg, countInteger)); + e = new PartitionedRegionDistributionException(format(msg, countInteger)); } - logger.warn(String.format(msg, countInteger), e); + logger.warn(format(msg, countInteger), e); return false; } @@ -6534,7 +6422,7 @@ public boolean containsValueForKey(Object key) { checkReadiness(); validateKey(key); final long startTime = prStats.getTime(); - boolean containsValueForKey = false; + final boolean containsValueForKey; try { containsValueForKey = getDataView().containsValueForKey(getKeyInfo(key), this); } finally { @@ -6636,7 
+6524,7 @@ public int entryCount(Set buckets, boolean estimate) { if (dataStore != null) { bucketSizes = dataStore.getSizeForLocalBuckets(); } - HashSet recips = (HashSet) getRegionAdvisor().adviseDataStore(true); + Set recips = getRegionAdvisor().adviseDataStore(true); recips.remove(getMyId()); if (!recips.isEmpty()) { Map remoteSizes = getSizeRemotely(recips, false); @@ -6683,17 +6571,16 @@ long getEstimatedLocalSize() { * * @return the size of all the buckets hosted on the target node. */ - private Map getSizeRemotely(Set targetNodes, boolean estimate) { + private Map getSizeRemotely(Set targetNodes, + boolean estimate) { SizeResponse r = SizeMessage.send(targetNodes, this, null, estimate); prStats.incPartitionMessagesSent(); - Map retVal = null; try { - retVal = r.waitBucketSizes(); + return r.waitBucketSizes(); } catch (CacheException e) { checkReadiness(); throw e; } - return retVal; } /** @@ -7064,7 +6951,7 @@ public PRHARedundancyProvider getRedundancyProvider() { public void checkClosed() { if (isClosed) { throw new RegionDestroyedException( - String.format("PR %s is locally closed", this), + format("PR %s is locally closed", this), getFullPath()); } } @@ -7162,7 +7049,7 @@ public void checkForColocatedChildren() { } } if (!childRegionList.isEmpty()) { - throw new IllegalStateException(String.format( + throw new IllegalStateException(format( "The parent region [%s] in colocation chain cannot " + "be destroyed, unless all its children [%s] are destroyed", getFullPath(), childRegionList)); @@ -7223,7 +7110,7 @@ public List getMissingColocatedChildren() { if (colocationLogger != null) { return colocationLogger.updateAndGetMissingChildRegions(); } - return Collections.emptyList(); + return emptyList(); } public void destroyParallelGatewaySenderRegion(Operation op, boolean cacheWrite, boolean lock, @@ -7280,15 +7167,13 @@ public void destroyParallelGatewaySenderRegion(Operation op, boolean cacheWrite, } if (!pausedSenders.isEmpty()) { - String exception = null; + final String exception; if (pausedSenders.size() == 1) { - exception = - String.format("GatewaySender %s is paused. Resume it before destroying region %s.", - pausedSenders, getName()); + exception = format("GatewaySender %s is paused. Resume it before destroying region %s.", + pausedSenders, getName()); } else { exception = - String.format( - "GatewaySenders %s are paused. Resume them before destroying region %s.", + format("GatewaySenders %s are paused. Resume them before destroying region %s.", pausedSenders, getName()); } isDestroyedForParallelWAN = false; @@ -7308,24 +7193,23 @@ public void destroyParallelGatewaySenderRegion(Operation op, boolean cacheWrite, // keepWaiting : comes from the MAXIMUM_SHUTDOWN_WAIT_TIME case handled if (cacheWrite && parallelQueueRegion.size() != 0 && keepWaiting) { continue; - } else {// In any case, destroy shadow PR locally. distributed destroy of - // userPR will take care of detsroying shadowPR locally on other - // nodes. 
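In the KeysSet.toArray hunk earlier in this file, the no-argument overload now delegates with new Object[0] instead of null. Collection.toArray(T[]) throws NullPointerException for a null argument, and a zero-length array is the conventional way to request a right-sized, correctly typed result. A small sketch:

```java
import java.util.ArrayList;
import java.util.List;

public class ToArrayDemo {
  public static void main(String[] args) {
    List<String> keys = new ArrayList<>();
    keys.add("alpha");
    keys.add("beta");

    // Per the java.util.Collection contract, the argument array must be
    // non-null; a zero-length array asks the collection to allocate a
    // result of exactly the right size and type.
    String[] typed = keys.toArray(new String[0]);
    Object[] untyped = keys.toArray(new Object[0]);
    System.out.println(typed.length + " " + untyped[1]); // 2 beta
  }
}
```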
- RegionEventImpl event = null; - if (op.isClose()) { // In case of cache close operation, we want SPR's basic destroy to go - // through CACHE_CLOSE condition of postDestroyRegion not - // closePartitionedRegion code - event = new RegionEventImpl(parallelQueueRegion, op, null, false, getMyId(), - generateEventID()); - } else { - event = new RegionEventImpl(parallelQueueRegion, Operation.REGION_LOCAL_DESTROY, null, - false, getMyId(), generateEventID()); - } - parallelQueueRegion.basicDestroyRegion(event, false, lock, callbackEvents); - parallelQueue.removeShadowPR(getFullPath()); - countOfQueueRegionsToBeDestroyed--; - continue; } + + // In any case, destroy shadow PR locally. distributed destroy of + // userPR will take care of destroying shadowPR locally on other nodes. + final RegionEventImpl event; + if (op.isClose()) { // In case of cache close operation, we want SPR's basic destroy to go + // through CACHE_CLOSE condition of postDestroyRegion not + // closePartitionedRegion code + event = new RegionEventImpl(parallelQueueRegion, op, null, false, getMyId(), + generateEventID()); + } else { + event = new RegionEventImpl(parallelQueueRegion, Operation.REGION_LOCAL_DESTROY, null, + false, getMyId(), generateEventID()); + } + parallelQueueRegion.basicDestroyRegion(event, false, lock, callbackEvents); + parallelQueue.removeShadowPR(getFullPath()); + countOfQueueRegionsToBeDestroyed--; } if (countOfQueueRegionsToBeDestroyed == 0) { @@ -7573,16 +7457,16 @@ private boolean attemptToSendDestroyRegionMessage(RegionEventImpl event, int[] s new UpdateAttributesProcessor(this, true).distribute(false); return false; } - final HashSet configRecipients = new HashSet(getRegionAdvisor().adviseAllPRNodes()); + final Set configRecipients = + new HashSet<>(getRegionAdvisor().adviseAllPRNodes()); // It's possible this instance has not been initialized // or hasn't gotten through initialize() far enough to have - // sent a CreateRegionProcessor message, bug 36048 + // sent a CreateRegionProcessor message try { final PartitionRegionConfig prConfig = getPRRoot().get(getRegionIdentifier()); if (prConfig != null) { - // Fix for bug#34621 by Tushar for (final Node value : prConfig.getNodes()) { InternalDistributedMember idm = value.getMemberId(); if (!idm.equals(getMyId())) {
serverDestroy(event, expectedOldValue); - CacheWriter localWriter = basicGetWriter(); - Set netWriteRecipients = localWriter == null ? distAdvisor.adviseNetWrite() : null; + CacheWriter localWriter = basicGetWriter(); + Set netWriteRecipients = + localWriter == null ? distAdvisor.adviseNetWrite() : null; if (localWriter == null && (netWriteRecipients == null || netWriteRecipients.isEmpty())) { return false; @@ -7997,6 +7883,7 @@ public void dumpBackingMap() { * * @see #dumpAllBuckets(boolean) */ + @VisibleForTesting public void validateAllBuckets() throws ReplyException { PartitionResponse response = DumpBucketsMessage.send(getRegionAdvisor().adviseAllPRNodes(), this, true /* only validate */, false); @@ -8182,9 +8069,9 @@ void enableConcurrencyChecks() { * @param allowTombstones whether to include destroyed entries * @param collector object that will receive the keys as they arrive */ - public void getKeysWithRegEx(String regex, boolean allowTombstones, SetCollector collector) + public void getKeysWithRegEx(String regex, boolean allowTombstones, SetCollector collector) throws IOException { - _getKeysWithInterest(InterestType.REGULAR_EXPRESSION, regex, allowTombstones, collector); + getKeysWithInterest(InterestType.REGULAR_EXPRESSION, regex, allowTombstones, collector); } /** @@ -8194,9 +8081,10 @@ public void getKeysWithRegEx(String regex, boolean allowTombstones, SetCollector * @param allowTombstones whether to return destroyed entries * @param collector object that will receive the keys as they arrive */ - public void getKeysWithList(List keyList, boolean allowTombstones, SetCollector collector) + public void getKeysWithList(List keyList, boolean allowTombstones, + SetCollector collector) throws IOException { - _getKeysWithInterest(InterestType.KEY, keyList, allowTombstones, collector); + getKeysWithInterest(InterestType.KEY, keyList, allowTombstones, collector); } /** @@ -8204,9 +8092,8 @@ public void getKeysWithList(List keyList, boolean allowTombstones, SetCollector * * @param allowTombstones whether to return destroyed entries */ - private void _getKeysWithInterest(final @NotNull InterestType interestType, Object interestArg, - boolean allowTombstones, - SetCollector collector) throws IOException { + private void getKeysWithInterest(final @NotNull InterestType interestType, Object interestArg, + boolean allowTombstones, SetCollector collector) throws IOException { // this could be parallelized by building up a list of buckets for each // vm and sending out the requests for keys in parallel. 
That might dump // more onto this vm in one swoop than it could handle, though, so we're @@ -8214,7 +8101,7 @@ private void _getKeysWithInterest(final @NotNull InterestType interestType, Obje int totalBuckets = getTotalNumberOfBuckets(); int retryAttempts = calcRetry(); for (int bucket = 0; bucket < totalBuckets; bucket++) { - Set bucketSet = null; + Set bucketKeys = null; final RetryTimeKeeper retryTime = new RetryTimeKeeper(Integer.MAX_VALUE); InternalDistributedMember bucketNode = getOrCreateNodeForBucketRead(bucket); for (int count = 0; count <= retryAttempts; count++) { @@ -8224,12 +8111,12 @@ private void _getKeysWithInterest(final @NotNull InterestType interestType, Obje try { if (bucketNode != null) { if (bucketNode.equals(getMyId())) { - bucketSet = dataStore.handleRemoteGetKeys(bucket, interestType, interestArg, + bucketKeys = dataStore.handleRemoteGetKeys(bucket, interestType, interestArg, allowTombstones); } else { FetchKeysResponse r = FetchKeysMessage.sendInterestQuery(bucketNode, this, bucket, interestType, interestArg, allowTombstones); - bucketSet = r.waitForKeys(); + bucketKeys = uncheckedCast(r.waitForKeys()); } } break; @@ -8255,8 +8142,8 @@ private void _getKeysWithInterest(final @NotNull InterestType interestType, Obje } } } // for(count) - if (bucketSet != null) { - collector.receiveSet(bucketSet); + if (bucketKeys != null) { + collector.receiveSet(bucketKeys); } } // for(bucket) } @@ -8280,8 +8167,8 @@ public boolean shouldNotifyBridgeClients() { * * @since GemFire 5.1 */ - public interface SetCollector { - void receiveSet(Set theSet) throws IOException; + public interface SetCollector { + void receiveSet(Set theSet) throws IOException; } /** @@ -8298,13 +8185,14 @@ public boolean isIndexed() { * * @return Map of all the indexes created. */ - public Map getIndex() { - Hashtable availableIndexes = new Hashtable(); - for (final Object ind : indexes.values()) { + public Map getIndex() { + Map availableIndexes = new HashMap<>(); + for (final Object o : indexes.values()) { // Check if the returned value is instance of Index (this means // the index is not in create phase, its created successfully). - if (ind instanceof Index) { - availableIndexes.put(((Index) ind).getName(), ind); + if (o instanceof Index) { + final Index index = (Index) o; + availableIndexes.put(index.getName(), index); } } return availableIndexes; @@ -8330,17 +8218,17 @@ public PartitionedIndex getIndex(String indexName) { * * @return collection of all the indexes */ - public Collection getIndexes() { + public Collection getIndexes() { if (indexes.isEmpty()) { - return Collections.emptyList(); + return emptyList(); } - ArrayList idxs = new ArrayList(); + List idxs = new ArrayList<>(); for (final Object ind : indexes.values()) { // Check if the returned value is instance of Index (this means // the index is not in create phase, its created successfully). if (ind instanceof Index) { - idxs.add(ind); + idxs.add((Index) ind); } } return idxs; @@ -8381,7 +8269,7 @@ public Index createIndex(boolean remotelyOriginated, IndexType indexType, String // data store where as it should have. 
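The SetCollector hunk above is one of several where the - and + lines read identically because this rendering swallows angle-bracket type parameters; presumably the interface gained a type parameter. An assumed sketch of the generified shape:

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class SetCollectorDemo {
  // Assumed shape of the generified callback; the <T> is inferred from the
  // surrounding changes, not visible in this diff rendering.
  interface SetCollector<T> {
    void receiveSet(Set<T> theSet) throws IOException;
  }

  public static void main(String[] args) throws IOException {
    // A typed collector needs no raw-type casts at the call site.
    SetCollector<String> printer = keys -> keys.forEach(System.out::println);
    printer.receiveSet(new HashSet<>(Arrays.asList("key1", "key2")));
  }
}
```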
if (getLocalMaxMemory() != 0) { throw new IndexCreationException( - String.format( + format( "Data Store on this vm is null and the local max Memory is not zero, the data policy is %s and the localMaxMemeory is : %s", getDataPolicy(), (long) getLocalMaxMemory())); } @@ -8417,7 +8305,7 @@ public Index createIndex(boolean remotelyOriginated, IndexType indexType, String } throw new IndexNameConflictException( - String.format("Index named ' %s ' already exists.", indexName)); + format("Index named ' %s ' already exists.", indexName)); } FutureTask oldIndexFutureTask = (FutureTask) ind; @@ -8445,7 +8333,7 @@ public Index createIndex(boolean remotelyOriginated, IndexType indexType, String icd.setIndexData(indexType, fromClause, indexedExpression, imports, loadEntries); singleIndexDefinition.add(icd); - IndexCreationMsg.IndexCreationResponse response = null; + final IndexCreationMsg.IndexCreationResponse response; try { response = (IndexCreationMsg.IndexCreationResponse) IndexCreationMsg.send(null, this, singleIndexDefinition); @@ -8453,7 +8341,7 @@ public Index createIndex(boolean remotelyOriginated, IndexType indexType, String IndexCreationMsg.IndexCreationResult result = response.waitForResult(); Map indexBucketsMap = result.getIndexBucketsMap(); if (indexBucketsMap != null && indexBucketsMap.size() > 0) { - prIndex.setRemoteBucketesIndexed(indexBucketsMap.values().iterator().next()); + prIndex.setRemoteBucketsIndexed(indexBucketsMap.values().iterator().next()); } } } catch (UnsupportedOperationException ignore) { @@ -8477,7 +8365,7 @@ public Index createIndex(boolean remotelyOriginated, IndexType indexType, String } throw new IndexNameConflictException( - String.format("Index named ' %s ' already exists.", + format("Index named ' %s ' already exists.", indexName)); } } catch (InterruptedException ignore) { @@ -8521,7 +8409,7 @@ public List createIndexes(boolean remotelyOriginated, // data store where as it should have. 
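Throughout these hunks, String.format(...) calls shrink to bare format(...) through a static import; the PartitionedRegionHelper portion below shows the matching `import static java.lang.String.format;` line. A tiny demonstration of the idiom:

```java
import static java.lang.String.format;

public class StaticImportDemo {
  public static void main(String[] args) {
    // With the static import, call sites drop the String. prefix.
    // The index name here is illustrative only.
    String message = format("Index named '%s' already exists.", "cusip-index");
    System.out.println(message);
  }
}
```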
if (getLocalMaxMemory() != 0) { throw new IndexCreationException( - String.format( + format( "Data Store on this vm is null and the local max Memory is not zero, the data policy is %s and the localMaxMemeory is : %s", getDataPolicy(), (long) getLocalMaxMemory())); } @@ -8626,11 +8514,9 @@ boolean populateEmptyIndexes(Set indexes, HashMap exceptionsMap) { boolean throwException = false; if (getDataStore() != null && indexes.size() > 0) { - Set localBuckets = getDataStore().getAllLocalBuckets(); - for (final Object localBucket : localBuckets) { - Map.Entry entry = (Map.Entry) localBucket; - Region bucket = (Region) entry.getValue(); - + Set> localBuckets = getDataStore().getAllLocalBuckets(); + for (final Map.Entry entry : localBuckets) { + final BucketRegion bucket = entry.getValue(); if (bucket == null) { continue; } @@ -8651,7 +8537,7 @@ boolean populateEmptyIndexes(Set indexes, } @VisibleForTesting - Set getBucketIndexesForPRIndexes(Region bucket, Set indexes) { + Set getBucketIndexesForPRIndexes(Region bucket, Set indexes) { Set bucketIndexes = new HashSet<>(); for (Index ind : indexes) { bucketIndexes.addAll(((PartitionedIndex) ind).getBucketIndexes(bucket)); @@ -8679,7 +8565,7 @@ private boolean sendCreateIndexesMessage(boolean remotelyOriginated, for (Index ind : indexes) { if (remoteIndexBucketsMap.containsKey(ind.getName())) { ((PartitionedIndex) ind) - .setRemoteBucketesIndexed(remoteIndexBucketsMap.get(ind.getName())); + .setRemoteBucketsIndexed(remoteIndexBucketsMap.get(ind.getName())); } } } @@ -8727,18 +8613,16 @@ public void sendIndexCreationMsg(InternalDistributedMember idM) { return; } - Iterator it = indexes.values().iterator(); HashSet indexDefinitions = new HashSet<>(); - Set indexes = new HashSet<>(); - while (it.hasNext()) { - Object ind = it.next(); + Set partitionedIndices = new HashSet<>(); + for (final Object ind : indexes.values()) { // Check if the returned value is instance of Index (this means // the index is not in create phase, its created successfully). 
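populateEmptyIndexes and its neighbors trade raw-iterator loops plus casts for typed Map.Entry iteration (the exact type arguments are again eaten by this rendering). A sketch of the pattern with stand-in types:

```java
import java.util.HashMap;
import java.util.Map;

public class TypedEntryDemo {
  public static void main(String[] args) {
    // Stand-in for dataStore.getAllLocalBuckets(): bucket id -> bucket.
    Map<Integer, String> localBuckets = new HashMap<>();
    localBuckets.put(1, "bucket-1");
    localBuckets.put(2, null); // a slot with no hosted bucket

    // A typed entry set removes the (Map.Entry) and value casts while
    // keeping the null guard the original loop relied on.
    for (Map.Entry<Integer, String> entry : localBuckets.entrySet()) {
      String bucket = entry.getValue();
      if (bucket == null) {
        continue;
      }
      System.out.println(entry.getKey() + " -> " + bucket);
    }
  }
}
```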
if (!(ind instanceof Index)) { continue; } PartitionedIndex prIndex = (PartitionedIndex) ind; - indexes.add(prIndex); + partitionedIndices.add(prIndex); IndexCreationData icd = new IndexCreationData(prIndex.getName()); icd.setIndexData(prIndex.getType(), prIndex.getFromClause(), prIndex.getIndexedExpression(), prIndex.getImports(), true); @@ -8757,12 +8641,11 @@ public void sendIndexCreationMsg(InternalDistributedMember idM) { result = response.waitForResult(); Map remoteIndexBucketsMap = result.getIndexBucketsMap(); // set the number of remote buckets indexed for each pr index - for (Index ind : indexes) { - ((PartitionedIndex) ind) - .setRemoteBucketesIndexed(remoteIndexBucketsMap.get(ind.getName())); + for (PartitionedIndex ind : partitionedIndices) { + ind.setRemoteBucketsIndexed(remoteIndexBucketsMap.get(ind.getName())); } } catch (ForceReattemptException e) { - logger.info(String.format("ForceReattempt exception : %s", e)); + logger.info(format("ForceReattempt exception : %s", e)); } } } @@ -8787,10 +8670,8 @@ public int removeIndexes(boolean remotelyOriginated) this); try { - for (Object bucketEntryObject : dataStore.getAllLocalBuckets()) { - LocalRegion bucket = null; - Map.Entry bucketEntry = (Map.Entry) bucketEntryObject; - bucket = (LocalRegion) bucketEntry.getValue(); + for (Map.Entry bucketEntry : dataStore.getAllLocalBuckets()) { + final LocalRegion bucket = bucketEntry.getValue(); if (bucket != null) { bucket.waitForData(); IndexManager indexMang = IndexUtils.getIndexManager(cache, bucket, false); @@ -8879,16 +8760,14 @@ public int removeIndex(Index ind, boolean remotelyOriginated) // For releasing the write lock after removal. try { - synchronized (prIndex) { - List allBucketIndex = ((PartitionedIndex) prIndex).getBucketIndexes(); - Iterator it = allBucketIndex.iterator(); + synchronized (index) { + final List allBucketIndex = index.getBucketIndexes(); if (logger.isDebugEnabled()) { logger.debug("Will be removing indexes on : {} buckets", allBucketIndex.size()); } - while (it.hasNext()) { - Index in = (Index) it.next(); + for (final Index in : allBucketIndex) { LocalRegion region = ((LocalRegion) in.getRegion()); region.waitForData(); IndexManager indMng = region.getIndexManager(); @@ -9063,7 +8942,7 @@ public long getBirthTime() { return birthTime; } - public PartitionResolver getPartitionResolver() { + public PartitionResolver getPartitionResolver() { return partitionAttributes.getPartitionResolver(); } @@ -9238,9 +9117,8 @@ public ExpirationAttributes setRegionTimeToLive(ExpirationAttributes timeToLive) ExpirationAttributes attr = super.setRegionTimeToLive(timeToLive); // Set to Bucket regions as well if (getDataStore() != null) { // not for accessors - for (Object o : getDataStore().getAllLocalBuckets()) { - Map.Entry entry = (Map.Entry) o; - Region bucketRegion = (Region) entry.getValue(); + for (Map.Entry entry : getDataStore().getAllLocalBuckets()) { + BucketRegion bucketRegion = entry.getValue(); bucketRegion.getAttributesMutator().setRegionTimeToLive(timeToLive); } } @@ -9262,9 +9140,8 @@ public ExpirationAttributes setRegionIdleTimeout(ExpirationAttributes idleTimeou ExpirationAttributes attr = super.setRegionIdleTimeout(idleTimeout); // Set to Bucket regions as well if (getDataStore() != null) { // not for accessors - for (Object o : getDataStore().getAllLocalBuckets()) { - Map.Entry entry = (Map.Entry) o; - Region bucketRegion = (Region) entry.getValue(); + for (Map.Entry entry : getDataStore().getAllLocalBuckets()) { + BucketRegion bucketRegion = 
entry.getValue(); bucketRegion.getAttributesMutator().setRegionIdleTimeout(idleTimeout); } } @@ -9307,7 +9184,7 @@ public ExpirationAttributes setEntryTimeToLive(ExpirationAttributes timeToLive) */ @Override public CustomExpiry setCustomEntryTimeToLive(CustomExpiry custom) { - CustomExpiry expiry = super.setCustomEntryTimeToLive(custom); + CustomExpiry expiry = super.setCustomEntryTimeToLive(custom); // Set to Bucket regions as well if (dataStore != null) { dataStore.lockBucketCreationAndVisit( @@ -9353,7 +9230,7 @@ public ExpirationAttributes setEntryIdleTimeout(ExpirationAttributes idleTimeout */ @Override public CustomExpiry setCustomEntryIdleTimeout(CustomExpiry custom) { - CustomExpiry expiry = super.setCustomEntryIdleTimeout(custom); + CustomExpiry expiry = super.setCustomEntryIdleTimeout(custom); // Set to Bucket regions as well if (dataStore != null) { dataStore.lockBucketCreationAndVisit( @@ -9434,7 +9311,7 @@ public void run() { } } if (!bucketList.isEmpty()) { - Collections.sort(bucketList, (buk1, buk2) -> { + bucketList.sort((buk1, buk2) -> { long buk1NumEntries = buk1.getSizeForEviction(); long buk2NumEntries = buk2.getSizeForEviction(); if (buk1NumEntries > buk2NumEntries) { @@ -9485,7 +9362,7 @@ public LocalRegion getDataRegionForRead(final KeyInfo keyInfo) { } catch (RegionDestroyedException ignore) { // TODO: why is this purposely not wrapping the original cause? throw new TransactionDataNotColocatedException( - String.format("Key %s is not colocated with transaction", + format("Key %s is not colocated with transaction", entryKey)); } catch (ForceReattemptException ignore) { br = null; @@ -9620,7 +9497,6 @@ public class IndexTask implements Callable { IndexTask(boolean remotelyOriginated, IndexType indexType, String indexName, String indexedExpression, String fromClaus, String imports, boolean loadEntries) { - this.indexName = indexName; this.remotelyOriginated = remotelyOriginated; this.indexType = indexType; this.indexName = indexName; @@ -9663,7 +9539,7 @@ public PartitionedIndex call() throws IndexCreationException, IndexNameConflictE } else { if (getLocalMaxMemory() != 0) { throw new IndexCreationException( - String.format("Data Store on this vm is null and the local max Memory is not zero %s", + format("Data Store on this vm is null and the local max Memory is not zero %s", (long) getLocalMaxMemory())); } logger.info("This is an accessor vm and doesnt contain data"); @@ -9680,10 +9556,9 @@ public PartitionedIndex call() throws IndexCreationException, IndexNameConflictE * This creates indexes on PR buckets. 
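The eviction hunk above swaps Collections.sort(bucketList, comparator) for bucketList.sort(comparator), the default method List gained in Java 8. The hand-written two-branch long comparison could likely shrink further with a key-extractor comparator, sketched here on illustrative data:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ListSortDemo {
  public static void main(String[] args) {
    List<String> buckets = new ArrayList<>(Arrays.asList("bucket-10", "b-2", "bkt-005"));
    // comparingInt replaces manual if/else compare logic; reversed() would
    // give the largest-first ordering an evictor typically wants.
    buckets.sort(Comparator.comparingInt(String::length));
    System.out.println(buckets); // [b-2, bkt-005, bucket-10]
  }
}
```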
*/ private PartitionedIndex createIndexOnPRBuckets() - throws IndexNameConflictException, IndexExistsException, IndexCreationException { + throws IndexNameConflictException, IndexExistsException { - Set localBuckets = getDataStore().getAllLocalBuckets(); - Iterator it = localBuckets.iterator(); + Set> localBuckets = getDataStore().getAllLocalBuckets(); QCompiler compiler = new QCompiler(); if (imports != null) { compiler.compileImports(imports); @@ -9698,32 +9573,26 @@ private PartitionedIndex createIndexOnPRBuckets() // set this the flag to true However if the region is empty, we should set this flag to true // so it will be reported as used even though there is no data in the region - if (!it.hasNext()) { + if (localBuckets.isEmpty()) { parIndex.setPopulated(true); } - while (it.hasNext()) { - Map.Entry entry = (Map.Entry) it.next(); - Region bucket = (Region) entry.getValue(); + for (final Map.Entry entry : localBuckets) { + BucketRegion bucket = entry.getValue(); if (bucket == null) { continue; } ExecutionContext externalContext = new ExecutionContext(null, cache); - externalContext.setBucketRegion(PartitionedRegion.this, (BucketRegion) bucket); + externalContext.setBucketRegion(PartitionedRegion.this, bucket); IndexManager indMng = IndexUtils.getIndexManager(cache, bucket, true); try { Index bucketIndex = indMng.createIndex(indexName, indexType, indexedExpression, fromClause, imports, externalContext, parIndex, loadEntries); - // parIndex.addToBucketIndexes(bucketIndex); - } catch (IndexNameConflictException ince) { + } catch (IndexNameConflictException | IndexExistsException ince) { if (!remotelyOriginated) { throw ince; } - } catch (IndexExistsException iee) { - if (!remotelyOriginated) { - throw iee; - } } } // End of bucket list parIndex.markValid(true); @@ -9735,9 +9604,9 @@ public List getFixedPartitionAttributesImpl() { return fixedPAttrs; } + @VisibleForTesting public List getPrimaryFixedPartitionAttributes_TestsOnly() { - List primaryFixedPAttrs = - new LinkedList<>(); + List primaryFixedPAttrs = new LinkedList<>(); if (fixedPAttrs != null) { for (FixedPartitionAttributesImpl fpa : fixedPAttrs) { if (fpa.isPrimary()) { @@ -9748,9 +9617,9 @@ public List getPrimaryFixedPartitionAttributes_Tes return primaryFixedPAttrs; } + @VisibleForTesting public List getSecondaryFixedPartitionAttributes_TestsOnly() { - List secondaryFixedPAttrs = - new LinkedList<>(); + List secondaryFixedPAttrs = new LinkedList<>(); if (fixedPAttrs != null) { for (FixedPartitionAttributesImpl fpa : fixedPAttrs) { if (!fpa.isPrimary()) { @@ -9847,6 +9716,7 @@ public BucketRegion getBucketRegion(Object key, Object value) { * * @since GemFire 6.1.2.9 */ + @VisibleForTesting public int getPerEntryLRUOverhead() { if (dataStore == null) { // this is an accessor return -1; @@ -9925,7 +9795,7 @@ public void updateEntryVersionInBucket(EntryEventImpl event) { if (getRegionAdvisor().isStorageAssignedForBucket(bucketId)) { // bucket no longer exists throw new EntryNotFoundException( - String.format("Entry not found for key %s", + format("Entry not found for key %s", event.getKey())); } break; // fall out to failed exception @@ -9941,7 +9811,7 @@ public void updateEntryVersionInBucket(EntryEventImpl event) { if (isLocal) { dataStore.updateEntryVersionLocally(bucketId, event); } else { - updateEntryVersionRemotely(retryNode, bucketId, event); + updateEntryVersionRemotely(retryNode, event); } return; } catch (ConcurrentCacheModificationException e) { @@ -9996,7 +9866,7 @@ public void 
updateEntryVersionInBucket(EntryEventImpl event) { // No target was found PartitionedRegionDistributionException e = new PartitionedRegionDistributionException( - String.format("No VM available for update-version in %s attempts.", + format("No VM available for update-version in %s attempts.", count)); // Fix for bug 36014 if (!isDebugEnabled) { logger.warn("No VM available for update-version in {} attempts.", @@ -10011,33 +9881,27 @@ public void updateEntryVersionInBucket(EntryEventImpl event) { * Updates the entry version timestamp of the remote object with the given key. * * @param recipient the member id of the recipient of the operation - * @param bucketId the id of the bucket the key hashed into * @throws EntryNotFoundException if the entry does not exist in this region * @throws PrimaryBucketException if the bucket on that node is not the primary copy * @throws ForceReattemptException if the peer is no longer available */ - private void updateEntryVersionRemotely(InternalDistributedMember recipient, Integer bucketId, + private void updateEntryVersionRemotely(InternalDistributedMember recipient, EntryEventImpl event) throws EntryNotFoundException, PrimaryBucketException, ForceReattemptException { - UpdateEntryVersionResponse response = PRUpdateEntryVersionMessage.send(recipient, this, event); - if (response != null) { - prStats.incPartitionMessagesSent(); - try { - response.waitForResult(); - return; - } catch (EntryNotFoundException ex) { - throw ex; - } catch (TransactionDataNotColocatedException ex) { - throw ex; - } catch (TransactionDataRebalancedException e) { - throw e; - } catch (CacheException ce) { - throw new PartitionedRegionException( - String.format("Update version of entry on %s failed.", - recipient), - ce); - } + final UpdateEntryVersionResponse response = + PRUpdateEntryVersionMessage.send(recipient, this, event); + prStats.incPartitionMessagesSent(); + try { + response.waitForResult(); + } catch (EntryNotFoundException | TransactionDataRebalancedException + | TransactionDataNotColocatedException ex) { + throw ex; + } catch (CacheException ce) { + throw new PartitionedRegionException( + format("Update version of entry on %s failed.", + recipient), + ce); } } @@ -10103,8 +9967,8 @@ public EntryExpiryTask getEntryExpiryTask(Object key) { void updatePRNodeInformation() { updatePartitionRegionConfig(prConfig -> { - CacheLoader cacheLoader = basicGetLoader(); - CacheWriter cacheWriter = basicGetWriter(); + CacheLoader cacheLoader = basicGetLoader(); + CacheWriter cacheWriter = basicGetWriter(); if (prConfig != null) { for (Node node : prConfig.getNodes()) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataView.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataView.java index 347a0dd381ca..52f93595f9ff 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataView.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionDataView.java @@ -49,7 +49,7 @@ public void destroyExistingEntry(EntryEventImpl event, boolean cacheWrite, } @Override - public Entry getEntry(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones) { + public Entry getEntry(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones) { TXStateProxy tx = localRegion.cache.getTXMgr().pauseTransaction(); try { PartitionedRegion pr = (PartitionedRegion) localRegion; @@ -102,7 +102,6 @@ public void destroyOnRemote(EntryEventImpl event, boolean cacheWrite, 
Object exp throws DataLocationException { PartitionedRegion pr = (PartitionedRegion) event.getRegion(); pr.getDataStore().destroyLocally(event.getKeyInfo().getBucketId(), event, expectedOldValue); - return; } @Override @@ -113,13 +112,14 @@ public void invalidateOnRemote(EntryEventImpl event, boolean invokeCallbacks, } @Override - public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { + public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { PartitionedRegion pr = (PartitionedRegion) localRegion; return pr.getBucketKeys(bucketId, allowTombstones); } @Override - public Entry getEntryOnRemote(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones) + public Entry getEntryOnRemote(KeyInfo keyInfo, LocalRegion localRegion, + boolean allowTombstones) throws DataLocationException { PartitionedRegion pr = (PartitionedRegion) localRegion; return pr.getDataStore().getEntryLocally(keyInfo.getBucketId(), keyInfo.getKey(), false, @@ -130,7 +130,7 @@ public Entry getEntryOnRemote(KeyInfo keyInfo, LocalRegion localRegion, boolean public Object getKeyForIterator(KeyInfo curr, LocalRegion currRgn, boolean rememberReads, boolean allowTombstones) { // do not perform a value check here, it will send out an - // extra message. Also BucketRegion will check to see if + // extra message. BucketRegion will check to see if // the value for this key is a removed token return curr.getKey(); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java index 9562afe205a4..2f9e79659419 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionHelper.java @@ -14,9 +14,14 @@ */ package org.apache.geode.internal.cache; +import static java.lang.String.format; +import static java.util.Collections.unmodifiableSet; +import static org.apache.geode.cache.DataPolicy.PERSISTENT_PARTITION; import static org.apache.geode.cache.Region.SEPARATOR; import static org.apache.geode.cache.Region.SEPARATOR_CHAR; import static org.apache.geode.internal.cache.LocalRegion.InitializationLevel.ANY_INIT; +import static org.apache.geode.internal.util.CollectionUtils.asSet; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import java.util.ArrayList; import java.util.Collection; @@ -28,6 +33,7 @@ import java.util.Set; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.CancelException; import org.apache.geode.annotations.Immutable; @@ -67,7 +73,7 @@ public class PartitionedRegionHelper { static final long BYTES_PER_MB = 1024 * 1024; /** - * The administrative region used for storing Partitioned Region meta data sub regions * + * The administrative region used for storing Partitioned Region metadata sub regions * */ public static final String PR_ROOT_REGION_NAME = "__PR"; @@ -99,20 +105,11 @@ public class PartitionedRegionHelper { public static final DataPolicy DEFAULT_DATA_POLICY = DataPolicy.PARTITION; @Immutable - public static final Set ALLOWED_DATA_POLICIES; - - static final Object dlockMonitor = new Object(); - - static { - Set policies = new HashSet(); - policies.add(DEFAULT_DATA_POLICY); - policies.add(DataPolicy.PERSISTENT_PARTITION); - // policies.add(DataPolicy.NORMAL); - ALLOWED_DATA_POLICIES = Collections.unmodifiableSet(policies); - 
} + public static final Set ALLOWED_DATA_POLICIES = + unmodifiableSet(asSet(DEFAULT_DATA_POLICY, PERSISTENT_PARTITION)); /** - * This function is used for cleaning the config meta data for the failed or closed + * This function is used for cleaning the config metadata for the failed or closed * PartitionedRegion node. * * @param failedNode The failed PartitionedRegion Node @@ -125,7 +122,7 @@ static void removeGlobalMetadataForFailedNode(Node failedNode, String regionIden } /** - * This function is used for cleaning the config meta data for the failed or closed + * This function is used for cleaning the config metadata for the failed or closed * PartitionedRegion node. * * @param failedNode The failed PartitionedRegion Node @@ -135,11 +132,11 @@ static void removeGlobalMetadataForFailedNode(Node failedNode, String regionIden */ static void removeGlobalMetadataForFailedNode(Node failedNode, String regionIdentifier, InternalCache cache, final boolean lock) { - Region root = PartitionedRegionHelper.getPRRoot(cache, false); + Region root = PartitionedRegionHelper.getPRRoot(cache, false); if (root == null) { return; // no partitioned region info to clean up } - PartitionRegionConfig prConfig = (PartitionRegionConfig) root.get(regionIdentifier); + PartitionRegionConfig prConfig = root.get(regionIdentifier); if (null == prConfig || !prConfig.containsNode(failedNode)) { return; } @@ -150,7 +147,7 @@ static void removeGlobalMetadataForFailedNode(Node failedNode, String regionIden if (lock) { rl.lock(); } - prConfig = (PartitionRegionConfig) root.get(regionIdentifier); + prConfig = root.get(regionIdentifier); if (prConfig != null && prConfig.containsNode(failedNode)) { if (logger.isDebugEnabled()) { logger.debug("Cleaning up config for pr {} node {}", regionIdentifier, failedNode); @@ -164,7 +161,7 @@ static void removeGlobalMetadataForFailedNode(Node failedNode, String regionIden root.destroy(regionIdentifier); } catch (EntryNotFoundException e) { logger.warn( - String.format("Got EntryNotFoundException in destroy Op for allPRRegion key, %s", + format("Got EntryNotFoundException in destroy Op for allPRRegion key, %s", regionIdentifier), e); } @@ -189,17 +186,18 @@ static void removeGlobalMetadataForFailedNode(Node failedNode, String regionIden * Return a region that is the root for all Partitioned Region metadata on this node */ public static LocalRegion getPRRoot(final InternalCache cache) { - return getPRRoot(cache, true); + return (LocalRegion) getPRRoot(cache, true); } /** - * Return a region that is the root for all PartitionedRegion meta data on this Node. The main + * Return a region that is the root for all PartitionedRegion metadata on this Node. The main * administrative Regions contained within are allPartitionedRegion (Scope * DISTRIBUTED_ACK) and bucket2Node (Scope DISTRIBUTED_ACK) and dataStore regions. 
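The static initializer for ALLOWED_DATA_POLICIES collapses into a single unmodifiableSet(asSet(...)) expression, asSet being the Geode CollectionUtils helper imported at the top of this file. The same shape without that helper, on plain Java 8:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class AllowedPoliciesDemo {
  // Same construction as the new one-liner, minus Geode's asSet helper;
  // the string values stand in for DataPolicy constants.
  static final Set<String> ALLOWED_DATA_POLICIES = Collections.unmodifiableSet(
      new HashSet<>(Arrays.asList("PARTITION", "PERSISTENT_PARTITION")));

  public static void main(String[] args) {
    System.out.println(ALLOWED_DATA_POLICIES.contains("PARTITION")); // true
    try {
      ALLOWED_DATA_POLICIES.add("NORMAL");
    } catch (UnsupportedOperationException expected) {
      System.out.println("immutable, as intended");
    }
  }
}
```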
* * @return a GLOBLAL scoped root region used for PartitionedRegion administration */ - public static LocalRegion getPRRoot(final InternalCache cache, boolean createIfAbsent) { + public static Region getPRRoot(final InternalCache cache, + boolean createIfAbsent) { DistributedRegion root = (DistributedRegion) cache.getRegion(PR_ROOT_REGION_NAME, true); if (root == null) { if (!createIfAbsent) { @@ -209,12 +207,13 @@ public static LocalRegion getPRRoot(final InternalCache cache, boolean createIfA logger.debug("Creating root Partitioned Admin Region {}", PartitionedRegionHelper.PR_ROOT_REGION_NAME); } - InternalRegionFactory factory = cache.createInternalRegionFactory(RegionShortcut.REPLICATE); + InternalRegionFactory factory = + cache.createInternalRegionFactory(RegionShortcut.REPLICATE); factory.addCacheListener(new FixedPartitionAttributesListener()); if (Boolean.getBoolean(GeodeGlossary.GEMFIRE_PREFIX + "PRDebug")) { - factory.addCacheListener(new CacheListenerAdapter() { + factory.addCacheListener(new CacheListenerAdapter() { @Override - public void afterCreate(EntryEvent event) { + public void afterCreate(EntryEvent event) { if (logger.isDebugEnabled()) { logger.debug( "Create Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}", @@ -224,7 +223,7 @@ public void afterCreate(EntryEvent event) { } @Override - public void afterUpdate(EntryEvent event) { + public void afterUpdate(EntryEvent event) { if (logger.isDebugEnabled()) { logger.debug( "Update Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}", @@ -234,7 +233,7 @@ public void afterUpdate(EntryEvent event) { } @Override - public void afterDestroy(EntryEvent event) { + public void afterDestroy(EntryEvent event) { if (logger.isDebugEnabled()) { logger.debug( "Destroy Event for allPR: key = {} oldVal = {} newVal = {} Op = {} origin = {} isNetSearch = {}", @@ -244,15 +243,16 @@ public void afterDestroy(EntryEvent event) { } }); - factory.setCacheWriter(new CacheWriterAdapter() { + factory.setCacheWriter(new CacheWriterAdapter() { @Override - public void beforeUpdate(EntryEvent event) throws CacheWriterException { + public void beforeUpdate(EntryEvent event) + throws CacheWriterException { // the prConfig node list must advance (otherwise meta data becomes out of sync) - final PartitionRegionConfig newConf = (PartitionRegionConfig) event.getNewValue(); - final PartitionRegionConfig oldConf = (PartitionRegionConfig) event.getOldValue(); + final PartitionRegionConfig newConf = event.getNewValue(); + final PartitionRegionConfig oldConf = event.getOldValue(); if (newConf != oldConf && !newConf.isGreaterNodeListVersion(oldConf)) { throw new CacheWriterException( - String.format( + format( "New PartitionedRegionConfig %s does not have newer version than previous %s", newConf, oldConf)); } @@ -260,7 +260,7 @@ public void beforeUpdate(EntryEvent event) throws CacheWriterException { }); } - // Create anonymous stats holder for Partitioned Region meta data + // Create anonymous stats holder for Partitioned Region metadata final HasCachePerfStats prMetaStatsHolder = () -> new CachePerfStats(cache.getDistributedSystem(), "RegionStats-partitionMetaData", cache.getStatisticsClock()); @@ -279,7 +279,7 @@ public void beforeUpdate(EntryEvent event) throws CacheWriterException { } Assert.assertTrue(root != null, "Can not obtain internal Partitioned Region configuration root"); - return root; + return uncheckedCast(root); } // TODO rebalancing - this code was added here in the merge of 
-r22804:23093 from trunk @@ -298,7 +298,7 @@ public void beforeUpdate(EntryEvent event) throws CacheWriterException { // inconsistent metadata // and regions. /** - * Clean the config meta data for a DistributedMember which has left the DistributedSystem, one + * Clean the config metadata for a DistributedMember which has left the DistributedSystem, one * PartitionedRegion at a time. */ public static void cleanUpMetaDataOnNodeFailure(InternalCache cache, @@ -311,7 +311,8 @@ public static void cleanUpMetaDataOnNodeFailure(InternalCache cache, if (logger.isDebugEnabled()) { logger.debug("Cleaning PartitionedRegion meta data for memberId={}", failedMemId); } - Region rootReg = PartitionedRegionHelper.getPRRoot(cache, false); + Region rootReg = + PartitionedRegionHelper.getPRRoot(cache, false); if (rootReg == null) { return; } @@ -342,12 +343,13 @@ public static void cleanUpMetaDataForRegion(final InternalCache cache, final Str boolean runPostCleanUp = true; try { final PartitionRegionConfig prConf; - Region rootReg = PartitionedRegionHelper.getPRRoot(cache, false); + Region rootReg = + PartitionedRegionHelper.getPRRoot(cache, false); if (rootReg == null) { return; } try { - prConf = (PartitionRegionConfig) rootReg.get(prName); + prConf = rootReg.get(prName); } catch (EntryDestroyedException ignore) { return; } @@ -434,22 +436,23 @@ private static void cleanPartitionedRegionMetaDataForNode(InternalCache cache, N * totalNumberOfBuckets; } **/ - private static PartitionResolver getResolver(PartitionedRegion pr, Object key, + @SuppressWarnings({"unchecked"}) + private static PartitionResolver getResolver(PartitionedRegion pr, Object key, Object callbackArgument) { // First choice is one associated with the region - PartitionResolver result = pr.getPartitionResolver(); + final PartitionResolver result = uncheckedCast(pr.getPartitionResolver()); if (result != null) { return result; } // Second is the key - if (key != null && key instanceof PartitionResolver) { - return (PartitionResolver) key; + if (key instanceof PartitionResolver) { + return (PartitionResolver) key; } // Third is the callback argument - if (callbackArgument != null && callbackArgument instanceof PartitionResolver) { - return (PartitionResolver) callbackArgument; + if (callbackArgument instanceof PartitionResolver) { + return (PartitionResolver) callbackArgument; } // There is no resolver. 
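getResolver keeps its three-way precedence (region attribute first, then the key, then the callback argument) while dropping the redundant null checks: x instanceof T is already false when x is null, the same simplification applied to savedFirstRuntimeException earlier in the diff. A stand-in sketch of the lookup chain:

```java
public class ResolverLookupDemo {
  interface Resolver { /* stand-in for PartitionResolver */ }

  static Resolver regionResolver; // stand-in for pr.getPartitionResolver()

  static Resolver getResolver(Object key, Object callbackArgument) {
    if (regionResolver != null) {
      return regionResolver; // first choice: the region's own resolver
    }
    if (key instanceof Resolver) { // instanceof is null-safe
      return (Resolver) key; // second choice: the key itself
    }
    if (callbackArgument instanceof Resolver) {
      return (Resolver) callbackArgument; // third choice: callback argument
    }
    return null; // no resolver configured
  }

  public static void main(String[] args) {
    Resolver fromKey = new Resolver() {};
    System.out.println(getResolver(fromKey, null) == fromKey); // true
    System.out.println(getResolver("plainKey", null)); // null
  }
}
```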
@@ -488,7 +491,7 @@ public static int getHashKey(PartitionedRegion pr, Operation operation, Object k * @param event entry event created for this entry operation * @return the bucket id the key/routing object hashes to */ - public static int getHashKey(EntryOperation event) { + public static int getHashKey(EntryOperation event) { return getHashKey(event, null, null, null, null, null); } @@ -505,8 +508,9 @@ public static int getHashKey(EntryOperation event) { * Routing object * @return the bucket id the key/routing object hashes to */ - private static int getHashKey(EntryOperation event, PartitionedRegion pr, Operation operation, - Object key, Object value, Object callbackArgument) { + private static int getHashKey(EntryOperation event, PartitionedRegion pr, + Operation operation, + K key, V value, Object callbackArgument) { // avoid creating EntryOperation if there is no resolver if (event != null) { pr = (PartitionedRegion) event.getRegion(); @@ -514,42 +518,44 @@ private static int getHashKey(EntryOperation event, PartitionedRegion pr, Operat callbackArgument = event.getCallbackArgument(); } - PartitionResolver resolver = getResolver(pr, key, callbackArgument); - Object resolveKey = null; + PartitionResolver resolver = getResolver(pr, key, callbackArgument); + final Object resolveKey; if (pr.isFixedPartitionedRegion()) { - String partition = null; + final String partition; if (resolver instanceof FixedPartitionResolver) { Map partitionMap = pr.getPartitionsMap(); if (event == null) { - event = new EntryOperationImpl(pr, operation, key, value, callbackArgument); + event = + new EntryOperationImpl<>(uncheckedCast(pr), operation, key, value, callbackArgument); } partition = - ((FixedPartitionResolver) resolver).getPartitionName(event, partitionMap.keySet()); + ((FixedPartitionResolver) resolver).getPartitionName(event, + partitionMap.keySet()); if (partition == null) { Object[] prms = new Object[] {pr.getName(), resolver}; throw new IllegalStateException( - String.format("For region %s, partition resolver %s returned partition name null", + format("For region %s, partition resolver %s returned partition name null", prms)); } Integer[] bucketArray = partitionMap.get(partition); if (bucketArray == null) { Object[] prms = new Object[] {pr.getName(), partition}; throw new PartitionNotAvailableException( - String.format( + format( "For FixedPartitionedRegion %s, partition %s is not available on any datastore.", prms)); } - int numBukets = bucketArray[1]; - resolveKey = (numBukets == 1) ? partition : resolver.getRoutingObject(event); + int numberOfBuckets = bucketArray[1]; + resolveKey = (numberOfBuckets == 1) ? 
partition : resolver.getRoutingObject(event); } else if (resolver == null) { throw new IllegalStateException( - String.format( + format( "For FixedPartitionedRegion %s, FixedPartitionResolver is not available (neither through the partition attribute partition-resolver nor key/callbackArg implementing FixedPartitionResolver)", pr.getName())); } else { Object[] prms = new Object[] {pr.getName(), resolver}; throw new IllegalStateException( - String.format( + format( "For FixedPartitionedRegion %s, Resolver defined %s is not an instance of FixedPartitionResolver", prms)); } @@ -564,7 +570,8 @@ private static int getHashKey(EntryOperation event, PartitionedRegion pr, Operat } } else { if (event == null) { - event = new EntryOperationImpl(pr, operation, key, value, callbackArgument); + event = + new EntryOperationImpl<>(uncheckedCast(pr), operation, key, value, callbackArgument); } resolveKey = resolver.getRoutingObject(event); if (resolveKey == null) { @@ -582,10 +589,10 @@ private static int assignFixedBucketId(PartitionedRegion pr, String partition, int startingBucketID = 0; int partitionNumBuckets = 0; boolean isPartitionAvailable = pr.getPartitionsMap().containsKey(partition); - Integer[] partitionDeatils = pr.getPartitionsMap().get(partition); + Integer[] partitionDetails = pr.getPartitionsMap().get(partition); if (isPartitionAvailable) { - startingBucketID = partitionDeatils[0]; - partitionNumBuckets = partitionDeatils[1]; + startingBucketID = partitionDetails[0]; + partitionNumBuckets = partitionDetails[1]; int hc = resolveKey.hashCode(); int bucketId = Math.abs(hc % partitionNumBuckets); @@ -623,7 +630,7 @@ private static int assignFixedBucketId(PartitionedRegion pr, String partition, if (isPartitionAvailable) { Object[] prms = new Object[] {pr.getName(), partition}; throw new IllegalStateException( - String.format( + format( "For region %s, For partition %s partition-num-buckets is set to 0. 
Buckets cann not be created on this partition.", prms)); } @@ -632,7 +639,7 @@ private static int assignFixedBucketId(PartitionedRegion pr, String partition, if (!isPartitionAvailable) { Object[] prms = new Object[] {pr.getName(), partition}; throw new PartitionNotAvailableException( - String.format("For region %s, partition name %s is not available on any datastore.", + format("For region %s, partition name %s is not available on any datastore.", prms)); } int hc = resolveKey.hashCode(); @@ -654,7 +661,7 @@ public static int getHashKey(Object routingObject, int totalNumBuckets) { } public static PartitionedRegion getPartitionedRegion(String prName, Cache cache) { - Region region = cache.getRegion(prName); + Region region = cache.getRegion(prName); if (region != null) { if (region instanceof PartitionedRegion) { return (PartitionedRegion) region; @@ -687,10 +694,8 @@ public static Bucket getProxyBucketRegion(Cache cache, String fullPath) } String prid = getPRPath(bucketName); - // PartitionedRegion region = - // PartitionedRegion.getPRFromId(Integer.parseInt(prid)); - Region region; + Region region; final InitializationLevel oldLevel = LocalRegion.setThreadInitLevelRequirement(ANY_INIT); try { region = cache.getRegion(prid); @@ -766,13 +771,7 @@ public static String getBucketName(String bucketFullPath) { } public static String getBucketFullPath(String prFullPath, int bucketId) { - String name = getBucketName(prFullPath, bucketId); - if (name != null) { - return SEPARATOR + PR_ROOT_REGION_NAME + SEPARATOR + name; - } - - return null; - + return SEPARATOR + PR_ROOT_REGION_NAME + SEPARATOR + getBucketName(prFullPath, bucketId); } public static String escapePRPath(String prFullPath) { @@ -792,7 +791,7 @@ public static String unescapePRPath(String escapedPath) { return path; } - public static String getBucketName(String prPath, int bucketId) { + public static @NotNull String getBucketName(String prPath, int bucketId) { return PartitionedRegionHelper.BUCKET_REGION_PREFIX + PartitionedRegionHelper.escapePRPath(prPath) + PartitionedRegion.BUCKET_NAME_SEPARATOR + bucketId; @@ -811,7 +810,7 @@ public static String getPRPath(String bucketName) { } /** - * Returns the bucket id gvien the bucketName (see getBucketName). + * Returns the bucket id given the bucketName (see getBucketName). */ public static int getBucketId(String bucketName) { // bucketName = _B_PRNAME_10 @@ -840,45 +839,13 @@ public static boolean isSubRegion(String fullPath) { return isSubRegion; } - /** - * Utility method to print warning when nodeList in b2n region is found empty. This will signify - * potential data loss scenario. - * - * @param bucketId Id of Bucket whose nodeList in b2n is empty. - * @param callingMethod methodName of the calling method. 
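The fixed-partition branch of getHashKey asks a FixedPartitionResolver for a partition name and fails fast if the resolver returns null or names a partition no datastore hosts. FixedPartitionResolver and EntryOperation are the real Geode interfaces; the quarter-of-year scheme below is invented purely for illustration:

```java
import java.util.Set;

import org.apache.geode.cache.EntryOperation;
import org.apache.geode.cache.FixedPartitionResolver;

public class QuarterFixedPartitionResolver implements FixedPartitionResolver<Integer, String> {

  @Override
  public String getPartitionName(EntryOperation<Integer, String> opDetails,
      Set<String> targetPartitions) {
    // Must not return null: getHashKey throws IllegalStateException if it does.
    int month = (Integer) getRoutingObject(opDetails);
    return "Quarter" + ((month - 1) / 3 + 1);
  }

  @Override
  public Object getRoutingObject(EntryOperation<Integer, String> opDetails) {
    return opDetails.getKey(); // keys here are month numbers 1..12
  }

  @Override
  public String getName() {
    return "QuarterFixedPartitionResolver";
  }

  @Override
  public void close() {}
}
```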
- */ - public static void logForDataLoss(PartitionedRegion partitionedRegion, int bucketId, - String callingMethod) { - if (!Boolean.getBoolean(GeodeGlossary.GEMFIRE_PREFIX + "PRDebug")) { - return; - } - Region root = PartitionedRegionHelper.getPRRoot(partitionedRegion.getCache()); - // Region allPartitionedRegions = PartitionedRegionHelper.getPRConfigRegion( - // root, partitionedRegion.getCache()); - PartitionRegionConfig prConfig = - (PartitionRegionConfig) root.get(partitionedRegion.getRegionIdentifier()); - if (prConfig == null) { - return; - } - - Set members = partitionedRegion.getDistributionManager().getDistributionManagerIds(); - logger.warn( - "DATALOSS ( {} ) :: Size of nodeList After verifyBucketNodes for bucket ID, {} is 0", - callingMethod, bucketId); - logger.warn("DATALOSS ( {} ) :: NodeList from prConfig, {}", - callingMethod, printCollection(prConfig.getNodes())); - logger.warn("DATALOSS ( {} ) :: Current Membership List, {}", - callingMethod, printCollection(members)); - } - /** * Utility method to print a collection. - * */ - public static String printCollection(Collection c) { + public static String printCollection(Collection c) { if (c != null) { StringBuilder sb = new StringBuilder("["); - Iterator itr = c.iterator(); + Iterator itr = c.iterator(); while (itr.hasNext()) { sb.append(itr.next()); if (itr.hasNext()) { @@ -913,28 +880,11 @@ public static FixedPartitionAttributesImpl getFixedPartitionAttributesForBucket( } Object[] prms = new Object[] {pr.getName(), bucketId}; throw new PartitionNotAvailableException( - String.format( + format( "For FixedPartitionedRegion %s, Fixed partition is not defined for bucket id %s on any datastore", prms)); } - private static Set getAllAvailablePartitions(PartitionedRegion region) { - Set partitionSet = new HashSet<>(); - List localFPAs = region.getFixedPartitionAttributesImpl(); - if (localFPAs != null) { - for (FixedPartitionAttributesImpl fpa : localFPAs) { - partitionSet.add(fpa.getPartitionName()); - } - } - - List remoteFPAs = - region.getRegionAdvisor().adviseAllFixedPartitionAttributes(); - for (FixedPartitionAttributes fpa : remoteFPAs) { - partitionSet.add(fpa.getPartitionName()); - } - return Collections.unmodifiableSet(partitionSet); - } - public static Set getAllFixedPartitionAttributes( PartitionedRegion region) { Set fpaSet = new HashSet<>(); @@ -950,7 +900,7 @@ public static Set getAllFixedPartitionAttributes( private static class MemberFailureListener implements MembershipListener { - InternalCache cache = null; + private final InternalCache cache; MemberFailureListener(InternalCache cache) { this.cache = cache; @@ -978,28 +928,29 @@ public void quorumLost(DistributionManager distributionManager, } - static class FixedPartitionAttributesListener extends CacheListenerAdapter { + static class FixedPartitionAttributesListener + extends CacheListenerAdapter { private static final Logger logger = LogService.getLogger(); @Override - public void afterCreate(EntryEvent event) { - PartitionRegionConfig prConfig = (PartitionRegionConfig) event.getNewValue(); + public void afterCreate(EntryEvent event) { + PartitionRegionConfig prConfig = event.getNewValue(); if (!prConfig.getElderFPAs().isEmpty()) { updatePartitionMap(prConfig); } } @Override - public void afterUpdate(EntryEvent event) { - PartitionRegionConfig prConfig = (PartitionRegionConfig) event.getNewValue(); + public void afterUpdate(EntryEvent event) { + PartitionRegionConfig prConfig = event.getNewValue(); if (!prConfig.getElderFPAs().isEmpty()) { 
updatePartitionMap(prConfig); } } private void updatePartitionMap(PartitionRegionConfig prConfig) { - int prId = prConfig.getPRId(); - PartitionedRegion pr = null; + final int prId = prConfig.getPRId(); + final PartitionedRegion pr; try { pr = PartitionedRegion.getPRFromId(prId); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionQueryEvaluator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionQueryEvaluator.java index 047cd3ef0938..d2aa553b1bc5 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionQueryEvaluator.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PartitionedRegionQueryEvaluator.java @@ -25,13 +25,10 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.LinkedBlockingQueue; -import it.unimi.dsi.fastutil.objects.Object2ObjectOpenHashMap; import org.apache.logging.log4j.Logger; import org.apache.geode.CopyHelper; @@ -49,7 +46,6 @@ import org.apache.geode.cache.query.internal.DefaultQuery; import org.apache.geode.cache.query.internal.DefaultQueryService; import org.apache.geode.cache.query.internal.ExecutionContext; -import org.apache.geode.cache.query.internal.IndexTrackingQueryObserver.IndexInfo; import org.apache.geode.cache.query.internal.NWayMergeResults; import org.apache.geode.cache.query.internal.OrderByComparator; import org.apache.geode.cache.query.internal.PRQueryTraceInfo; @@ -65,7 +61,6 @@ import org.apache.geode.distributed.DistributedMember; import org.apache.geode.distributed.internal.DistributionMessage; import org.apache.geode.distributed.internal.InternalDistributedSystem; -import org.apache.geode.distributed.internal.ReplyException; import org.apache.geode.distributed.internal.ReplyProcessor21; import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.Assert; @@ -84,6 +79,7 @@ * * revamped with streaming of results retry logic */ +@SuppressWarnings({"rawtypes", "unchecked"}) public class PartitionedRegionQueryEvaluator extends StreamingPartitionOperation { private static final Logger logger = LogService.getLogger(); @@ -270,9 +266,8 @@ private List sortIncomingData(List objects, List orderByA boolean nullAtStart = !orderByAttribs.get(0).getCriterion(); final SelectResults newResults; // Asif: There is a bug in the versions < 9.0, such that the struct results coming from the - // bucket nodes , do not contain approrpiate ObjectTypes. All the projection fields have - // have the types as ObjectType. The resultset being created here has the right more selective - // type. + // bucket nodes , do not contain appropriate ObjectTypes. All the projection fields have the + // types as ObjectType. The resultset being created here has the right more selective type. // so the addition of objects throw exception due to type mismatch. To handle this problem, // instead // of adding the struct objects as is, add fieldValues. 
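The sorted-results path described above ultimately merges per-bucket result chunks that each arrive pre-sorted. As a conceptual illustration only (this is not Geode's `NWayMergeResults`), a k-way merge with a priority queue looks like this:

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

// Conceptual k-way merge of result chunks that are each already sorted.
final class NWayMergeSketch {
  private static final class Head<T> {
    T value;
    final Iterator<T> rest;

    Head(T value, Iterator<T> rest) {
      this.value = value;
      this.rest = rest;
    }
  }

  static <T> List<T> merge(List<List<T>> sortedChunks, Comparator<T> cmp) {
    PriorityQueue<Head<T>> heads =
        new PriorityQueue<>((a, b) -> cmp.compare(a.value, b.value));
    for (List<T> chunk : sortedChunks) {
      Iterator<T> it = chunk.iterator();
      if (it.hasNext()) {
        heads.add(new Head<>(it.next(), it));
      }
    }
    List<T> out = new ArrayList<>();
    while (!heads.isEmpty()) {
      Head<T> h = heads.poll();
      out.add(h.value);
      if (h.rest.hasNext()) {
        h.value = h.rest.next();
        heads.add(h); // re-queue with this chunk's next element
      }
    }
    return out;
  }
}
```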
@@ -312,7 +307,7 @@ public boolean executeQueryOnRemoteAndLocalNodes(final TestHook th) HashMap> n2b = new HashMap<>(node2bucketIds); n2b.remove(pr.getMyId()); - // Shobhit: IF query is originated from a Function and we found some buckets on + // IF query is originated from a Function and we found some buckets on // remote node we should throw exception mentioning data movement during function execution. // According to discussions we dont know if this is possible as buckets are not moved until // function execution is completed. @@ -321,7 +316,7 @@ public boolean executeQueryOnRemoteAndLocalNodes(final TestHook th) logger.debug("Remote buckets found for query executed in a Function."); } throw new QueryInvocationTargetException( - "Data movement detected accross PartitionRegion nodes while executing the Query with function filter."); + "Data movement detected across PartitionRegion nodes while executing the Query with function filter."); } if (isDebugEnabled) { @@ -365,7 +360,7 @@ public boolean executeQueryOnRemoteAndLocalNodes(final TestHook th) Throwable localFault = null; boolean localNeedsRetry = false; - // Shobhit: Check if query is only for local buckets else return. + // Check if query is only for local buckets else return. if (node2bucketIds.containsKey(pr.getMyId())) { if (isDebugEnabled) { logger.debug("Started query execution on local data for query:{}", @@ -405,20 +400,12 @@ public boolean executeQueryOnRemoteAndLocalNodes(final TestHook th) logger.debug("Following remote members failed {} and retry flag is set to: {}", failedMembers, requiresRetry); } - } catch (org.apache.geode.cache.TimeoutException e) { // Shobhit: Swallow remote exception if - // local exception is there. + } catch (org.apache.geode.cache.TimeoutException e) { + // Swallow remote exception if local exception is there. if (localFault == null) { throw new QueryException(e); } - } catch (ReplyException e) { - if (localFault == null) { - throw e; - } - } catch (Error e) { - if (localFault == null) { - throw e; - } - } catch (RuntimeException e) { + } catch (Error | RuntimeException e) { if (localFault == null) { throw e; } @@ -484,7 +471,7 @@ public SelectResults queryBuckets(final TestHook th) throws QueryException, Inte boolean needsRetry = true; int retry = 0; while (needsRetry && retry < MAX_PR_QUERY_RETRIES) { - // Shobhit: Now on if buckets to be queried are on remote as well as local node, + // Now on if buckets to be queried are on remote as well as local node, // request will be sent to remote node first to run query in parallel on local and // remote node. // Note: if Any Exception is thrown on local and some remote node, local exception @@ -496,7 +483,7 @@ public SelectResults queryBuckets(final TestHook th) throws QueryException, Inte } if (needsRetry) { - // Shobhit: Only one chance is allowed for Function queries. + // Only one chance is allowed for Function queries. 
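The exception-handling change above is the Java 7 multi-catch consolidation: several catch blocks with identical bodies become one, with behavior unchanged. A stand-alone sketch of the pattern, with stand-in names:

```java
// Multi-catch consolidation: rethrow logic written once. All names here
// are stand-ins, not the evaluator's actual members.
final class MultiCatchSketch {
  private Throwable localFault; // set when the local part of the query failed

  void recordLocalFault(Throwable t) {
    localFault = t;
  }

  void runRemoteQuery() {
    // stand-in for the remote invocation
  }

  void awaitRemoteResults() {
    try {
      runRemoteQuery();
    } catch (Error | RuntimeException e) {
      // rethrow only when there is no pending local failure to report instead
      if (localFault == null) {
        throw e;
      }
    }
  }
}
```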
if (query.isQueryWithFunctionContext()) { if (isDebugEnabled) { logger.debug("No of retry attempts are: {}", retry); @@ -514,7 +501,7 @@ public SelectResults queryBuckets(final TestHook th) throws QueryException, Inte } pr.getCachePerfStats().incPRQueryRetries(); retry++; - // Shobhit: Wait for sometime as rebalancing might be happening + // Wait for sometime as rebalancing might be happening waitBeforeRetry(); } if (th != null) { @@ -571,7 +558,7 @@ private Set calculateRetryBuckets() { logStr.append("Query ").append(query.getQueryString()) .append(" needs to retry bucketsIds: ["); for (Integer i : retryBuckets) { - logStr.append("," + i); + logStr.append(",").append(i); } logStr.append("]"); logger.debug(logStr); @@ -581,8 +568,6 @@ private Set calculateRetryBuckets() { } private SelectResults addResultsToResultSet() throws QueryException { - int numElementsInResult = 0; - boolean isDistinct = false; boolean isCount = false; @@ -607,7 +592,7 @@ private SelectResults addResultsToResultSet() throws QueryException { boolean isGroupByResults = cs.getType() == CompiledValue.GROUP_BY_SELECT; if (isGroupByResults) { - SelectResults baseResults = null; + final SelectResults baseResults; CompiledGroupBySelect cgs = (CompiledGroupBySelect) cs; if (cgs.getOrderByAttrs() != null && !cgs.getOrderByAttrs().isEmpty()) { baseResults = buildSortedResult(cs, limit); @@ -620,7 +605,7 @@ private SelectResults addResultsToResultSet() throws QueryException { } else { if (cumulativeResults.getCollectionType().isOrdered() && cs.getOrderByAttrs() != null) { - // If its a sorted result set, sort local and remote results using query. + // If it's a sorted result set, sort local and remote results using query. return buildSortedResult(cs, limit); } else { return buildCumulativeResults(isDistinct, limit); @@ -701,7 +686,6 @@ private SelectResults buildCumulativeResults(boolean isDistinct, int limit) { } else { for (Collection res : e.getValue()) { checkIfQueryShouldBeCancelled(); - // final TaintableArrayList res = (TaintableArrayList) e.getValue(); if (res != null) { if (isDebugEnabled) { logger.debug("Query Result from member :{}: {}", e.getKey(), res.size()); @@ -714,7 +698,7 @@ private SelectResults buildCumulativeResults(boolean isDistinct, int limit) { for (Object obj : res) { checkIfQueryShouldBeCancelled(); - int occurrence = 0; + final int occurrence; obj = PDXUtils.convertPDX(obj, isStruct, getDomainObjectForPdx, getDeserializedObject, localResults, objectChangedMarker, true); boolean elementGotAdded = @@ -863,7 +847,7 @@ private Map> buildNodeToBucketMapForBuc bucketIds = findBucketOwners(bucketIdsToConsider, ret); } if (bucketIds.size() != bucketIdsToConsider.size()) { - bucketIdsToConsider.removeAll(bucketIds); + bucketIds.forEach(bucketIdsToConsider::remove); throw new QueryException("Data loss detected, unable to find the hosting " + " node for some of the dataset. [dataset/bucket ids:" + bucketIdsToConsider + "]"); } @@ -992,9 +976,8 @@ private boolean executeQueryOnLocalNode() throws QueryException, InterruptedExce // the value. // The non tx entry already checks copy on read and returns a copy. 
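The retry loop earlier in this hunk is a standard bounded-retry shape: re-run while buckets may have moved, give up after a fixed number of attempts, and pause between tries because rebalancing may still be in flight. A minimal sketch (the constant and backoff are illustrative, not Geode's `MAX_PR_QUERY_RETRIES` behavior):

```java
// Illustrative bounded retry with a wait between attempts.
final class QueryRetrySketch {
  static final int MAX_RETRIES = 10; // stand-in bound

  interface Attempt {
    /** @return true if the attempt must be retried (e.g. buckets moved) */
    boolean run() throws Exception;
  }

  static void runWithRetry(Attempt attempt) throws Exception {
    boolean needsRetry = true;
    int retry = 0;
    while (needsRetry && retry < MAX_RETRIES) {
      needsRetry = attempt.run();
      if (needsRetry) {
        retry++;
        Thread.sleep(10L * retry); // rebalancing might be happening
      }
    }
  }
}
```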
// The rest of the pr query will be copies from their respective nodes - if (!query.isRemoteQuery() && pr.getCompressor() == null - && pr.getCache().isCopyOnRead() && (!DefaultQueryService.COPY_ON_READ_AT_ENTRY_LEVEL - || (qp.isIndexUsed() && DefaultQueryService.COPY_ON_READ_AT_ENTRY_LEVEL))) { + if (!query.isRemoteQuery() && pr.getCompressor() == null && pr.getCache().isCopyOnRead() + && (!DefaultQueryService.COPY_ON_READ_AT_ENTRY_LEVEL || qp.isIndexUsed())) { MemberResultsList tmpResultCollector = new MemberResultsList(); for (Object o : resultCollector) { Collection tmpResults; @@ -1065,49 +1048,6 @@ public Map getResultsPerMember() { return resultsPerMember; } - /** - * This class is used to accumulate information about indexes used in multipleThreads and results - * gained from buckets. In future this can be used for adding for more information to final query - * running info from pool threads. - * - * @since GemFire 6.6 - */ - public static class PRQueryResultCollector { - - private BlockingQueue resultQueue; - private final Map usedIndexInfoMap; - - public PRQueryResultCollector() { - resultQueue = new LinkedBlockingQueue(); - usedIndexInfoMap = new Object2ObjectOpenHashMap<>(); // {indexName, - // IndexInfo} Map - } - - public boolean isEmpty() { - return resultQueue.isEmpty(); - } - - public void setResultQueue(BlockingQueue resultQueue) { - this.resultQueue = resultQueue; - } - - public Map getIndexInfoMap() { - return usedIndexInfoMap; - } - - public int size() { - return resultQueue.size(); - } - - public Object get() throws InterruptedException { - return resultQueue.take(); - } - - public void put(Object obj) throws InterruptedException { - resultQueue.put(obj); - } - } - public class StreamingQueryPartitionResponse extends StreamingPartitionOperation.StreamingPartitionResponse { @@ -1125,7 +1065,7 @@ public void process(DistributionMessage msg) { msgsBeingProcessed.incrementAndGet(); try { StreamingReplyMessage m = (StreamingReplyMessage) msg; - boolean isLast = true; // is last message for this member? 
+ final boolean isLastMessageForMember; List objects = m.getObjects(); if (m.isCanceled()) { @@ -1147,16 +1087,16 @@ public void process(DistributionMessage msg) { abort = true; // volatile store } } - isLast = isAborted || trackMessage(m); // interpret msgNum - // @todo ezoerner send an abort message to data provider if + isLastMessageForMember = isAborted || trackMessage(m); // interpret msgNum + // @todo send an abort message to data provider if // !doContinue (region was destroyed or cache closed); // also provide ability to explicitly cancel } else { // if a null chunk was received (no data), then // we're done with that member - isLast = true; + isLastMessageForMember = true; } - if (isLast) { // commented by Suranjan watch this out + if (isLastMessageForMember) { super.process(msg, false); // removes from members and cause us to // ignore future messages received from that member } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PausedTXStateProxyImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PausedTXStateProxyImpl.java index 0073a69e0aa7..12739e71559e 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/PausedTXStateProxyImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PausedTXStateProxyImpl.java @@ -20,6 +20,7 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.geode.cache.CommitConflictException; +import org.apache.geode.cache.EntryEvent; import org.apache.geode.cache.EntryNotFoundException; import org.apache.geode.cache.Region.Entry; import org.apache.geode.cache.TransactionId; @@ -84,7 +85,7 @@ public void commit() throws CommitConflictException {} public void rollback() {} @Override - public List getEvents() { + public List<EntryEvent<?, ?>> getEvents() { return null; } @@ -103,7 +104,7 @@ public void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallback boolean forceNewEntry) {} @Override - public Entry getEntry(KeyInfo keyInfo, LocalRegion region, boolean allowTombstones) { + public Entry<?, ?> getEntry(KeyInfo keyInfo, LocalRegion region, boolean allowTombstones) { return null; } @@ -240,7 +241,7 @@ public boolean containsValueForKey(KeyInfo keyInfo, LocalRegion localRegion) { } @Override - public Entry getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones) + public Entry<?, ?> getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones) throws DataLocationException { return null; } @@ -300,7 +301,7 @@ public Object getKeyForIterator(KeyInfo keyInfo, LocalRegion currRgn, boolean re } @Override - public Set getAdditionalKeysForIterator(LocalRegion currRgn) { + public Set<?> getAdditionalKeysForIterator(LocalRegion currRgn) { return null; } @@ -326,12 +327,12 @@ public void checkSupportsRegionInvalidate() throws UnsupportedOperationInTransac public void checkSupportsRegionClear() throws UnsupportedOperationInTransactionException {} @Override - public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { + public Set<?> getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { return null; } @Override - public void postPutAll(DistributedPutAllOperation putallOp, VersionedObjectList successfulPuts, + public void postPutAll(DistributedPutAllOperation putAllOp, VersionedObjectList successfulPuts, InternalRegion reg) {} @Override @@ -339,7 +340,7 @@ public void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList InternalRegion reg) {} @Override - public Entry accessEntry(KeyInfo keyInfo,
LocalRegion localRegion) { + public Entry accessEntry(KeyInfo keyInfo, LocalRegion localRegion) { return null; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/PeerTXStateStub.java b/geode-core/src/main/java/org/apache/geode/internal/cache/PeerTXStateStub.java index 07a06d8b8c77..f2c8b228d0f4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/PeerTXStateStub.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/PeerTXStateStub.java @@ -40,7 +40,7 @@ public class PeerTXStateStub extends TXStateStub { protected static final Logger logger = LogService.getLogger(); - private InternalDistributedMember originatingMember = null; + private InternalDistributedMember originatingMember; protected TXCommitMessage commitMessage = null; public PeerTXStateStub(TXStateProxy stateProxy, DistributedMember target, @@ -104,10 +104,8 @@ public void commit() throws CommitConflictException { try { commitMessage = message.waitForResponse(); - } catch (CommitConflictException e) { + } catch (TransactionException e) { throw e; - } catch (TransactionException te) { - throw te; } catch (ReliableReplyException e) { if (e.getCause() != null) { throw new TransactionInDoubtException(e.getCause()); @@ -136,17 +134,13 @@ public void commit() throws CommitConflictException { if (eCause instanceof ForceReattemptException) { if (eCause.getCause() instanceof PrimaryBucketException) { // data rebalanced - TransactionDataRebalancedException tdnce = - new TransactionDataRebalancedException(eCause.getCause().getMessage(), - eCause.getCause()); - throw tdnce; + throw new TransactionDataRebalancedException(eCause.getCause().getMessage(), + eCause.getCause()); } else { // We cannot be sure that the member departed starting to process commit request, // so throw a TransactionInDoubtException rather than a TransactionDataNodeHasDeparted. // fixes 44939 - TransactionInDoubtException tdnce = - new TransactionInDoubtException(e.getCause().getMessage(), eCause); - throw tdnce; + throw new TransactionInDoubtException(e.getCause().getMessage(), eCause); } } throw new TransactionInDoubtException(eCause); @@ -166,7 +160,7 @@ protected void cleanup() { @Override protected TXRegionStub generateRegionStub(InternalRegion region) { - TXRegionStub stub = null; + final TXRegionStub stub; if (region.getPartitionAttributes() != null) { // a partitioned region stub = new PartitionedTXRegionStub(this, (PartitionedRegion) region); @@ -221,7 +215,7 @@ public void afterCompletion(int status) { @Override public InternalDistributedMember getOriginatingMember() { /* - * This needs to be set to the clients member id if the client originated the tx + * This needs to be set to the client's member id if the client originated the tx */ return originatingMember; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyBucketRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyBucketRegion.java index 19d1bee9f287..e9633e03cd44 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyBucketRegion.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ProxyBucketRegion.java @@ -227,7 +227,7 @@ public InternalCache getCache() { } @Override - public RegionAttributes getAttributes() { + public RegionAttributes getAttributes() { return partitionedRegion.getAttributes(); } @@ -317,7 +317,7 @@ public BucketRegion getCreatedBucketRegion() { /** * Returns the real BucketRegion that is currently being locally hosted. 
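The merged catch in `commit()` above is safe because `CommitConflictException` is a subtype of `TransactionException` and both original handlers only rethrew. A stand-in hierarchy shows why nothing is lost:

```java
// Stand-in exception types (not the Geode classes) demonstrating that one
// catch of the supertype preserves the behavior of two rethrow-only blocks.
class TransactionExceptionStub extends RuntimeException {}

class CommitConflictExceptionStub extends TransactionExceptionStub {}

final class CatchMergeSketch {
  static void commit(boolean conflict) {
    try {
      if (conflict) {
        throw new CommitConflictExceptionStub();
      }
      throw new TransactionExceptionStub();
    } catch (TransactionExceptionStub e) {
      // a CommitConflictExceptionStub still propagates as its concrete type
      throw e;
    }
  }
}
```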
Returns null if the real - * bucket is null or if it is still being initialized. After the bucket is intialized isHosting + * bucket is null or if it is still being initialized. After the bucket is initialized isHosting * will be flagged true and future calls to this method will return the bucket. * * @return the real bucket if currently hosted or null diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEntryFactory.java b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEntryFactory.java index 36d1db9531b6..72eaabe64dcd 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEntryFactory.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/RegionEntryFactory.java @@ -34,7 +34,7 @@ public interface RegionEntryFactory { /** * @return the Class that each entry, of this factory, is an instance of */ - Class getEntryClass(); + Class getEntryClass(); /** * @return return the versioned equivalent of this RegionEntryFactory diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java index 5d992530a47e..f3954af7a7b4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXRemoteCommitMessage.java @@ -65,7 +65,8 @@ public static RemoteCommitResponse send(Cache cache, int txUniqId, InternalDistributedMember onBehalfOfClientMember, DistributedMember recipient) { final InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem(); - final Set recipients = Collections.singleton(recipient); + final Set recipients = + Collections.singleton((InternalDistributedMember) recipient); RemoteCommitResponse p = new RemoteCommitResponse(system, recipients); TXMessage msg = new TXRemoteCommitMessage(txUniqId, onBehalfOfClientMember, p); @@ -136,11 +137,6 @@ public static class TXRemoteCommitReplyMessage extends ReplyMessage { private transient TXCommitMessage commitMessage; - /* - * Used on the fromData side to transfer the value bytes to the requesting thread - */ - public transient byte[] valueInBytes; - /** * Empty constructor to conform to DataSerializable interface */ @@ -165,7 +161,7 @@ public boolean getInlineProcess() { /** * Return the value from the get operation, serialize it bytes as late as possible to avoid - * making un-neccesary byte[] copies. De-serialize those same bytes as late as possible to avoid + * making unnecessary byte[] copies. De-serialize those same bytes as late as possible to avoid * using precious threads (aka P2P readers). 
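The "serialize as late as possible" comment above describes a deliberate pattern: keep the payload as raw bytes and inflate it only on the thread that finally needs the object, so shared reader threads are not spent on deserialization. A hedged, self-contained sketch of the idea using plain JDK serialization (Geode's actual wire format is different):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Keep the value as bytes; defer deserialization to the requesting thread.
final class LazyValueSketch {
  private final byte[] valueInBytes;

  LazyValueSketch(Serializable value) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
      oos.writeObject(value); // serialize once, as late as the sender allows
    }
    valueInBytes = bos.toByteArray();
  }

  Object inflate() throws IOException, ClassNotFoundException {
    try (ObjectInputStream ois =
        new ObjectInputStream(new ByteArrayInputStream(valueInBytes))) {
      return ois.readObject(); // deferred until actually needed
    }
  }
}
```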
* * @param recipient the origin VM that performed the get @@ -191,7 +187,6 @@ public static void send(InternalDistributedMember recipient, int processorId, */ @Override public void process(final DistributionManager dm, ReplyProcessor21 processor) { - final long startTime = getTimestamp(); if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) { logger.trace(LogMarker.DM_VERBOSE, "TXRemoteCommitReply process invoking reply processor with processorId:{}", @@ -248,7 +243,8 @@ public static class RemoteCommitResponse extends RemoteOperationResponse { private volatile TXCommitMessage commitMessage; private volatile long start; - public RemoteCommitResponse(InternalDistributedSystem ds, Set recipients) { + public RemoteCommitResponse(InternalDistributedSystem ds, + Set<InternalDistributedMember> recipients) { super(ds, recipients, true); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateInterface.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateInterface.java index cfd79ef443c4..7908bcbb8aaf 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateInterface.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateInterface.java @@ -21,6 +21,7 @@ import javax.transaction.Synchronization; import org.apache.geode.cache.CommitConflictException; +import org.apache.geode.cache.EntryEvent; import org.apache.geode.cache.Region.Entry; import org.apache.geode.cache.TransactionId; import org.apache.geode.cache.UnsupportedOperationInTransactionException; @@ -89,7 +90,7 @@ public interface TXStateInterface extends Synchronization, InternalDataView { void rollback(); - List getEvents(); + List<EntryEvent<?, ?>> getEvents(); @@ -107,7 +108,7 @@ void invalidateExistingEntry(final EntryEventImpl event, boolean invokeCallbacks * otherwise returns null */ @Override - Entry getEntry(final KeyInfo keyInfo, final LocalRegion region, boolean allowTombstones); + Entry<?, ?> getEntry(final KeyInfo keyInfo, final LocalRegion region, boolean allowTombstones); TXEvent getEvent(); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateProxyImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateProxyImpl.java index 9ca1a4e3d8c4..b816fc12d1ab 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateProxyImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateProxyImpl.java @@ -28,6 +28,7 @@ import org.apache.geode.GemFireException; import org.apache.geode.annotations.internal.MakeNotStatic; import org.apache.geode.cache.CommitConflictException; +import org.apache.geode.cache.EntryEvent; import org.apache.geode.cache.EntryNotFoundException; import org.apache.geode.cache.Region.Entry; import org.apache.geode.cache.TransactionDataNotColocatedException; @@ -53,7 +54,7 @@ public class TXStateProxyImpl implements TXStateProxy { protected static final AtomicBoolean txDistributedClientWarningIssued = new AtomicBoolean(); private boolean isJTA; - private TXId txId; + private final TXId txId; protected final TXManagerImpl txMgr; protected DistributedMember target; private boolean commitRequestedByOwner; @@ -193,10 +194,6 @@ public TXStateInterface getRealDeal(DistributedMember t) { return realDeal; } - protected void setTXIDForReplay(TXId id) { - txId = id; - } - @Override public boolean isOnBehalfOfClient() { return onBehalfOfClientMember != null; @@ -357,10 +354,10 @@ public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boo } @Override - public Entry getEntry(KeyInfo keyInfo, LocalRegion
region, boolean allowTombstones) { + public Entry<?, ?> getEntry(KeyInfo keyInfo, LocalRegion region, boolean allowTombstones) { try { operationCount++; - Entry retVal = getRealDeal(keyInfo, region).getEntry(keyInfo, region, allowTombstones); + Entry<?, ?> retVal = getRealDeal(keyInfo, region).getEntry(keyInfo, region, allowTombstones); trackBucketForTx(keyInfo); return retVal; } catch (TransactionDataRebalancedException transactionDataRebalancedException) { @@ -377,7 +374,7 @@ public TXEvent getEvent() { } @Override - public List getEvents() { + public List<EntryEvent<?, ?>> getEvents() { assertBootstrapped(); return getRealDeal(null, null).getEvents(); } @@ -585,7 +582,7 @@ public Object findObject(KeyInfo key, LocalRegion r, boolean isCreate, boolean g } @Override - public Set getAdditionalKeysForIterator(LocalRegion currRgn) { + public Set<?> getAdditionalKeysForIterator(LocalRegion currRgn) { if (realDeal == null) { return null; } @@ -795,7 +792,7 @@ public void checkSupportsRegionClear() throws UnsupportedOperationInTransactionE } @Override - public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { + public Set<?> getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { boolean resetTxState = isTransactionInternalSuspendNeeded(localRegion); TXStateProxy txp = null; if (resetTxState) { @@ -814,7 +811,8 @@ public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTom } @Override - public Entry getEntryOnRemote(KeyInfo keyInfo, LocalRegion localRegion, boolean allowTombstones) + public Entry<?, ?> getEntryOnRemote(KeyInfo keyInfo, LocalRegion localRegion, + boolean allowTombstones) throws DataLocationException { operationCount++; TXStateInterface tx = getRealDeal(keyInfo, localRegion); @@ -982,10 +980,10 @@ public void setJCATransaction() { } @Override - public Entry accessEntry(KeyInfo keyInfo, LocalRegion region) { + public Entry<?, ?> accessEntry(KeyInfo keyInfo, LocalRegion region) { try { operationCount++; - Entry retVal = getRealDeal(keyInfo, region).accessEntry(keyInfo, region); + Entry<?, ?> retVal = getRealDeal(keyInfo, region).accessEntry(keyInfo, region); trackBucketForTx(keyInfo); return retVal; } catch (TransactionDataRebalancedException transactionDataRebalancedException) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateStub.java b/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateStub.java index 2b1da9643110..92dbb760894f 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateStub.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/TXStateStub.java @@ -22,6 +22,7 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.geode.cache.CommitConflictException; +import org.apache.geode.cache.EntryEvent; import org.apache.geode.cache.EntryNotFoundException; import org.apache.geode.cache.Region; import org.apache.geode.cache.Region.Entry; @@ -45,16 +46,14 @@ public abstract class TXStateStub implements TXStateInterface { protected final DistributedMember target; protected final TXStateProxy proxy; - protected Runnable internalAfterSendRollback; - protected Runnable internalAfterSendCommit; + protected Runnable internalAfterSendRollback = null; + protected Runnable internalAfterSendCommit = null; Map<Region<?, ?>, TXRegionStub> regionStubs = new HashMap<>(); - protected TXStateStub(TXStateProxy stateProxy, DistributedMember target) { + protected TXStateStub(TXStateProxy proxy, DistributedMember target) { this.target = target; - proxy = stateProxy; - internalAfterSendRollback =
null; - internalAfterSendCommit = null; + this.proxy = proxy; } @Override @@ -128,13 +127,6 @@ public String toString() { } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.TXStateInterface#destroyExistingEntry(org.apache.geode.internal - * .cache.EntryEventImpl, boolean, java.lang.Object) - */ @Override public void destroyExistingEntry(EntryEventImpl event, boolean cacheWrite, Object expectedOldValue) throws EntryNotFoundException { @@ -146,44 +138,21 @@ public void destroyExistingEntry(EntryEventImpl event, boolean cacheWrite, rs.destroyExistingEntry(event, cacheWrite, expectedOldValue); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getBeginTime() - */ @Override public long getBeginTime() { - // TODO Auto-generated method stub return 0; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getCache() - */ @Override public InternalCache getCache() { return proxy.getCache(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getChanges() - */ @Override public int getChanges() { - // TODO Auto-generated method stub return 0; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getDeserializedValue(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion, boolean) - */ @Override public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boolean updateStats, boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, @@ -192,76 +161,31 @@ public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boo return null; } - public Object getDeserializedValue(KeyInfo keyInfo, LocalRegion localRegion, boolean updateStats, - boolean disableCopyOnRead, boolean preferCD, EntryEventImpl clientEvent, - boolean returnTombstones) { - // We never have a local value if we are a stub... - return null; - } - - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getEntry(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion) - */ @Override - public Entry getEntry(KeyInfo keyInfo, LocalRegion r, boolean allowTombstones) { + public Entry<?, ?> getEntry(KeyInfo keyInfo, LocalRegion r, boolean allowTombstones) { return getTXRegionStub(r).getEntry(keyInfo, allowTombstones); - // Entry retVal = null; - // if (r.getPartitionAttributes() != null) { - // PartitionedRegion pr = (PartitionedRegion)r; - // try { - // retVal = pr.getEntryRemotely((InternalDistributedMember)target, - // keyInfo.getBucketId(), keyInfo.getKey(), allowTombstones); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getEvent() - */ @Override public TXEvent getEvent() { throw new UnsupportedOperationException(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getEvents() - */ @Override - public List getEvents() { + public List<EntryEvent<?, ?>> getEvents() { throw new UnsupportedOperationException(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getRegions() - */ @Override public Collection getRegions() { throw new UnsupportedOperationException(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getTransactionId() - */ @Override public TransactionId getTransactionId() { return proxy.getTxId(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#invalidateExistingEntry(org.apache.geode.
- * internal.cache.EntryEventImpl, boolean, boolean) - */ @Override public void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry) { @@ -274,191 +198,86 @@ public void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallback } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#isInProgress() - */ @Override public boolean isInProgress() { return proxy.isInProgress(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#isInProgressAndSameAs(org.apache.geode. - * internal.cache.TXStateInterface) - */ @Override public boolean isInProgressAndSameAs(TXStateInterface state) { throw new UnsupportedOperationException(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#needsLargeModCount() - */ @Override public boolean needsLargeModCount() { - // TODO Auto-generated method stub return false; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#nextModSerialNum() - */ @Override public int nextModSerialNum() { - // TODO Auto-generated method stub return 0; } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.TXStateInterface#readRegion(org.apache.geode.internal.cache. - * LocalRegion) - */ @Override public TXRegionState readRegion(InternalRegion r) { throw new UnsupportedOperationException(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#rmRegion(org.apache.geode.internal.cache. - * LocalRegion) - */ @Override public void rmRegion(LocalRegion r) { throw new UnsupportedOperationException(); } - - public void setAfterSendRollback(Runnable afterSend) { - // TODO Auto-generated method stub internalAfterSendRollback = afterSend; } public void setAfterSendCommit(Runnable afterSend) { - // TODO Auto-generated method stub internalAfterSendCommit = afterSend; } - - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.TXStateInterface#txPutEntry(org.apache.geode.internal.cache. - * EntryEventImpl, boolean, boolean, boolean) - */ @Override public boolean txPutEntry(EntryEventImpl event, boolean ifNew, boolean requireOldValue, boolean checkResources, Object expectedOldValue) { return false; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#txReadEntry(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion, boolean) - */ @Override public TXEntryState txReadEntry(KeyInfo entryKey, LocalRegion localRegion, boolean rememberRead, boolean createTxEntryIfAbsent) { - // TODO Auto-generated method stub return null; } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.TXStateInterface#txReadRegion(org.apache.geode.internal.cache. - * LocalRegion) - */ @Override public TXRegionState txReadRegion(InternalRegion internalRegion) { - // TODO Auto-generated method stub return null; } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.TXStateInterface#txWriteRegion(org.apache.geode.internal.cache. - * LocalRegion, java.lang.Object) - */ @Override public TXRegionState txWriteRegion(InternalRegion internalRegion, KeyInfo entryKey) { - // TODO Auto-generated method stub return null; } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.TXStateInterface#writeRegion(org.apache.geode.internal.cache. 
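Most of the `TXStateStub` methods above and below delegate to a per-region `TXRegionStub` obtained via `getTXRegionStub(...)`. A minimal sketch of that lazy per-region caching, with stand-in types and a string key instead of the real `Region` key:

```java
import java.util.HashMap;
import java.util.Map;

// Lazy per-region stub cache; RegionStub stands in for TXRegionStub, whose
// real implementation forwards each operation to the transaction's target.
final class RegionStubCacheSketch {
  interface RegionStub {
    boolean containsKey(Object key);
  }

  private final Map<String, RegionStub> regionStubs = new HashMap<>();

  RegionStub getTXRegionStub(String regionPath) {
    return regionStubs.computeIfAbsent(regionPath, path -> {
      return key -> false; // stand-in; created once per region, then reused
    });
  }
}
```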
- * LocalRegion) - */ @Override public TXRegionState writeRegion(InternalRegion r) { - // TODO Auto-generated method stub return null; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#containsKey(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion) - */ @Override public boolean containsKey(KeyInfo keyInfo, LocalRegion localRegion) { return getTXRegionStub(localRegion).containsKey(keyInfo); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#containsValueForKey(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion) - */ @Override public boolean containsValueForKey(KeyInfo keyInfo, LocalRegion localRegion) { return getTXRegionStub(localRegion).containsValueForKey(keyInfo); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#entryCount(org.apache.geode.internal.cache. - * LocalRegion) - */ @Override public int entryCount(LocalRegion localRegion) { return getTXRegionStub(localRegion).entryCount(); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#findObject(org.apache.geode.internal.cache. - * LocalRegion, java.lang.Object, java.lang.Object, boolean, boolean, java.lang.Object) - */ @Override public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate, boolean generateCallbacks, Object value, boolean disableCopyOnRead, boolean preferCD, @@ -468,38 +287,17 @@ public Object findObject(KeyInfo keyInfo, LocalRegion r, boolean isCreate, requestingClient, clientEvent); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#getAdditionalKeysForIterator(org.apache.geode. - * internal.cache.LocalRegion) - */ @Override - public Set getAdditionalKeysForIterator(LocalRegion currRgn) { - // TODO Auto-generated method stub + public Set getAdditionalKeysForIterator(LocalRegion currRgn) { return null; } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#getEntryForIterator(org.apache.geode.internal. - * cache.LocalRegion, java.lang.Object, boolean) - */ @Override public Object getEntryForIterator(KeyInfo keyInfo, LocalRegion currRgn, boolean rememberReads, boolean allowTombstones) { return getTXRegionStub(currRgn).getEntryForIterator(keyInfo, allowTombstones); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#getKeyForIterator(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion, boolean) - */ @Override public Object getKeyForIterator(KeyInfo keyInfo, LocalRegion currRgn, boolean rememberReads, boolean allowTombstones) { @@ -510,23 +308,11 @@ public Object getKeyForIterator(KeyInfo keyInfo, LocalRegion currRgn, boolean re return key; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#getValueInVM(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion, boolean) - */ @Override public Object getValueInVM(KeyInfo keyInfo, LocalRegion localRegion, boolean rememberRead) { - // TODO Auto-generated method stub return null; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#isDeferredStats() - */ @Override public boolean isDeferredStats() { return true; @@ -541,12 +327,6 @@ public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld, false); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#putEntry(org.apache.geode.internal.cache. 
- * EntryEventImpl, boolean, boolean, java.lang.Object, boolean, long, boolean) - */ @Override public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue, long lastModified, @@ -555,13 +335,6 @@ public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld, requireOldValue, lastModified, overwriteDestroyed); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#getSerializedValue(org.apache.geode.internal. - * cache.LocalRegion, java.lang.Object, java.lang.Object) - */ @Override public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean doNotLockEntry, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent, @@ -569,13 +342,6 @@ public Object getSerializedValue(LocalRegion localRegion, KeyInfo key, boolean d throw new UnsupportedOperationException(); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#putEntryOnRemote(org.apache.geode.internal. - * cache.EntryEventImpl, boolean, boolean, java.lang.Object, boolean, long, boolean) - */ @Override public boolean putEntryOnRemote(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue, long lastModified, @@ -588,25 +354,12 @@ public boolean isFireCallbacks() { return false; } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#destroyOnRemote(java.lang.Integer, - * org.apache.geode.internal.cache.EntryEventImpl, java.lang.Object) - */ @Override public void destroyOnRemote(EntryEventImpl event, boolean cacheWrite, Object expectedOldValue) throws DataLocationException { throw new IllegalStateException(); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#invalidateOnRemote(org.apache.geode.internal. - * cache.EntryEventImpl, boolean, boolean) - */ @Override public void invalidateOnRemote(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry) throws DataLocationException { @@ -631,15 +384,8 @@ public void checkSupportsRegionClear() throws UnsupportedOperationInTransactionE "clear() is not supported while in a transaction"); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#getBucketKeys(org.apache.geode.internal.cache. 
- * LocalRegion, int) - */ @Override - public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { + public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTombstones) { PartitionedRegion pr = (PartitionedRegion) localRegion; /* * txtodo: what does this mean for c/s @@ -647,41 +393,22 @@ public Set getBucketKeys(LocalRegion localRegion, int bucketId, boolean allowTom return pr.getBucketKeys(bucketId, allowTombstones); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.InternalDataView#getEntryOnRemote(java.lang.Object, - * org.apache.geode.internal.cache.LocalRegion) - */ @Override - public Entry getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones) + public Entry getEntryOnRemote(KeyInfo key, LocalRegion localRegion, boolean allowTombstones) throws DataLocationException { throw new IllegalStateException(); } - /* - * (non-Javadoc) - * - * @see org.apache.geode.internal.cache.TXStateInterface#getSemaphore() - */ @Override public ReentrantLock getLock() { return proxy.getLock(); } - /* - * (non-Javadoc) - * - * @see - * org.apache.geode.internal.cache.InternalDataView#getRegionKeysForIteration(org.apache.geode. - * internal.cache.LocalRegion) - */ @Override - public Set getRegionKeysForIteration(LocalRegion currRegion) { + public Set getRegionKeysForIteration(LocalRegion currRegion) { return getTXRegionStub(currRegion).getRegionKeysForIteration(); } - @Override public boolean isRealDealLocal() { return false; @@ -703,9 +430,8 @@ public void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList getTXRegionStub(reg).postRemoveAll(op, successfulOps, reg); } - @Override - public Entry accessEntry(KeyInfo keyInfo, LocalRegion localRegion) { + public Entry accessEntry(KeyInfo keyInfo, LocalRegion localRegion) { return getEntry(keyInfo, localRegion, false); } @@ -715,9 +441,7 @@ public void updateEntryVersion(EntryEventImpl event) throws EntryNotFoundExcepti } @Override - public void close() { - // nothing needed - } + public void close() {} @Override public boolean isTxState() { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/control/InternalResourceManager.java b/geode-core/src/main/java/org/apache/geode/internal/cache/control/InternalResourceManager.java index cad0d16252c7..0cf9af8679e5 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/control/InternalResourceManager.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/control/InternalResourceManager.java @@ -542,57 +542,36 @@ public void setCriticalOffHeapPercentage(float offHeapPercentage) { getOffHeapMonitor().setCriticalThreshold(offHeapPercentage); } - /** - * {@inheritDoc} - */ @Override public float getCriticalOffHeapPercentage() { return getOffHeapMonitor().getCriticalThreshold(); } - /** - * {@inheritDoc} - */ @Override public void setEvictionOffHeapPercentage(float offHeapPercentage) { getOffHeapMonitor().setEvictionThreshold(offHeapPercentage); } - /** - * {@inheritDoc} - */ @Override public float getEvictionOffHeapPercentage() { return getOffHeapMonitor().getEvictionThreshold(); } - /** - * {@inheritDoc} - */ @Override public void setCriticalHeapPercentage(float heapPercentage) { getHeapMonitor().setCriticalThreshold(heapPercentage); } - /** - * {@inheritDoc} - */ @Override public float getCriticalHeapPercentage() { return getHeapMonitor().getCriticalThreshold(); } - /** - * {@inheritDoc} - */ @Override public void setEvictionHeapPercentage(float heapPercentage) { 
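The threshold setters above back the public `ResourceManager` API. If it is useful to see how they surface to applications, this sketch sets eviction and critical heap thresholds on a cache (the percentages are illustrative, not recommendations):

```java
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.control.ResourceManager;

// Configure heap thresholds through the public ResourceManager API.
public class ResourceManagerThresholds {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    ResourceManager rm = cache.getResourceManager();
    rm.setEvictionHeapPercentage(75.0f); // start evicting above this usage
    rm.setCriticalHeapPercentage(90.0f); // shed load above this usage
    System.out.println("eviction=" + rm.getEvictionHeapPercentage()
        + " critical=" + rm.getCriticalHeapPercentage());
    cache.close();
  }
}
```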
getHeapMonitor().setEvictionThreshold(heapPercentage); } - /** - * {@inheritDoc} - */ @Override public float getEvictionHeapPercentage() { return getHeapMonitor().getEvictionThreshold(); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/control/SerializableRegionRedundancyStatusImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/control/SerializableRegionRedundancyStatusImpl.java index 84adf872733e..163266e9337e 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/control/SerializableRegionRedundancyStatusImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/control/SerializableRegionRedundancyStatusImpl.java @@ -28,7 +28,7 @@ /** * result object produced by the servers. These need to be transferred to the locators - * via functions so they need to be DataSerializable + * via functions, so they need to be DataSerializable */ public class SerializableRegionRedundancyStatusImpl extends RegionRedundancyStatusImpl diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryHeap.java index 4704131bebfb..e8fa4abd8a2b 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryHeap.java @@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMStatsDiskLRURegionEntryHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryOffHeap.java index 94296ba6eeec..6409cee1a010 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryOffHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskLRURegionEntryOffHeap.java @@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMStatsDiskLRURegionEntryOffHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryHeap.java index 4cc30a6e28df..738cf8d7b7b4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryHeap.java @@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. 
return VMStatsDiskRegionEntryHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryOffHeap.java index 1b830e98453a..80aa99849ecc 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryOffHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsDiskRegionEntryOffHeap.java @@ -65,7 +65,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMStatsDiskRegionEntryOffHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryHeap.java index 4c0ccbcdfd40..0519643f637b 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryHeap.java @@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMStatsLRURegionEntryHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryOffHeap.java index 7ed8cef2d1e5..823433ebc7fa 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryOffHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsLRURegionEntryOffHeap.java @@ -65,7 +65,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMStatsLRURegionEntryOffHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryHeap.java index a809b5f1f82b..f5dc404604f6 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryHeap.java @@ -63,7 +63,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. 
return VMStatsRegionEntryHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryOffHeap.java index 9455853bf065..7c88420daee1 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryOffHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMStatsRegionEntryOffHeap.java @@ -65,7 +65,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMStatsRegionEntryOffHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryHeap.java index ca213b6a152b..d22d7dddef84 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryHeap.java @@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMThinDiskLRURegionEntryHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryOffHeap.java index 3d1765d63b40..e093e9a5a474 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryOffHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskLRURegionEntryOffHeap.java @@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMThinDiskLRURegionEntryOffHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryHeap.java index 6fe0dfea5253..1a918381d8db 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryHeap.java @@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. 
return VMThinDiskRegionEntryHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryOffHeap.java index f1103b2bcc8d..60c7493cd438 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryOffHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinDiskRegionEntryOffHeap.java @@ -65,7 +65,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMThinDiskRegionEntryOffHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryHeap.java index dfc3538e58cd..30064ec82bc4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryHeap.java @@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMThinLRURegionEntryHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryOffHeap.java index c09b05c0e133..5fd57f197e0e 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryOffHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinLRURegionEntryOffHeap.java @@ -65,7 +65,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. return VMThinLRURegionEntryOffHeapObjectKey.class; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryHeap.java index b69950ce657e..31311396d45b 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryHeap.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryHeap.java @@ -63,7 +63,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va } @Override - public Class getEntryClass() { + public Class getEntryClass() { // The class returned from this method is used to estimate the memory size. // This estimate will not take into account the memory saved by inlining the keys. 
       return VMThinRegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryOffHeap.java
index 795e333d2fb3..ad874ae75069 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VMThinRegionEntryOffHeap.java
@@ -65,7 +65,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VMThinRegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryHeap.java
index 49be33540638..fa69313ad84c 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsDiskLRURegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryOffHeap.java
index c1f00c599874..d98ec7cdff75 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskLRURegionEntryOffHeap.java
@@ -68,7 +68,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsDiskLRURegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryHeap.java
index f8f30861cf0a..f84dfac3ff5f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryHeap.java
@@ -66,7 +66,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsDiskRegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryOffHeap.java
index 5479bbf5daa6..7f7d47c715b5 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsDiskRegionEntryOffHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsDiskRegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryHeap.java
index 54558294f490..9737c00fc064 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryHeap.java
@@ -66,7 +66,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsLRURegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryOffHeap.java
index 6e85519dd9c8..5f1bc2949a22 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsLRURegionEntryOffHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsLRURegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryHeap.java
index 486d1fa5a7eb..7f3875ca9901 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryHeap.java
@@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsRegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryOffHeap.java
index 504c62c9e193..d86745699987 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedStatsRegionEntryOffHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedStatsRegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryHeap.java
index d1c17b025c06..d4d62530dbfd 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryHeap.java
@@ -66,7 +66,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinDiskLRURegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryOffHeap.java
index ade85ba817da..46f09a90151f 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskLRURegionEntryOffHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinDiskLRURegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryHeap.java
index fb593633ccdc..4c255a6b75d1 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryHeap.java
@@ -66,7 +66,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinDiskRegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryOffHeap.java
index 475814207efe..1d57e953fd68 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinDiskRegionEntryOffHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinDiskRegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryHeap.java
index 5f824a22fbf9..3fbb4260f307 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryHeap.java
@@ -66,7 +66,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinLRURegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryOffHeap.java
index 6040e68974a5..6c1ef69220f0 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinLRURegionEntryOffHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinLRURegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryHeap.java
index bbc6cc4b5ee0..7a619272c434 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryHeap.java
@@ -64,7 +64,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinRegionEntryHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryOffHeap.java b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryOffHeap.java
index dcfc38e77525..d296ae3309f8 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryOffHeap.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/entries/VersionedThinRegionEntryOffHeap.java
@@ -67,7 +67,7 @@ public RegionEntry createEntry(RegionEntryContext context, Object key, Object va
     }
     @Override
-    public Class getEntryClass() {
+    public Class<?> getEntryClass() {
       // The class returned from this method is used to estimate the memory size.
       // This estimate will not take into account the memory saved by inlining the keys.
       return VersionedThinRegionEntryOffHeapObjectKey.class;
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/AbstractExecution.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/AbstractExecution.java
index 73c02c405545..95c885799932 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/AbstractExecution.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/AbstractExecution.java
@@ -15,6 +15,8 @@
 package org.apache.geode.internal.cache.execute;
+import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast;
+
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
@@ -53,7 +55,7 @@
  * @since GemFire 5.8LA
  *
  */
-public abstract class AbstractExecution implements InternalExecution {
+public abstract class AbstractExecution<IN, OUT, AGG> implements InternalExecution<IN, OUT, AGG> {
   private static final Logger logger = LogService.getLogger();
   public static final int DEFAULT_CLIENT_FUNCTION_TIMEOUT = 0;
@@ -66,11 +68,11 @@ public abstract class AbstractExecution implements InternalExecution {
   protected MemberMappedArgument memberMappedArg;
-  protected Object args;
+  protected IN args;
-  protected ResultCollector rc;
+  protected ResultCollector<OUT, AGG> rc;
-  protected Set filter = new HashSet();
+  protected Set filter = new HashSet<>();
   protected volatile boolean isReExecute = false;
@@ -78,23 +80,14 @@ public abstract class AbstractExecution implements InternalExecution {
   Set failedNodes = new HashSet<>();
-  protected boolean isFnSerializationReqd;
+  protected boolean isFunctionSerializationRequired;
   /***
-   * yjing The following code is added to get a set of function executing nodes by the data aware
+   * The following code is added to get a set of function executing nodes by the data aware
    * procedure
    */
   private Collection executionNodes = null;
-  public interface ExecutionNodesListener {
-
-    void afterExecutionNodesSet(AbstractExecution execution);
-
-    void reset();
-  }
-
-  private final ExecutionNodesListener executionNodesListener = null;
-
   boolean waitOnException = false;
   boolean forwardExceptions = false;
@@ -166,7 +159,7 @@ protected AbstractExecution() {
         timeoutMsSystemProperty >= 0 ?
             timeoutMsSystemProperty : DEFAULT_CLIENT_FUNCTION_TIMEOUT;
   }
-  protected AbstractExecution(AbstractExecution ae) {
+  protected AbstractExecution(AbstractExecution<IN, OUT, AGG> ae) {
     if (ae.args != null) {
       args = ae.args;
     }
@@ -181,20 +174,21 @@ protected AbstractExecution(AbstractExecution ae) {
     if (ae.proxyCache != null) {
       proxyCache = ae.proxyCache;
     }
-    isFnSerializationReqd = ae.isFnSerializationReqd;
+    isFunctionSerializationRequired = ae.isFunctionSerializationRequired;
     timeoutMs = ae.timeoutMs;
   }
-  protected AbstractExecution(AbstractExecution ae, boolean isReExecute) {
+  protected AbstractExecution(AbstractExecution<IN, OUT, AGG> ae, boolean isReExecute) {
    this(ae);
    this.isReExecute = isReExecute;
  }
-  public Object getArgumentsForMember(String memberId) {
+  @SuppressWarnings("unchecked")
+  public <T> T getArgumentsForMember(String memberId) {
    if (!isMemberMappedArgument) {
-      return args;
+      return (T) args;
    } else {
-      return memberMappedArg.getArgumentsForMember(memberId);
+      return (T) memberMappedArg.getArgumentsForMember(memberId);
    }
  }
@@ -206,19 +200,18 @@ public Object getArguments() {
     return args;
   }
-  public ResultCollector getResultCollector() {
-    return rc;
+  @SuppressWarnings("unchecked")
+  public ResultCollector getResultCollector() {
+    return (ResultCollector) rc;
   }
-  public Set getFilter() {
-    return filter;
+  @SuppressWarnings("unchecked")
+  public Set getFilter() {
+    return (Set) filter;
   }
-  public AbstractExecution setIsReExecute() {
+  public AbstractExecution<IN, OUT, AGG> setIsReExecute() {
     isReExecute = true;
-    if (executionNodesListener != null) {
-      executionNodesListener.reset();
-    }
     return this;
   }
@@ -242,20 +235,17 @@ public boolean isClientServerMode() {
     return isClientServerMode;
   }
-  public boolean isFnSerializationReqd() {
-    return isFnSerializationReqd;
+  public boolean isFunctionSerializationRequired() {
+    return isFunctionSerializationRequired;
   }
   public void setExecutionNodes(Set nodes) {
     if (executionNodes != null) {
       executionNodes = nodes;
-      if (executionNodesListener != null) {
-        executionNodesListener.afterExecutionNodesSet(this);
-      }
     }
   }
-  public void executeFunctionOnLocalPRNode(final Function fn, final FunctionContext cx,
+  public void executeFunctionOnLocalPRNode(final Function fn, final FunctionContext cx,
       final PartitionedRegionFunctionResultSender sender, DistributionManager dm, boolean isTx) {
     if (dm instanceof ClusterDistributionManager && !isTx) {
       if (ServerConnection.isExecuteFunctionOnLocalNodeOnly() == 1) {
@@ -288,11 +278,11 @@ public void executeFunctionOnLocalPRNode(final Function fn, final FunctionContex
     }
   }
-  // Bug41118 : in case of lonerDistribuedSystem do local execution through
+  // in case of lonerDistributedSystem do local execution through
   // main thread otherwise give execution to FunctionExecutor from
   // DistributionManager
-  public void executeFunctionOnLocalNode(final Function fn, final FunctionContext cx,
-      final ResultSender sender, DistributionManager dm, final boolean isTx) {
+  public void executeFunctionOnLocalNode(final Function fn, final FunctionContext cx,
+      final ResultSender sender, DistributionManager dm, final boolean isTx) {
     if (dm instanceof ClusterDistributionManager && !isTx) {
       final ClusterDistributionManager newDM = (ClusterDistributionManager) dm;
       newDM.getExecutors().getFunctionExecutor().execute(() -> {
@@ -313,8 +303,8 @@ public void executeFunctionOnLocalNode(final Function fn, final FunctionConte
     }
   }
-  private void executeFunctionLocally(final Function fn, final FunctionContext cx,
-      final ResultSender sender, DistributionManager dm) {
+  private void executeFunctionLocally(final Function fn, final FunctionContext cx,
+      final ResultSender sender, DistributionManager dm) {
     FunctionStats stats = FunctionStatsManager.getFunctionStats(fn.getId(), dm.getSystem());
@@ -327,13 +317,13 @@ private void executeFunctionLocally(final Function fn, final FunctionContext
       fn.execute(cx);
       stats.endFunctionExecution(start, fn.hasResult());
-    } catch (FunctionInvocationTargetException fite) {
+    } catch (FunctionInvocationTargetException e) {
       FunctionException functionException;
       if (fn.isHA()) {
         functionException =
-            new FunctionException(new InternalFunctionInvocationTargetException(fite.getMessage()));
+            new FunctionException(new InternalFunctionInvocationTargetException(e.getMessage()));
       } else {
-        functionException = new FunctionException(fite);
+        functionException = new FunctionException(e);
       }
       handleException(functionException, fn, sender, dm, start);
     } catch (BucketMovedException bme) {
@@ -355,18 +345,18 @@ private void executeFunctionLocally(final Function fn, final FunctionContext
   }
   @Override
-  public ResultCollector execute(final String functionName) {
+  public ResultCollector<OUT, AGG> execute(final String functionName) {
     return execute(functionName, getTimeoutMs(), TimeUnit.MILLISECONDS);
   }
   @Override
-  public ResultCollector execute(final String functionName, long timeout, TimeUnit unit) {
+  public ResultCollector<OUT, AGG> execute(final String functionName, long timeout, TimeUnit unit) {
     if (functionName == null) {
       throw new FunctionException(
           "The input function for the execute function request is null");
     }
-    isFnSerializationReqd = false;
-    Function functionObject = FunctionService.getFunction(functionName);
+    isFunctionSerializationRequired = false;
+    Function functionObject = uncheckedCast(FunctionService.getFunction(functionName));
     if (functionObject == null) {
       throw new FunctionException(
           String.format("Function named %s is not registered to FunctionService",
@@ -377,7 +367,8 @@ public ResultCollector execute(final String functionName, long timeout, TimeUnit
   }
   @Override
-  public ResultCollector execute(Function function, long timeout, TimeUnit unit)
+  public ResultCollector<OUT, AGG> execute(@SuppressWarnings("rawtypes") Function function,
+      long timeout, TimeUnit unit)
       throws FunctionException {
     if (function == null) {
       throw new FunctionException(
@@ -393,12 +384,13 @@ public ResultCollector execute(Function function, long timeout, TimeUnit unit)
       throw new IllegalArgumentException(
           "The Function#getID() returned null");
     }
-    isFnSerializationReqd = true;
-    return executeFunction(function, timeout, unit);
+    isFunctionSerializationRequired = true;
+    return executeFunction(uncheckedCast(function), timeout, unit);
   }
   @Override
-  public ResultCollector execute(Function function) throws FunctionException {
+  public ResultCollector<OUT, AGG> execute(@SuppressWarnings("rawtypes") Function function)
+      throws FunctionException {
     return execute(function, getTimeoutMs(), TimeUnit.MILLISECONDS);
   }
@@ -433,7 +425,8 @@ public boolean isIgnoreDepartedMembers() {
     return ignoreDepartedMembers;
   }
-  protected abstract ResultCollector executeFunction(Function fn, long timeout, TimeUnit unit);
+  protected abstract ResultCollector<OUT, AGG> executeFunction(Function fn, long timeout,
+      TimeUnit unit);
   /**
    * validates whether a function should execute in presence of transaction and HeapCritical
    *
    * @param function the function
    * @param targetMembers the set of members the function will be executed on
-   * @throws TransactionException if more than one nodes are targeted within a transaction
+   * @throws TransactionException if more than one node is targeted within a transaction
    * @throws LowMemoryException if the set contains a heap critical member
    */
-  public abstract void validateExecution(Function function,
+  public abstract void validateExecution(Function function,
       Set targetMembers);
-  public LocalResultCollector getLocalResultCollector(Function function,
-      final ResultCollector rc) {
+  public LocalResultCollector getLocalResultCollector(final Function function,
+      final ResultCollector rc) {
     if (rc instanceof LocalResultCollector) {
-      return (LocalResultCollector) rc;
+      return (LocalResultCollector) rc;
     } else {
-      return new LocalResultCollectorImpl(function, rc, this);
+      return uncheckedCast(new LocalResultCollectorImpl(function, rc, this));
     }
   }
@@ -480,8 +473,8 @@ void addFunctionAttributes(String functionId, byte[] functionAttributes) {
     idToFunctionAttributes.put(functionId, functionAttributes);
   }
-  private void handleException(Throwable functionException, final Function fn,
-      final ResultSender sender, DistributionManager dm, long startTime) {
+  private void handleException(Throwable functionException, final Function fn,
+      final ResultSender sender, DistributionManager dm, long startTime) {
     FunctionStats stats = FunctionStatsManager.getFunctionStats(fn.getId(), dm.getSystem());
     if (logger.isDebugEnabled()) {
@@ -499,7 +492,7 @@ private void handleException(Throwable functionException, final Function fn,
         // create a new FunctionException on the original one's message (not cause).
         functionException = new FunctionException(functionException.getLocalizedMessage());
       }
-      sender.lastResult(functionException);
+      sender.lastResult(uncheckedCast(functionException));
     } else {
       ((InternalResultSender) sender).setException(functionException);
     }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DefaultResultCollector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DefaultResultCollector.java
index aa74ccbb7254..9988b00086d3 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DefaultResultCollector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DefaultResultCollector.java
@@ -15,6 +15,7 @@
 package org.apache.geode.internal.cache.execute;
 import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 import org.apache.geode.cache.execute.Function;
@@ -32,9 +33,9 @@
  * @since GemFire 6.0
  *
  */
-public class DefaultResultCollector implements ResultCollector {
+public class DefaultResultCollector<T> implements ResultCollector<T, List<T>> {
-  private final ArrayList resultList = new ArrayList<>();
+  private final List<T> resultList = new ArrayList<>();
   public DefaultResultCollector() {}
@@ -44,7 +45,7 @@ public DefaultResultCollector() {}
    */
   @Override
   public synchronized void addResult(DistributedMember distributedMember,
-      Object resultOfSingleExecution) {
+      T resultOfSingleExecution) {
     resultList.add(resultOfSingleExecution);
   }
@@ -57,7 +58,7 @@ public synchronized void addResult(DistributedMember distributedMember,
    * @throws FunctionException if something goes wrong while retrieving the result
    */
   @Override
-  public Object getResult() throws FunctionException {
+  public List<T> getResult() throws FunctionException {
     return resultList; // this is full result
   }
@@ -80,7 +81,7 @@ public void endResults() {}
    * @throws FunctionException if something goes wrong while retrieving the result
    */
   @Override
-  public Object getResult(long timeout, TimeUnit unit) throws FunctionException {
+  public List<T> getResult(long timeout, TimeUnit unit) throws FunctionException {
     return resultList;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutor.java
index 717697e2819a..0ffabfe16a38 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutor.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionExecutor.java
@@ -14,6 +14,9 @@
  */
 package org.apache.geode.internal.cache.execute;
+
+import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast;
+
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
@@ -46,13 +49,14 @@
  * @since GemFire 5.8 LA
  *
  */
-public class DistributedRegionFunctionExecutor extends AbstractExecution {
+public class DistributedRegionFunctionExecutor<IN, OUT, AGG>
+    extends AbstractExecution<IN, OUT, AGG> {
   private final LocalRegion region;
   private ServerToClientFunctionResultSender sender;
-  public DistributedRegionFunctionExecutor(Region r) {
+  public DistributedRegionFunctionExecutor(Region<?, ?> r) {
     if (r == null) {
       throw new IllegalArgumentException(
           String.format("The input %s for the execute function request is null",
@@ -61,17 +65,7 @@ public DistributedRegionFunctionExecutor(Region r) {
     region = (LocalRegion) r;
   }
-  private DistributedRegionFunctionExecutor(DistributedRegionFunctionExecutor drfe) {
-    super(drfe);
-    region = drfe.region;
-    if (drfe.filter != null) {
-      filter.clear();
-      filter.addAll(drfe.filter);
-    }
-    sender = drfe.sender;
-  }
-
-  public DistributedRegionFunctionExecutor(DistributedRegion region, Set filter2, Object args,
+  public DistributedRegionFunctionExecutor(DistributedRegion region, Set filter, IN args,
       MemberMappedArgument memberMappedArg, ServerToClientFunctionResultSender resultSender) {
     if (args != null) {
       this.args = args;
@@ -80,16 +74,16 @@ public DistributedRegionFunctionExecutor(DistributedRegion region, Set filter2,
       isMemberMappedArgument = true;
     }
     sender = resultSender;
-    if (filter2 != null) {
-      filter.clear();
-      filter.addAll(filter2);
+    if (filter != null) {
+      this.filter.clear();
+      this.filter.addAll(filter);
     }
     this.region = region;
     isClientServerMode = true;
   }
   private DistributedRegionFunctionExecutor(
-      DistributedRegionFunctionExecutor distributedRegionFunctionExecutor,
+      DistributedRegionFunctionExecutor<IN, OUT, AGG> distributedRegionFunctionExecutor,
       MemberMappedArgument argument) {
     super(distributedRegionFunctionExecutor);
@@ -104,7 +98,8 @@ private DistributedRegionFunctionExecutor(
   }
   private DistributedRegionFunctionExecutor(
-      DistributedRegionFunctionExecutor distributedRegionFunctionExecutor, ResultCollector rs) {
+      DistributedRegionFunctionExecutor<IN, OUT, AGG> distributedRegionFunctionExecutor,
+      ResultCollector<OUT, AGG> rs) {
     super(distributedRegionFunctionExecutor);
     region = distributedRegionFunctionExecutor.getRegion();
@@ -116,7 +111,7 @@ private DistributedRegionFunctionExecutor(
   }
   private DistributedRegionFunctionExecutor(
-      DistributedRegionFunctionExecutor distributedRegionFunctionExecutor, Object args) {
+      DistributedRegionFunctionExecutor<IN, OUT, AGG> distributedRegionFunctionExecutor, IN args) {
     super(distributedRegionFunctionExecutor);
     region = distributedRegionFunctionExecutor.getRegion();
@@ -128,7 +123,8 @@ private DistributedRegionFunctionExecutor(
   }
   private DistributedRegionFunctionExecutor(
-      DistributedRegionFunctionExecutor distributedRegionFunctionExecutor, Set filter2) {
+      DistributedRegionFunctionExecutor<IN, OUT, AGG> distributedRegionFunctionExecutor,
+      Set filter2) {
     super(distributedRegionFunctionExecutor);
     region = distributedRegionFunctionExecutor.getRegion();
@@ -138,26 +134,26 @@ private DistributedRegionFunctionExecutor(
     filter.addAll(filter2);
   }
-  private DistributedRegionFunctionExecutor(DistributedRegionFunctionExecutor drfe,
+  private DistributedRegionFunctionExecutor(DistributedRegionFunctionExecutor<IN, OUT, AGG> other,
       boolean isReExecute) {
-    super(drfe);
-    region = drfe.region;
-    if (drfe.filter != null) {
+    super(other);
+    region = other.region;
+    if (other.filter != null) {
       filter.clear();
-      filter.addAll(drfe.filter);
+      filter.addAll(other.filter);
     }
-    sender = drfe.sender;
+    sender = other.sender;
     this.isReExecute = isReExecute;
   }
   @Override
-  public ResultCollector execute(final String functionName, long timeout, TimeUnit unit) {
+  public ResultCollector<OUT, AGG> execute(final String functionName, long timeout, TimeUnit unit) {
     if (functionName == null) {
       throw new FunctionException(
           "The input function for the execute function request is null");
     }
-    isFnSerializationReqd = false;
-    Function functionObject = FunctionService.getFunction(functionName);
+    isFunctionSerializationRequired = false;
+    Function functionObject = uncheckedCast(FunctionService.getFunction(functionName));
     if (functionObject == null) {
       throw new FunctionException(
           String.format("Function named %s is not registered to FunctionService",
@@ -171,12 +167,13 @@ public ResultCollector execute(final String functionName, long timeout, TimeUnit
   }
   @Override
-  public ResultCollector execute(final String functionName) {
+  public ResultCollector<OUT, AGG> execute(final String functionName) {
     return execute(functionName, getTimeoutMs(), TimeUnit.MILLISECONDS);
   }
   @Override
-  public ResultCollector execute(final Function function, long timeout, TimeUnit unit) {
+  public ResultCollector<OUT, AGG> execute(@SuppressWarnings("rawtypes") final Function function,
+      long timeout, TimeUnit unit) {
     if (function == null) {
       throw new FunctionException(
           String.format("The input %s for the execute function request is null",
@@ -195,24 +192,26 @@ public ResultCollector execute(final Function function, long timeout, TimeUnit u
       throw new FunctionException(
           "The Function#getID() returned null");
     }
-    isFnSerializationReqd = true;
-    return executeFunction(function, timeout, unit);
+    isFunctionSerializationRequired = true;
+    return executeFunction(uncheckedCast(function), timeout, unit);
   }
   @Override
-  public ResultCollector execute(final Function function) {
+  public ResultCollector<OUT, AGG> execute(@SuppressWarnings("rawtypes") final Function function) {
     return execute(function, getTimeoutMs(), TimeUnit.MILLISECONDS);
   }
   @Override
-  protected ResultCollector executeFunction(Function function, long timeout, TimeUnit unit) {
+  protected ResultCollector<OUT, AGG> executeFunction(Function function, long timeout,
+      TimeUnit unit) {
     if (!function.hasResult()) {
       region.executeFunction(this, function, args, null, filter, sender);
-      return new NoResult();
+      return new NoResult<>();
     }
-    ResultCollector inRc = (rc == null) ? new DefaultResultCollector() : rc;
-    ResultCollector rcToReturn =
-        region.executeFunction(this, function, args, inRc, filter, sender);
+    ResultCollector<OUT, AGG> inRc =
+        (rc == null) ? uncheckedCast(new DefaultResultCollector<>()) : rc;
+    ResultCollector<OUT, AGG> rcToReturn =
+        uncheckedCast(region.executeFunction(this, function, args, inRc, filter, sender));
     if (timeout > 0) {
       try {
         rcToReturn.getResult(timeout, unit);
@@ -224,18 +223,19 @@ protected ResultCollector executeFunction(Function function, long timeout, TimeU
   }
   @Override
-  public Execution withFilter(Set filter) {
+  public Execution<IN, OUT, AGG> withFilter(@SuppressWarnings("rawtypes") Set filter) {
     if (filter == null) {
       throw new FunctionException(
           String.format("The input %s for the execute function request is null",
               "filter"));
     }
-    return new DistributedRegionFunctionExecutor(this, filter);
+    // noinspection unchecked
+    return new DistributedRegionFunctionExecutor(this, filter);
   }
   @Override
-  public InternalExecution withBucketFilter(Set bucketIDs) {
+  public InternalExecution<IN, OUT, AGG> withBucketFilter(Set<Integer> bucketIDs) {
     if (bucketIDs != null && !bucketIDs.isEmpty()) {
       throw new IllegalArgumentException(
           String.format("Buckets as filter cannot be applied to a non partitioned region: %s",
@@ -255,43 +255,43 @@ public ServerToClientFunctionResultSender getServerResultSender() {
   @Override
-  public Execution setArguments(Object args) {
+  public Execution<IN, OUT, AGG> setArguments(IN args) {
     if (args == null) {
       throw new IllegalArgumentException(
           String.format("The input %s for the execute function request is null",
               "Args"));
     }
-    return new DistributedRegionFunctionExecutor(this, args);
+    return new DistributedRegionFunctionExecutor<>(this, args);
   }
   @Override
-  public Execution withArgs(Object args) {
+  public Execution<IN, OUT, AGG> withArgs(IN args) {
     return setArguments(args);
   }
   @Override
-  public Execution withCollector(ResultCollector rs) {
+  public Execution<IN, OUT, AGG> withCollector(ResultCollector<OUT, AGG> rs) {
     if (rs == null) {
       throw new IllegalArgumentException(
           String.format("The input %s for the execute function request is null",
               "Result Collector"));
     }
-    return new DistributedRegionFunctionExecutor(this, rs);
+    return new DistributedRegionFunctionExecutor<>(this, rs);
   }
   @Override
-  public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) {
+  public InternalExecution<IN, OUT, AGG> withMemberMappedArgument(MemberMappedArgument argument) {
     if (argument == null) {
       throw new IllegalArgumentException(
           String.format("The input %s for the execute function request is null",
               "MemberMappedArgument"));
     }
-    return new DistributedRegionFunctionExecutor(this, argument);
+    return new DistributedRegionFunctionExecutor<>(this, argument);
   }
   @Override
-  public AbstractExecution setIsReExecute() {
-    return new DistributedRegionFunctionExecutor(this, true);
+  public AbstractExecution<IN, OUT, AGG> setIsReExecute() {
+    return new DistributedRegionFunctionExecutor<>(this, true);
   }
   @Override
@@ -312,7 +312,8 @@ public String toString() {
    * cache.execute.Function, java.util.Set)
    */
   @Override
-  public void validateExecution(Function function, Set targetMembers) {
+  public void validateExecution(final Function function,
+      final Set targetMembers) {
     InternalCache cache = region.getGemFireCache();
     if (cache != null && cache.getTxManager().getTXState() != null) {
       if (targetMembers.size() > 1) {
           "Function inside a transaction cannot execute on more than one node");
       } else {
         assert targetMembers.size() == 1;
-        DistributedMember funcTarget = (DistributedMember) targetMembers.iterator().next();
+        DistributedMember funcTarget = targetMembers.iterator().next();
         DistributedMember target = cache.getTxManager().getTXState().getTarget();
         if (target == null) {
           cache.getTxManager().getTXState().setTarget(funcTarget);
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionResultSender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionResultSender.java
index 5ab445b20ae1..ea00b9cbb719 100644
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionResultSender.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/DistributedRegionFunctionResultSender.java
@@ -232,7 +232,7 @@ public void setException(Throwable exception) {
   }
   @Override
-  public void enableOrderedResultStreming(boolean enable) {
+  public void enableOrderedResultStreaming(boolean enable) {
     enableOrderedResultStreming = enable;
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionContextImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionContextImpl.java
index d3a3f686fceb..8ed94e773a39 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionContextImpl.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionContextImpl.java
@@ -34,27 +34,27 @@
  * @see RegionFunctionContextImpl
  *
  */
-public class FunctionContextImpl implements FunctionContext {
+public class FunctionContextImpl<T> implements FunctionContext<T> {
-  private Object args = null;
+  private final T args;
-  private String functionId = null;
+  private final String functionId;
-  private Cache cache = null;
+  private final Cache cache;
-  private ResultSender resultSender = null;
+  private final ResultSender resultSender;
   private final boolean isPossDup;
   private final Object principal;
-  public FunctionContextImpl(final Cache cache, final String functionId, final Object args,
-      ResultSender resultSender) {
+  public FunctionContextImpl(final Cache cache, final String functionId, final T args,
+      ResultSender resultSender) {
     this(cache, functionId, args, resultSender, false);
   }
-  public FunctionContextImpl(final Cache cache, final String functionId, final Object args,
-      ResultSender resultSender, boolean isPossibleDuplicate) {
+  public FunctionContextImpl(final Cache cache, final String functionId, final T args,
+      ResultSender resultSender, boolean isPossibleDuplicate) {
     this.cache = cache;
     this.functionId = functionId;
     this.args = args;
@@ -77,7 +77,7 @@ public FunctionContextImpl(final Cache cache, final String functionId, final Obj
    * @return the arguments or null if there are no arguments
    */
   @Override
-  public Object getArguments() {
+  public T getArguments() {
     return args;
   }
@@ -104,9 +104,10 @@ public String toString() {
         + ']';
   }
+  @SuppressWarnings("unchecked")
   @Override
-  public ResultSender getResultSender() {
-    return resultSender;
+  public ResultSender getResultSender() {
+    return (ResultSender) resultSender;
   }
   @Override
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java
index 119e0d5f4c41..f061d4afed41 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionExecutionNodePruner.java
@@ -14,6 +14,9 @@
  */
 package org.apache.geode.internal.cache.execute;
+import static java.lang.String.format;
+import static org.apache.geode.internal.cache.PartitionedRegionHelper.getHashKey;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -29,7 +32,6 @@
 import org.apache.geode.distributed.internal.membership.InternalDistributedMember;
 import org.apache.geode.internal.cache.BucketSetHelper;
 import org.apache.geode.internal.cache.PartitionedRegion;
-import org.apache.geode.internal.cache.PartitionedRegionHelper;
 import org.apache.geode.logging.internal.log4j.api.LogService;
 public class FunctionExecutionNodePruner {
@@ -43,10 +45,8 @@ public static HashMap pruneNodes(
     if (isDebugEnabled) {
       logger.debug("FunctionExecutionNodePruner: The buckets to be pruned are: {}", buckets);
     }
-    HashMap nodeToBucketsMap =
-        new HashMap();
-    HashMap prunedNodeToBucketsMap =
-        new HashMap();
+    HashMap<InternalDistributedMember, int[]> nodeToBucketsMap = new HashMap<>();
+    HashMap<InternalDistributedMember, int[]> prunedNodeToBucketsMap = new HashMap<>();
     try {
       for (Integer bucketId : buckets) {
@@ -91,11 +91,11 @@ public static HashMap pruneNodes(
    * no of nodes to execute the function on.
    *
    * Second Logic: Give highest preference to the local node and after that use First Logic. //
-   * Local Node gets preference but still its deterministic for all the execution taking // place
+   * Local Node gets preference but still it's deterministic for all the execution taking // place
    * at that node which require same set of buckets.
    *
    * Third Logic: After including local node, choose random nodes among the remaining nodes in
-   * step until your curentBucketSet has all the required buckets. // No optimization for number
+   * step until your currentBucketSet has all the required buckets. // No optimization for number
    * of nodes to execute the function
    */
@@ -157,16 +157,16 @@ private static InternalDistributedMember findNextNode(
     for (Map.Entry entry : entrySet) {
       int[] buckets = entry.getValue();
-      int[] tempbuckets = new int[buckets.length];
-      System.arraycopy(buckets, 0, tempbuckets, 0, buckets[0] + 1);
-      tempbuckets = removeAllElements(tempbuckets, currentBucketArray);
+      int[] tempBuckets = new int[buckets.length];
+      System.arraycopy(buckets, 0, tempBuckets, 0, buckets[0] + 1);
+      tempBuckets = removeAllElements(tempBuckets, currentBucketArray);
-      if (max < BucketSetHelper.length(tempbuckets)) {
-        max = BucketSetHelper.length(tempbuckets);
+      if (max < BucketSetHelper.length(tempBuckets)) {
+        max = BucketSetHelper.length(tempBuckets);
         node = entry.getKey();
         nodesOfEqualSize.clear();
         nodesOfEqualSize.add(node);
-      } else if (max == BucketSetHelper.length(tempbuckets)) {
+      } else if (max == BucketSetHelper.length(tempBuckets)) {
         nodesOfEqualSize.add(node);
       }
     }
@@ -176,59 +176,49 @@
         ? nodesOfEqualSize.get(PartitionedRegion.RANDOM.nextInt(nodesOfEqualSize.size())) : null);
   }
-  public static Map groupByBucket(PartitionedRegion pr, Set routingKeys,
+  public static <K> Map<Integer, HashSet<K>> groupByBucket(PartitionedRegion pr, Set<K> routingKeys,
       final boolean primaryMembersNeeded, final boolean hasRoutingObjects,
       final boolean isBucketSetAsFilter) {
-    HashMap bucketToKeysMap = new HashMap();
+    HashMap<Integer, HashSet<K>> bucketToKeysMap = new HashMap<>();
-    for (final Object routingKey : routingKeys) {
+    for (final K routingKey : routingKeys) {
       final Integer bucketId;
-      Object key = routingKey;
       if (isBucketSetAsFilter) {
-        bucketId = ((Integer) key);
+        bucketId = ((Integer) routingKey);
       } else {
         if (hasRoutingObjects) {
-          bucketId = PartitionedRegionHelper.getHashKey(pr, key);
+          bucketId = getHashKey(pr, routingKey);
         } else {
-          bucketId = PartitionedRegionHelper.getHashKey(pr,
-              Operation.FUNCTION_EXECUTION, key, null, null);
+          bucketId = getHashKey(pr, Operation.FUNCTION_EXECUTION, routingKey, null, null);
         }
       }
-      InternalDistributedMember mem = null;
+      final InternalDistributedMember mem;
       if (primaryMembersNeeded) {
         mem = pr.getOrCreateNodeForBucketWrite(bucketId, null);
       } else {
         mem = pr.getOrCreateNodeForBucketRead(bucketId);
       }
       if (mem == null) {
-        throw new FunctionException(
-            String.format("No target node found for KEY, %s",
-                key));
-      }
-      HashSet bucketKeys = (HashSet) bucketToKeysMap.get(bucketId);
-      if (bucketKeys == null) {
-        bucketKeys = new HashSet(); // faster if this was an ArrayList
-        bucketToKeysMap.put(bucketId, bucketKeys);
+        throw new FunctionException(format("No target node found for KEY, %s", routingKey));
       }
-      bucketKeys.add(key);
+      bucketToKeysMap.computeIfAbsent(bucketId, k -> new HashSet<>()).add(routingKey);
     }
     return bucketToKeysMap;
   }
-  public static int[] getBucketSet(PartitionedRegion pr, Set routingKeys,
+  public static <K> int[] getBucketSet(PartitionedRegion pr, Set<K> routingKeys,
       final boolean hasRoutingObjects, boolean isBucketSetAsFilter) {
     int[] bucketArray = null;
-    for (Object key : routingKeys) {
+    for (K key : routingKeys) {
       final Integer bucketId;
       if (isBucketSetAsFilter) {
         bucketId = (Integer) key;
       } else {
         if (hasRoutingObjects) {
-          bucketId = PartitionedRegionHelper.getHashKey(pr, key);
+          bucketId = getHashKey(pr, key);
        } else {
-          bucketId = PartitionedRegionHelper.getHashKey(pr,
-              Operation.FUNCTION_EXECUTION, key, null, null);
+          bucketId = getHashKey(pr, Operation.FUNCTION_EXECUTION, key, null, null);
         }
       }
       if (bucketArray == null) {
@@ -243,7 +233,7 @@ public static int[] getBucketSet(PartitionedRegion pr, Set routingKeys,
   public static HashMap groupByMemberToBuckets(
       PartitionedRegion pr, Set bucketSet, boolean primaryOnly) {
     if (primaryOnly) {
-      HashMap memberToBucketsMap = new HashMap();
+      HashMap<InternalDistributedMember, int[]> memberToBucketsMap = new HashMap<>();
       try {
         for (Integer bucketId : bucketSet) {
           InternalDistributedMember mem = pr.getOrCreateNodeForBucketWrite(bucketId, null);
@@ -281,9 +271,7 @@ private static int[] removeAllElements(int[] arrayA, int[] arrayB) {
     inSet.removeAll(subSet);
-    int[] outArray = BucketSetHelper.fromSet(inSet);
-
-    return outArray;
+    return BucketSetHelper.fromSet(inSet);
   }
@@ -298,9 +286,7 @@ private static int[] addAllElements(int[] arrayA, int[] arrayB) {
     inSet.addAll(addSet);
-    int[] outArray = BucketSetHelper.fromSet(inSet);
-
-    return outArray;
+    return BucketSetHelper.fromSet(inSet);
   }
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionRemoteContext.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionRemoteContext.java
index eb90ac2ff1be..74a39b4abc6c 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionRemoteContext.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionRemoteContext.java
@@ -14,6 +14,8 @@
 */
 package org.apache.geode.internal.cache.execute;
+import static java.util.Objects.requireNonNull;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -34,7 +36,7 @@
  */
 public class FunctionRemoteContext implements DataSerializable {
-  private Set filter;
+  private Set<Object> filter;
   private Object args;
@@ -42,24 +44,25 @@ public class FunctionRemoteContext implements DataSerializable {
   private boolean isReExecute;
-  private boolean isFnSerializationReqd;
+  private boolean isFunctionSerializationRequired;
   private String functionId;
-  private Function function;
+  private Function<?> function;
   private Object principal;
   public FunctionRemoteContext() {}
-  public FunctionRemoteContext(final Function function, Object object, Set filter,
-      int[] bucketArray, boolean isReExecute, boolean isFnSerializationReqd, Object principal) {
+  public FunctionRemoteContext(final Function<?> function, Object args, Set<Object> filter,
+      int[] bucketArray, boolean isReExecute, boolean isFunctionSerializationRequired,
+      Object principal) {
     this.function = function;
-    args = object;
+    this.args = args;
     this.filter = filter;
     this.bucketArray = bucketArray;
     this.isReExecute = isReExecute;
-    this.isFnSerializationReqd = isFnSerializationReqd;
+    this.isFunctionSerializationRequired = isFunctionSerializationRequired;
     this.principal = principal;
   }
@@ -67,21 +70,21 @@ public FunctionRemoteContext(final Function function, Object object, Set filter,
   public void fromData(DataInput in) throws IOException, ClassNotFoundException {
     Object object = DataSerializer.readObject(in);
     if (object instanceof String) {
-      isFnSerializationReqd = false;
+      isFunctionSerializationRequired = false;
       function = FunctionService.getFunction((String) object);
       if (function == null) {
         functionId = (String) object;
       }
     } else {
-      function = (Function) object;
-      isFnSerializationReqd = true;
+      function = (Function<?>) object;
+      isFunctionSerializationRequired = true;
     }
     args = DataSerializer.readObject(in);
     filter = DataSerializer.readHashSet(in);
     if (StaticSerialization.getVersionForDataStream(in).isNotOlderThan(KnownVersion.GEODE_1_11_0)) {
       bucketArray = DataSerializer.readIntArray(in);
     } else {
-      HashSet bucketSet = DataSerializer.readHashSet(in);
+      HashSet<Integer> bucketSet = requireNonNull(DataSerializer.readHashSet(in));
       bucketArray = BucketSetHelper.fromSet(bucketSet);
     }
     isReExecute = DataSerializer.readBoolean(in);
@@ -98,19 +101,19 @@ public void fromData(DataInput in) throws IOException, ClassNotFoundException {
   @Override
   public void toData(DataOutput out) throws IOException {
-    if (isFnSerializationReqd) {
+    if (isFunctionSerializationRequired) {
       DataSerializer.writeObject(function, out);
     } else {
       DataSerializer.writeObject(function.getId(), out);
     }
     DataSerializer.writeObject(args, out);
-    DataSerializer.writeHashSet((HashSet) filter, out);
+    DataSerializer.writeHashSet((HashSet<?>) filter, out);
     if (StaticSerialization.getVersionForDataStream(out)
         .isNotOlderThan(KnownVersion.GEODE_1_11_0)) {
       DataSerializer.writeIntArray(bucketArray, out);
     } else {
       Set bucketSet = BucketSetHelper.toSet(bucketArray);
-      DataSerializer.writeHashSet((HashSet) bucketSet, out);
+      DataSerializer.writeHashSet((HashSet<?>) bucketSet, out);
     }
     DataSerializer.writeBoolean(isReExecute, out);
@@ -124,7 +127,7 @@ public void toData(DataOutput out) throws IOException {
     }
   }
-  public Set getFilter() {
+  public Set<Object> getFilter() {
     return filter;
   }
@@ -140,7 +143,7 @@ public boolean isReExecute() {
     return isReExecute;
   }
-  public Function getFunction() {
+  public Function<?> getFunction() {
     return function;
   }
@@ -154,7 +157,6 @@ public Object getPrincipal() {
   @Override
   public String toString() {
-
     return "{FunctionRemoteContext " + "functionId=" + functionId + " args=" + args
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java
index 2bc476f790dd..41b72eb5a144 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/FunctionStreamingResultCollector.java
@@ -157,7 +157,7 @@ public Object getResultInternal() throws FunctionException {
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
@@ -181,7 +181,7 @@ public Object getResultInternal() throws FunctionException {
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
@@ -203,7 +203,7 @@ public Object getResultInternal() throws FunctionException {
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
@@ -229,7 +229,7 @@ public Object getResultInternal() throws FunctionException {
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
@@ -288,7 +288,7 @@ public Object getResultInternal(long timeout, TimeUnit unit)
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
@@ -314,7 +314,7 @@ public Object getResultInternal(long timeout, TimeUnit unit)
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
@@ -336,7 +336,7 @@ public Object getResultInternal(long timeout, TimeUnit unit)
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
@@ -363,7 +363,7 @@ public Object getResultInternal(long timeout, TimeUnit unit)
         clearResults();
         execution = execution.setIsReExecute();
         ResultCollector newRc = null;
-        if (execution.isFnSerializationReqd()) {
+        if (execution.isFunctionSerializationRequired()) {
           newRc = execution.execute(fn);
         } else {
           newRc = execution.execute(fn.getId());
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalExecution.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalExecution.java
index ecb088fa76d5..885858aafe1e 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalExecution.java
+++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalExecution.java
@@ -28,9 +28,9 @@
  * @since GemFire 5.8LA
  *
  */
-public interface InternalExecution extends Execution {
+public interface InternalExecution<IN, OUT, AGG> extends Execution<IN, OUT, AGG> {
-  InternalExecution withMemberMappedArgument(MemberMappedArgument argument);
+  InternalExecution<IN, OUT, AGG> withMemberMappedArgument(MemberMappedArgument argument);
   /**
    * Specifies a filter of bucketIDs for selecting the GemFire members to execute the function on.
@@ -44,7 +44,7 @@ public interface InternalExecution extends Execution {
    *         {@link FunctionService#onRegion(org.apache.geode.cache.Region)}
    * @since Geode 1.0
    */
-  InternalExecution withBucketFilter(Set bucketIDs);
+  InternalExecution<IN, OUT, AGG> withBucketFilter(Set<Integer> bucketIDs);
  /**
   * If true, function execution waits for all exceptions from target nodes
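Note on the generified interfaces above: with InternalExecution now extending the typed Execution API (Execution<IN, OUT, AGG>), callers can declare the argument, per-member result, and aggregate types once instead of casting raw results. A minimal caller-side sketch under those assumptions (the orders region, the "electronics" argument, and the "sum-quantities" function id are hypothetical illustrations, not part of this change):

    import java.util.List;
    import java.util.concurrent.TimeUnit;
    import org.apache.geode.cache.Region;
    import org.apache.geode.cache.execute.Execution;
    import org.apache.geode.cache.execute.FunctionService;
    import org.apache.geode.cache.execute.ResultCollector;

    public class TypedExecutionSketch {
      // IN = String argument, OUT = Integer per-member result, AGG = List<Integer> aggregate.
      public static List<Integer> sumQuantities(Region<String, ?> orders) throws Exception {
        @SuppressWarnings("unchecked") // onRegion may still hand back a raw Execution
        Execution<String, Integer, List<Integer>> execution = FunctionService.onRegion(orders);
        ResultCollector<Integer, List<Integer>> collector =
            execution.setArguments("electronics").execute("sum-quantities");
        // Bounded wait mirrors the ResultCollector.getResult(timeout, unit) overload in this PR.
        return collector.getResult(30, TimeUnit.SECONDS);
      }
    }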
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalRegionFunctionContext.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalRegionFunctionContext.java index 2e27812fd6c4..98701df58d80 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalRegionFunctionContext.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalRegionFunctionContext.java @@ -21,13 +21,12 @@ import org.apache.geode.cache.Region; import org.apache.geode.cache.execute.RegionFunctionContext; import org.apache.geode.cache.partition.PartitionRegionHelper; -import org.apache.geode.internal.cache.LocalDataSet; /** * Internal interface used to provide for some essential functionality for * {@link RegionFunctionContext} invoked by {@link PartitionRegionHelper}. */ -public interface InternalRegionFunctionContext extends RegionFunctionContext { +public interface InternalRegionFunctionContext extends RegionFunctionContext { /** * Return a region providing read access limited to the local data set corresponding to the @@ -47,13 +46,13 @@ public interface InternalRegionFunctionContext extends RegionFunctionContext { * with read access limited to the routing keys as specified by the {@link #getFilter()} method of * the function context. *
<p>
- * Writes using these Region have no constraints and behave the same as a partitioned Region. + * Writes using these Regions have no constraints and behave the same as a partitioned Region. * <p>
* If there are no colocated regions, return an empty map. * * @return an unmodifiable map of {@linkplain Region#getFullPath() region name} to {@link Region} */ - Map getColocatedLocalDataSets(); + Map<String, Region<?, ?>> getColocatedLocalDataSets(); /** * Get the int array of bucket IDs for this node as specified by the {@link #getFilter()} method diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalResultSender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalResultSender.java index 555c92c87b39..28471366ad52 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalResultSender.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/InternalResultSender.java @@ -16,9 +16,9 @@ import org.apache.geode.cache.execute.ResultSender; -public interface InternalResultSender extends ResultSender { +public interface InternalResultSender extends ResultSender<Object> { - void enableOrderedResultStreming(boolean enable); + void enableOrderedResultStreaming(boolean enable); boolean isLocallyExecuted(); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/LocalResultCollectorImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/LocalResultCollectorImpl.java index 4ee206c424d6..670998ce5dba 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/LocalResultCollectorImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/LocalResultCollectorImpl.java @@ -133,7 +133,7 @@ public Object getResultInternal() throws FunctionException { clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(function); } else { newRc = execution.execute(function.getId()); @@ -182,7 +182,7 @@ public Object getResultInternal(long timeout, TimeUnit unit) clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(function); } else { newRc = execution.execute(function.getId()); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionExecutor.java index 57249ee31232..984c0600340b 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionExecutor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionExecutor.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.execute; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -36,8 +38,7 @@ import org.apache.geode.internal.cache.GemFireCacheImpl; import org.apache.geode.internal.cache.InternalCache; -public class MemberFunctionExecutor extends AbstractExecution { - +public class MemberFunctionExecutor<IN, OUT, AGG> extends AbstractExecution<IN, OUT, AGG> { protected InternalDistributedSystem distributedSystem; protected Set members; @@ -57,7 +58,7 @@ public class MemberFunctionExecutor extends AbstractExecution { MemberFunctionExecutor(DistributedSystem distributedSystem, Set members) { this.distributedSystem = (InternalDistributedSystem) distributedSystem; - this.members = (Set) members; + this.members = uncheckedCast(members);
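// A minimal sketch (assumed; the utility's body is not shown in this diff) of what a
// helper like org.apache.geode.util.internal.UncheckedUtils.uncheckedCast, statically
// imported above, plausibly looks like. It centralizes the @SuppressWarnings("unchecked")
// in one audited place instead of scattering raw casts such as the removed "(Set) members"
// across call sites; the target type is inferred from the assignment context.
final class UncheckedUtilsSketch {
  @SuppressWarnings("unchecked")
  static <T> T uncheckedCast(Object object) {
    return (T) object;
  }
}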
} public MemberFunctionExecutor(DistributedSystem distributedSystem, @@ -67,14 +68,14 @@ public MemberFunctionExecutor(DistributedSystem distributedSystem, this.sender = sender; } - private MemberFunctionExecutor(MemberFunctionExecutor memFunctionExecutor) { + private MemberFunctionExecutor(MemberFunctionExecutor memFunctionExecutor) { super(memFunctionExecutor); distributedSystem = memFunctionExecutor.distributedSystem; members = new HashSet<>(memFunctionExecutor.members); sender = memFunctionExecutor.sender; } - private MemberFunctionExecutor(MemberFunctionExecutor memberFunctionExecutor, + private MemberFunctionExecutor(MemberFunctionExecutor memberFunctionExecutor, MemberMappedArgument argument) { this(memberFunctionExecutor); @@ -82,21 +83,22 @@ private MemberFunctionExecutor(MemberFunctionExecutor memberFunctionExecutor, isMemberMappedArgument = true; } - private MemberFunctionExecutor(MemberFunctionExecutor memberFunctionExecutor, - ResultCollector rs) { + private MemberFunctionExecutor(MemberFunctionExecutor memberFunctionExecutor, + ResultCollector rs) { this(memberFunctionExecutor); rc = rs; } - private MemberFunctionExecutor(MemberFunctionExecutor memberFunctionExecutor, Object arguments) { + private MemberFunctionExecutor(MemberFunctionExecutor memberFunctionExecutor, + IN arguments) { this(memberFunctionExecutor); args = arguments; } - private ResultCollector executeFunction(final Function function, - ResultCollector resultCollector) { + private ResultCollector executeFunction(final Function function, + ResultCollector resultCollector) { final DistributionManager dm = distributedSystem.getDistributionManager(); final Set dest = new HashSet<>(members); if (dest.isEmpty()) { @@ -109,7 +111,8 @@ private ResultCollector executeFunction(final Function function, final InternalDistributedMember localVM = distributedSystem.getDistributionManager().getDistributionManagerId(); - final LocalResultCollector localRC = getLocalResultCollector(function, resultCollector); + final LocalResultCollector localRC = + getLocalResultCollector(function, resultCollector); boolean remoteOnly = false; boolean localOnly = false; if (!dest.contains(localVM)) { @@ -131,7 +134,7 @@ private ResultCollector executeFunction(final Function function, if (cache != null) { isTx = cache.getTxManager().getTXState() != null; } - final FunctionContext context = new FunctionContextImpl(cache, function.getId(), + final FunctionContext context = new FunctionContextImpl<>(cache, function.getId(), getArgumentsForMember(localVM.getId()), resultSender); executeFunctionOnLocalNode(function, context, resultSender, dm, isTx); } @@ -146,13 +149,13 @@ private ResultCollector executeFunction(final Function function, new MemberFunctionResultWaiter(distributedSystem, localRC, function, memberArgs, dest, resultSender); - return resultReceiver.getFunctionResultFrom(dest, function, this); + return uncheckedCast(resultReceiver.getFunctionResultFrom(dest, function, this)); } return localRC; } @Override - public void validateExecution(final Function function, + public void validateExecution(final Function function, final Set dest) { final InternalCache cache = GemFireCacheImpl.getInstance(); if (cache == null) { @@ -182,13 +185,15 @@ public void validateExecution(final Function function, } @Override - protected ResultCollector executeFunction(Function function, long timeout, TimeUnit unit) { + protected ResultCollector executeFunction(Function function, long timeout, + TimeUnit unit) { if (!function.hasResult()) { 
executeFunction(function, null); - return new NoResult(); + return new NoResult<>(); } - ResultCollector inRc = (rc == null) ? new DefaultResultCollector() : rc; - ResultCollector rcToReturn = executeFunction(function, inRc); + ResultCollector<OUT, AGG> inRc = + (rc == null) ? uncheckedCast(new DefaultResultCollector<>()) : rc; + ResultCollector<OUT, AGG> rcToReturn = executeFunction(function, inRc); if (timeout > 0) { try { rcToReturn.getResult(timeout, unit); @@ -200,60 +205,59 @@ protected ResultCollector executeFunction(Function function, long timeout, TimeU } @Override - public Execution setArguments(Object args) { + public Execution<IN, OUT, AGG> setArguments(IN args) { if (args == null) { throw new IllegalArgumentException( String.format("The input %s for the execute function request is null", "args")); } - return new MemberFunctionExecutor(this, args); + return new MemberFunctionExecutor<>(this, args); } - // Changing the object!! @Override - public Execution withArgs(Object args) { + public Execution<IN, OUT, AGG> withArgs(IN args) { return setArguments(args); } - // Changing the object!! @Override - public Execution withCollector(ResultCollector rs) { + public Execution<IN, OUT, AGG> withCollector(ResultCollector<OUT, AGG> rs) { if (rs == null) { throw new IllegalArgumentException( String.format("The input %s for the execute function request is null", "Result Collector")); } - return new MemberFunctionExecutor(this, rs); + return new MemberFunctionExecutor<>(this, rs); } @Override - public Execution withFilter(Set filter) { + public Execution<IN, OUT, AGG> withFilter(Set<?> filter) { throw new FunctionException( String.format("Cannot specify %s for data independent functions", "filter")); } @Override - public InternalExecution withBucketFilter(Set bucketIDs) { + public InternalExecution<IN, OUT, AGG> withBucketFilter(Set<Integer> bucketIDs) { throw new FunctionException( String.format("Cannot specify %s for data independent functions", "bucket as filter")); } @Override - public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { + public InternalExecution<IN, OUT, AGG> withMemberMappedArgument(MemberMappedArgument argument) { if (argument == null) { throw new IllegalArgumentException( String.format("The input %s for the execute function request is null", "MemberMappedArgs")); } - return new MemberFunctionExecutor(this, argument); + return new MemberFunctionExecutor<>(this, argument); } + @SuppressWarnings("unchecked") @Override - public Object getArgumentsForMember(String memberId) { + public <T> T getArgumentsForMember(String memberId) { if (!isMemberMappedArgument) { - return args; + return (T) args; } else { return memberMappedArg.getArgumentsForMember(memberId); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionResultSender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionResultSender.java index 64de9a36c5e7..b0d769e47168 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionResultSender.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberFunctionResultSender.java @@ -239,7 +239,7 @@ public void setException(Throwable exception) { } @Override - public void enableOrderedResultStreming(boolean enable) { + public void enableOrderedResultStreaming(boolean enable) { enableOrderedResultStreming = enable; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberMappedArgument.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberMappedArgument.java index 7a2300cade5e..94e194fc82ae 100755
--- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberMappedArgument.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MemberMappedArgument.java @@ -48,20 +48,12 @@ public MemberMappedArgument(Object defaultArgument, Map memberTo this.memberToArgMap = memberToArgMap; } - public Object getArgumentsForMember(String memberId) { - if (memberToArgMap.containsKey(memberId)) { - return memberToArgMap.get(memberId); - } else { - return defaultArgument; - } + @SuppressWarnings("unchecked") + public T getArgumentsForMember(String memberId) { + return (T) memberToArgMap.getOrDefault(memberId, defaultArgument); } public Object getDefaultArgument() { return defaultArgument; } - - // TODO:Asif: Not good to return the refernec of the mapping. Should we return a copy? - public Map getMemberSpecificArgumentsMap() { - return memberToArgMap; - } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContext.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContext.java index f027daa7778e..213b055284b8 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContext.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContext.java @@ -27,9 +27,9 @@ * @since GemFire 6.5 * */ -public interface MultiRegionFunctionContext extends FunctionContext { +public interface MultiRegionFunctionContext extends FunctionContext { - Set getRegions(); + Set> getRegions(); /** * Returns a boolean to identify whether this is a re-execute. Returns true if it is a re-execute diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContextImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContextImpl.java index 976fbfca433c..0701ac86abd3 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContextImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionContextImpl.java @@ -27,15 +27,15 @@ * @since GemFire 6.5 * */ -public class MultiRegionFunctionContextImpl extends FunctionContextImpl - implements MultiRegionFunctionContext { +public class MultiRegionFunctionContextImpl extends FunctionContextImpl + implements MultiRegionFunctionContext { - private Set regions = null; + private final Set> regions; private final boolean isPossibleDuplicate; public MultiRegionFunctionContextImpl(final Cache cache, final String functionId, - final Object args, ResultSender resultSender, Set regions, + final T args, ResultSender resultSender, Set> regions, boolean isPossibleDuplicate) { super(cache, functionId, args, resultSender); this.regions = regions; @@ -43,7 +43,7 @@ public MultiRegionFunctionContextImpl(final Cache cache, final String functionId } @Override - public Set getRegions() { + public Set> getRegions() { return regions; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionExecutor.java index 6c8273b13151..6787f246dbbc 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionExecutor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/MultiRegionFunctionExecutor.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.execute; +import static 
org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -41,44 +43,17 @@ import org.apache.geode.internal.cache.LocalRegion; import org.apache.geode.internal.cache.PartitionedRegion; -public class MultiRegionFunctionExecutor extends AbstractExecution { +public class MultiRegionFunctionExecutor extends AbstractExecution { - private final Set regions; + private final Set> regions; private ServerToClientFunctionResultSender sender; - public MultiRegionFunctionExecutor(Set regions) { - this.regions = regions; - } - - private MultiRegionFunctionExecutor(MultiRegionFunctionExecutor drfe) { - super(drfe); - regions = drfe.regions; - if (drfe.filter != null) { - filter.clear(); - filter.addAll(drfe.filter); - } - sender = drfe.sender; - } - - private MultiRegionFunctionExecutor(Set regions, Set filter2, Object args, - MemberMappedArgument memberMappedArg, ServerToClientFunctionResultSender resultSender) { - if (args != null) { - this.args = args; - } else if (memberMappedArg != null) { - this.memberMappedArg = memberMappedArg; - isMemberMappedArgument = true; - } - sender = resultSender; - if (filter2 != null) { - filter.clear(); - filter.addAll(filter2); - } + public MultiRegionFunctionExecutor(Set> regions) { this.regions = regions; - isClientServerMode = true; } - private MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, + private MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, MemberMappedArgument argument) { super(executor); regions = executor.getRegions(); @@ -90,7 +65,8 @@ private MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, } - private MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, ResultCollector rs) { + private MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, + ResultCollector rs) { super(executor); regions = executor.getRegions(); filter.clear(); @@ -99,7 +75,7 @@ private MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, Result rc = rs; } - public MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, Object args) { + public MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, IN args) { super(executor); regions = executor.getRegions(); filter.clear(); @@ -109,7 +85,8 @@ public MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, Object this.args = args; } - public MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, boolean isReExecute) { + public MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, + boolean isReExecute) { super(executor); regions = executor.getRegions(); filter.clear(); @@ -120,16 +97,16 @@ public MultiRegionFunctionExecutor(MultiRegionFunctionExecutor executor, boolean } @Override - public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { + public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { if (argument == null) { throw new IllegalArgumentException( String.format("The input %s for the execute function request is null", "MemberMapped Arg")); } - return new MultiRegionFunctionExecutor(this, argument); + return new MultiRegionFunctionExecutor<>(this, argument); } - public Set getRegions() { + public Set> getRegions() { return regions; } @@ -138,52 +115,54 @@ public ServerToClientFunctionResultSender getServerResultSender() { } @Override - public Execution setArguments(Object args) { + public Execution setArguments(IN args) { if (args == null) { throw new 
IllegalArgumentException( String.format("The input %s for the execute function request is null", "args")); } - return new MultiRegionFunctionExecutor(this, args); + return new MultiRegionFunctionExecutor<>(this, args); } @Override - public Execution withArgs(Object args) { + public Execution withArgs(IN args) { return setArguments(args); } @Override - public Execution withCollector(ResultCollector rc) { + public Execution withCollector(ResultCollector rc) { if (rc == null) { throw new IllegalArgumentException( String.format("The input %s for the execute function request is null", "Result Collector")); } - return new MultiRegionFunctionExecutor(this, rc); + return new MultiRegionFunctionExecutor<>(this, rc); } @Override - public Execution withFilter(Set filter) { + public Execution withFilter(Set filter) { throw new FunctionException( String.format("Cannot specify %s for multi region function", "filter")); } @Override - public InternalExecution withBucketFilter(Set bucketIDs) { + public InternalExecution withBucketFilter(Set bucketIDs) { throw new FunctionException( String.format("Cannot specify %s for multi region function", "bucket as filter")); } @Override - protected ResultCollector executeFunction(Function function, long timeout, TimeUnit unit) { + protected ResultCollector executeFunction(Function function, long timeout, + TimeUnit unit) { if (!function.hasResult()) { executeFunction(function, null); - return new NoResult(); + return new NoResult<>(); } - ResultCollector inRc = (rc == null) ? new DefaultResultCollector() : rc; - ResultCollector rcToReturn = executeFunction(function, inRc); + ResultCollector inRc = + (rc == null) ? uncheckedCast(new DefaultResultCollector<>()) : rc; + ResultCollector rcToReturn = executeFunction(function, inRc); if (timeout > 0) { try { rcToReturn.getResult(timeout, unit); @@ -194,8 +173,8 @@ protected ResultCollector executeFunction(Function function, long timeout, TimeU return rcToReturn; } - private ResultCollector executeFunction(final Function function, - ResultCollector resultCollector) { + private ResultCollector executeFunction(final Function function, + ResultCollector resultCollector) { InternalDistributedSystem ds = InternalDistributedSystem.getConnectedInstance(); if (ds == null) { throw new IllegalStateException( @@ -220,7 +199,7 @@ private ResultCollector executeFunction(final Function function, setExecutionNodes(dest); final InternalDistributedMember localVM = cache.getMyId(); - final LocalResultCollector localResultCollector = + final LocalResultCollector localResultCollector = getLocalResultCollector(function, resultCollector); boolean remoteOnly = false; boolean localOnly = false; @@ -237,15 +216,15 @@ private ResultCollector executeFunction(final Function function, // if member is local VM dest.remove(localVM); Set regionPathSet = memberToRegionMap.get(localVM); - Set regions = new HashSet<>(); + Set> regions = new HashSet<>(); if (regionPathSet != null) { InternalCache cache1 = GemFireCacheImpl.getInstance(); for (String regionPath : regionPathSet) { regions.add(cache1.getRegion(regionPath)); } } - final FunctionContextImpl context = - new MultiRegionFunctionContextImpl(cache, function.getId(), + final FunctionContextImpl context = + new MultiRegionFunctionContextImpl<>(cache, function.getId(), getArgumentsForMember(localVM.getId()), resultSender, regions, isReExecute); boolean isTx = cache.getTxManager().getTXState() != null; executeFunctionOnLocalNode(function, context, resultSender, dm, isTx); @@ -266,11 +245,10 @@ private 
ResultCollector executeFunction(final Function function, } private Map> calculateMemberToRegionMap() { - Map> memberToRegions = - new HashMap<>(); + Map> memberToRegions = new HashMap<>(); // nodes is maintained for node pruning logic Set nodes = new HashSet<>(); - for (Region region : regions) { + for (Region region : regions) { DataPolicy dp = region.getAttributes().getDataPolicy(); if (region instanceof PartitionedRegion) { PartitionedRegion pr = (PartitionedRegion) region; @@ -356,14 +334,15 @@ private Map> calculateMemberToRegionMap() } @Override - public AbstractExecution setIsReExecute() { - return new MultiRegionFunctionExecutor(this, true); + public AbstractExecution setIsReExecute() { + return new MultiRegionFunctionExecutor<>(this, true); } @Override - public void validateExecution(Function function, Set targetMembers) { + public void validateExecution(final Function function, + final Set targetMembers) { InternalCache cache = null; - for (Region r : regions) { + for (Region r : regions) { cache = (InternalCache) r.getCache(); break; } @@ -376,7 +355,7 @@ public void validateExecution(Function function, Set targetMembers) { "Function inside a transaction cannot execute on more than one node"); } else { assert targetMembers.size() == 1; - DistributedMember funcTarget = (DistributedMember) targetMembers.iterator().next(); + DistributedMember funcTarget = targetMembers.iterator().next(); DistributedMember target = cache.getTxManager().getTXState().getTarget(); if (target == null) { cache.getTxManager().getTXState().setTarget(funcTarget); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/NoResult.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/NoResult.java index 54c53a0f8cbc..00d9739952a0 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/NoResult.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/NoResult.java @@ -35,7 +35,7 @@ * @see Function#hasResult() * */ -public class NoResult implements ResultCollector, Serializable { +public class NoResult implements ResultCollector, Serializable { private static final long serialVersionUID = -4901369422864228848L; @@ -52,12 +52,12 @@ public void endResults() { } @Override - public Object getResult() throws FunctionException { + public S getResult() throws FunctionException { throw new FunctionException("Cannot return any result as the Function#hasResult() is false"); } @Override - public Object getResult(long timeout, TimeUnit unit) + public S getResult(long timeout, TimeUnit unit) throws FunctionException, InterruptedException { throw new FunctionException("Cannot return any result as the Function#hasResult() is false"); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionExecutor.java index 9930a99606d4..e6aafe2e38ca 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionExecutor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionExecutor.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.execute; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.Set; import java.util.concurrent.TimeUnit; @@ -30,7 +32,8 @@ import org.apache.geode.internal.cache.LocalRegion; import org.apache.geode.internal.cache.PartitionedRegion; -public 
class PartitionedRegionFunctionExecutor extends AbstractExecution { +public class PartitionedRegionFunctionExecutor + extends AbstractExecution { private final PartitionedRegion pr; @@ -40,7 +43,7 @@ public class PartitionedRegionFunctionExecutor extends AbstractExecution { private boolean isPRSingleHop = false; - public PartitionedRegionFunctionExecutor(Region r) { + public PartitionedRegionFunctionExecutor(Region r) { if (r == null) { throw new IllegalArgumentException( String.format("The input %s for the execute function request is null", @@ -49,123 +52,102 @@ public PartitionedRegionFunctionExecutor(Region r) { pr = (PartitionedRegion) r; } - private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor prfe) { - super(prfe); - pr = prfe.pr; - executeOnBucketSet = prfe.executeOnBucketSet; - isPRSingleHop = prfe.isPRSingleHop; - isReExecute = prfe.isReExecute; - if (prfe.filter != null) { - filter.clear(); - filter.addAll(prfe.filter); - } - if (prfe.sender != null) { - sender = prfe.sender; - } - } - - private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor prfe, + private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor other, MemberMappedArgument argument) { // super copies args, rc and memberMappedArgument - super(prfe); - - pr = prfe.pr; - executeOnBucketSet = prfe.executeOnBucketSet; - isPRSingleHop = prfe.isPRSingleHop; - filter.clear(); - filter.addAll(prfe.filter); - sender = prfe.sender; + super(other); + + pr = other.pr; + executeOnBucketSet = other.executeOnBucketSet; + isPRSingleHop = other.isPRSingleHop; + filter.addAll(other.filter); + sender = other.sender; // override member mapped arguments memberMappedArg = argument; isMemberMappedArgument = true; } - private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor prfe, - ResultCollector rs) { + private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor other, + ResultCollector rs) { // super copies args, rc and memberMappedArgument - super(prfe); - - pr = prfe.pr; - executeOnBucketSet = prfe.executeOnBucketSet; - isPRSingleHop = prfe.isPRSingleHop; - filter.clear(); - filter.addAll(prfe.filter); - sender = prfe.sender; + super(other); + + pr = other.pr; + executeOnBucketSet = other.executeOnBucketSet; + isPRSingleHop = other.isPRSingleHop; + filter.addAll(other.filter); + sender = other.sender; // override ResultCollector rc = rs; } - private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor prfe, - Object arguments) { + private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor other, + IN arguments) { // super copies args, rc and memberMappedArgument - super(prfe); - pr = prfe.pr; - executeOnBucketSet = prfe.executeOnBucketSet; - isPRSingleHop = prfe.isPRSingleHop; - filter.clear(); - filter.addAll(prfe.filter); - sender = prfe.sender; + super(other); + pr = other.pr; + executeOnBucketSet = other.executeOnBucketSet; + isPRSingleHop = other.isPRSingleHop; + filter.addAll(other.filter); + sender = other.sender; // override arguments args = arguments; } - private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor prfe, Set filter2) { + private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor other, + Set filter) { // super copies args, rc and memberMappedArgument - super(prfe); - pr = prfe.pr; - executeOnBucketSet = prfe.executeOnBucketSet; - isPRSingleHop = prfe.isPRSingleHop; - sender = prfe.sender; - filter.clear(); - filter.addAll(filter2); - isReExecute = 
prfe.isReExecute; + super(other); + pr = other.pr; + executeOnBucketSet = other.executeOnBucketSet; + isPRSingleHop = other.isPRSingleHop; + sender = other.sender; + this.filter.addAll(filter); + isReExecute = other.isReExecute; } - private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor prfe, + private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor other, Set bucketsAsFilter, boolean executeOnBucketSet) { // super copies args, rc and memberMappedArgument - super(prfe); - pr = prfe.pr; + super(other); + pr = other.pr; this.executeOnBucketSet = executeOnBucketSet; - isPRSingleHop = prfe.isPRSingleHop; - sender = prfe.sender; - filter.clear(); + isPRSingleHop = other.isPRSingleHop; + sender = other.sender; filter.addAll(bucketsAsFilter); - isReExecute = prfe.isReExecute; + isReExecute = other.isReExecute; } - private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor prfe, + private PartitionedRegionFunctionExecutor(PartitionedRegionFunctionExecutor other, boolean isReExecute) { - super(prfe); - pr = prfe.pr; - executeOnBucketSet = prfe.executeOnBucketSet; - isPRSingleHop = prfe.isPRSingleHop; - if (prfe.filter != null) { - filter.clear(); - filter.addAll(prfe.filter); + super(other); + pr = other.pr; + executeOnBucketSet = other.executeOnBucketSet; + isPRSingleHop = other.isPRSingleHop; + if (other.filter != null) { + filter.addAll(other.filter); } - if (prfe.sender != null) { - sender = prfe.sender; + if (other.sender != null) { + sender = other.sender; } this.isReExecute = isReExecute; - isClientServerMode = prfe.isClientServerMode; - if (prfe.failedNodes != null) { + isClientServerMode = other.isClientServerMode; + if (other.failedNodes != null) { failedNodes.clear(); - failedNodes.addAll(prfe.failedNodes); + failedNodes.addAll(other.failedNodes); } } - public PartitionedRegionFunctionExecutor(PartitionedRegion region, Set filter2, Object args, + public PartitionedRegionFunctionExecutor(PartitionedRegion region, Set filter, IN args, MemberMappedArgument memberMappedArg, ServerToClientFunctionResultSender resultSender, - Set failedNodes, boolean executeOnBucketSet) { + Set failedNodes, boolean executeOnBucketSet) { pr = region; sender = resultSender; isClientServerMode = true; this.executeOnBucketSet = executeOnBucketSet; - if (filter2 != null) { - filter.clear(); - filter.addAll(filter2); + if (filter != null) { + this.filter.addAll(filter); } if (args != null) { @@ -183,17 +165,16 @@ public PartitionedRegionFunctionExecutor(PartitionedRegion region, Set filter2, } - public PartitionedRegionFunctionExecutor(PartitionedRegion region, Set filter2, Object args, + public PartitionedRegionFunctionExecutor(PartitionedRegion region, Set filter, IN args, MemberMappedArgument memberMappedArg, ServerToClientFunctionResultSender resultSender, - Set failedNodes, boolean executeOnBucketSet, boolean isPRSingleHop) { + Set failedNodes, boolean executeOnBucketSet, boolean isPRSingleHop) { pr = region; sender = resultSender; isClientServerMode = true; this.executeOnBucketSet = executeOnBucketSet; this.isPRSingleHop = isPRSingleHop; - if (filter2 != null) { - filter.clear(); - filter.addAll(filter2); + if (filter != null) { + this.filter.addAll(filter); } if (args != null) { @@ -210,17 +191,24 @@ public PartitionedRegionFunctionExecutor(PartitionedRegion region, Set filter2, } @Immutable - private static final ResultCollector NO_RESULT = new NoResult(); + private static final ResultCollector NO_RESULT = new NoResult<>(); + + 
@SuppressWarnings("unchecked") + private static ResultCollector noResult() { + return (ResultCollector) NO_RESULT; + } @Override - public ResultCollector executeFunction(final Function function, long timeout, TimeUnit unit) { + public ResultCollector executeFunction(final Function function, long timeout, + TimeUnit unit) { try { if (!function.hasResult()) /* NO RESULT:fire-n-forget */ { pr.executeFunction(function, this, null, executeOnBucketSet); - return NO_RESULT; + return noResult(); } - ResultCollector inRc = (rc == null) ? new DefaultResultCollector() : rc; - ResultCollector rcToReturn = + ResultCollector inRc = + (rc == null) ? uncheckedCast(new DefaultResultCollector<>()) : rc; + ResultCollector rcToReturn = pr.executeFunction(function, this, inRc, executeOnBucketSet); if (timeout > 0) { try { @@ -238,19 +226,19 @@ public ResultCollector executeFunction(final Function function, long timeout, Ti } @Override - public Execution withFilter(Set filter) { + public Execution withFilter(Set filter) { if (filter == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "filter")); } executeOnBucketSet = false; - return new PartitionedRegionFunctionExecutor(this, filter); + return new PartitionedRegionFunctionExecutor<>(this, filter); } @Override - public InternalExecution withBucketFilter(Set bucketIDs) { + public InternalExecution withBucketFilter(Set bucketIDs) { if (bucketIDs == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", @@ -271,7 +259,7 @@ public InternalExecution withBucketFilter(Set bucketIDs) { if (bucketIDs.isEmpty()) { throw new FunctionException("No valid buckets to execute on"); } - return new PartitionedRegionFunctionExecutor(this, bucketIDs, true); + return new PartitionedRegionFunctionExecutor<>(this, bucketIDs, true); } public LocalRegion getRegion() { @@ -283,33 +271,33 @@ public ServerToClientFunctionResultSender getServerResultSender() { } @Override - public Execution setArguments(Object args) { + public Execution setArguments(IN args) { if (args == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "args")); } - return new PartitionedRegionFunctionExecutor(this, args); + return new PartitionedRegionFunctionExecutor<>(this, args); } @Override - public Execution withArgs(Object args) { + public Execution withArgs(IN args) { return setArguments(args); } @Override - public Execution withCollector(ResultCollector rs) { + public Execution withCollector(ResultCollector rs) { if (rs == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "Result Collector")); } - return new PartitionedRegionFunctionExecutor(this, rs); + return new PartitionedRegionFunctionExecutor<>(this, rs); } @Override - public AbstractExecution setIsReExecute() { - return new PartitionedRegionFunctionExecutor(this, true); + public AbstractExecution setIsReExecute() { + return new PartitionedRegionFunctionExecutor<>(this, true); } public boolean isPrSingleHop() { @@ -317,13 +305,13 @@ public boolean isPrSingleHop() { } @Override - public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { + public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { if (argument == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "MemberMapped arg")); } - return new 
PartitionedRegionFunctionExecutor(this, argument); + return new PartitionedRegionFunctionExecutor<>(this, argument); } @Override @@ -339,7 +327,8 @@ public String toString() { } @Override - public void validateExecution(Function function, Set targetMembers) { + public void validateExecution(final Function function, + final Set targetMembers) { InternalCache cache = pr.getGemFireCache(); if (cache.getTxManager().getTXState() != null) { if (targetMembers.size() > 1) { @@ -347,7 +336,7 @@ public void validateExecution(Function function, Set targetMembers) { "Function inside a transaction cannot execute on more than one node"); } else { assert targetMembers.size() == 1; - DistributedMember funcTarget = (DistributedMember) targetMembers.iterator().next(); + DistributedMember funcTarget = targetMembers.iterator().next(); DistributedMember target = cache.getTxManager().getTXState().getTarget(); if (target == null) { cache.getTxManager().getTXState().setTarget(funcTarget); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java index a63c8a1b5249..050e95a19f82 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultSender.java @@ -16,6 +16,8 @@ package org.apache.geode.internal.cache.execute; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.function.BiFunction; import org.apache.logging.log4j.Logger; @@ -42,8 +44,8 @@ * FunctionContext. * */ - -public class PartitionedRegionFunctionResultSender implements InternalResultSender { +public class PartitionedRegionFunctionResultSender + implements InternalResultSender { private static final Logger logger = LogService.getLogger(); @@ -57,7 +59,7 @@ public class PartitionedRegionFunctionResultSender implements InternalResultSend private final boolean forwardExceptions; - private final ResultCollector rc; + private final ResultCollector rc; private final ServerToClientFunctionResultSender serverSender; @@ -69,15 +71,15 @@ public class PartitionedRegionFunctionResultSender implements InternalResultSend private boolean completelyDoneFromRemote = false; - private final Function function; + private final Function function; - private boolean enableOrderedResultStreming; + private boolean enableOrderedResultStreaming; private final int[] bucketArray; private BucketMovedException bme; - private BiFunction functionStatsFunctionProvider; + private final BiFunction functionStatsFunctionProvider; public KnownVersion getClientVersion() { if (serverSender != null && serverSender.sc != null) { // is a client-server connection @@ -88,26 +90,26 @@ public KnownVersion getClientVersion() { public PartitionedRegionFunctionResultSender(DistributionManager dm, PartitionedRegion pr, long time, PartitionedRegionFunctionStreamingMessage msg, - Function function, int[] bucketArray) { + Function function, int[] bucketArray) { this(dm, pr, time, null, null, false, false, false, function, bucketArray, msg, - (x, y) -> FunctionStatsManager.getFunctionStats((String) x, (InternalDistributedSystem) y)); + FunctionStatsManager::getFunctionStats); } public PartitionedRegionFunctionResultSender(DistributionManager dm, - PartitionedRegion partitionedRegion, long time, ResultCollector rc, + PartitionedRegion 
partitionedRegion, long time, ResultCollector rc, ServerToClientFunctionResultSender sender, boolean onlyLocal, boolean onlyRemote, - boolean forwardExceptions, Function function, int[] bucketArray) { + boolean forwardExceptions, Function function, int[] bucketArray) { this(dm, partitionedRegion, time, rc, sender, onlyLocal, onlyRemote, forwardExceptions, function, bucketArray, null, - (x, y) -> FunctionStatsManager.getFunctionStats((String) x, (InternalDistributedSystem) y)); + FunctionStatsManager::getFunctionStats); } PartitionedRegionFunctionResultSender(DistributionManager dm, - PartitionedRegion partitionedRegion, long time, ResultCollector rc, + PartitionedRegion partitionedRegion, long time, ResultCollector rc, ServerToClientFunctionResultSender sender, boolean onlyLocal, boolean onlyRemote, - boolean forwardExceptions, Function function, int[] bucketArray, + boolean forwardExceptions, Function function, int[] bucketArray, PartitionedRegionFunctionStreamingMessage msg, - BiFunction functionStatsFunctionProvider) { + BiFunction functionStatsFunctionProvider) { this.dm = dm; pr = partitionedRegion; this.time = time; @@ -139,7 +141,7 @@ private void checkForBucketMovement(Object oneResult) { // this must be getting called directly from function @Override - public void lastResult(Object oneResult) { + public void lastResult(final OUT oneResult) { if (!function.hasResult()) { throw new IllegalStateException( String.format("Cannot %s result as the Function#hasResult() is false", @@ -174,16 +176,14 @@ public void lastResult(Object oneResult) { try { if (bme != null) { msg.sendReplyForOneResult(dm, pr, time, oneResult, false, - enableOrderedResultStreming); + enableOrderedResultStreaming); throw bme; } else { msg.sendReplyForOneResult(dm, pr, time, oneResult, true, - enableOrderedResultStreming); + enableOrderedResultStreaming); } - } catch (ForceReattemptException e) { - throw new FunctionException(e); - } catch (InterruptedException e) { + } catch (ForceReattemptException | InterruptedException e) { throw new FunctionException(e); } } else { @@ -194,7 +194,7 @@ public void lastResult(Object oneResult) { checkForBucketMovement(oneResult); if (bme != null) { rc.addResult(dm.getDistributionManagerId(), oneResult); - rc.addResult(dm.getDistributionManagerId(), bme); + rc.addResult(dm.getDistributionManagerId(), uncheckedCast(bme)); } else { rc.addResult(dm.getDistributionManagerId(), oneResult); } @@ -219,7 +219,7 @@ public void lastResult(Object oneResult) { } } - private synchronized void lastResult(Object oneResult, ResultCollector collector, + private synchronized void lastResult(OUT oneResult, ResultCollector collector, boolean lastRemoteResult, boolean lastLocalResult, DistributedMember memberID) { @@ -264,7 +264,7 @@ private synchronized void lastResult(Object oneResult, ResultCollector collector checkForBucketMovement(oneResult); if (bme != null) { collector.addResult(memberID, oneResult); - collector.addResult(memberID, bme); + collector.addResult(memberID, uncheckedCast(bme)); } else { collector.addResult(memberID, oneResult); } @@ -277,7 +277,7 @@ private synchronized void lastResult(Object oneResult, ResultCollector collector checkForBucketMovement(oneResult); if (bme != null) { collector.addResult(memberID, oneResult); - collector.addResult(memberID, bme); + collector.addResult(memberID, uncheckedCast(bme)); } else { collector.addResult(memberID, oneResult); } @@ -291,8 +291,8 @@ private synchronized void lastResult(Object oneResult, ResultCollector collector } } - public 
synchronized void lastResult(Object oneResult, boolean completelyDoneFromRemote, - ResultCollector reply, DistributedMember memberID) { + public synchronized void lastResult(OUT oneResult, boolean completelyDoneFromRemote, + ResultCollector reply, DistributedMember memberID) { logger.debug("PartitionedRegionFunctionResultSender Sending lastResult {}", oneResult); if (serverSender != null) { // Client-Server @@ -336,7 +336,7 @@ public synchronized void lastResult(Object oneResult, boolean completelyDoneFrom } @Override - public void sendResult(Object oneResult) { + public void sendResult(final OUT oneResult) { if (!function.hasResult()) { throw new IllegalStateException( String.format("Cannot %s result as the Function#hasResult() is false", @@ -353,10 +353,8 @@ public void sendResult(Object oneResult) { logger.debug("PartitionedRegionFunctionResultSender sending result from remote node {}", oneResult); msg.sendReplyForOneResult(dm, pr, time, oneResult, false, - enableOrderedResultStreming); - } catch (ForceReattemptException e) { - throw new FunctionException(e); - } catch (InterruptedException e) { + enableOrderedResultStreaming); + } catch (ForceReattemptException | InterruptedException e) { throw new FunctionException(e); } } else { @@ -392,9 +390,8 @@ private void lastClientSend(DistributedMember memberID, Object lastResult) { } @Override - public void sendException(Throwable exception) { - InternalFunctionException iFunxtionException = new InternalFunctionException(exception); - lastResult(iFunxtionException); + public void sendException(final Throwable exception) { + lastResult(uncheckedCast(new InternalFunctionException(exception))); localLastResultReceived = true; } @@ -403,17 +400,24 @@ public void setException(Throwable exception) { if (serverSender != null) { serverSender.setException(exception); } else { - ((LocalResultCollector) rc).setException(exception); - logger.info("Unexpected exception during function execution on local node Partitioned Region", - exception); + ((LocalResultCollector) rc).setException(exception); + if (exception.getCause() instanceof InternalFunctionInvocationTargetException) { + logger.debug( + "Unexpected exception during function execution on local node Partitioned Region", + exception); + } else { + logger.info( + "Unexpected exception during function execution on local node Partitioned Region", + exception); + } } rc.endResults(); localLastResultReceived = true; } @Override - public void enableOrderedResultStreming(boolean enable) { - enableOrderedResultStreming = enable; + public void enableOrderedResultStreaming(boolean enable) { + enableOrderedResultStreaming = enable; } @Override diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultWaiter.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultWaiter.java index 13d08d88ac44..1a42462aa763 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultWaiter.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/PartitionedRegionFunctionResultWaiter.java @@ -40,14 +40,15 @@ */ public class PartitionedRegionFunctionResultWaiter extends StreamingFunctionOperation { - private ResultCollector reply; + private ResultCollector reply; private final int regionId; private Set recipients = null; public PartitionedRegionFunctionResultWaiter(InternalDistributedSystem sys, int regionId, - ResultCollector rc, final Function function, 
PartitionedRegionFunctionResultSender sender) { + ResultCollector rc, final Function function, + PartitionedRegionFunctionResultSender sender) { super(sys, rc, function, sender); this.regionId = regionId; } @@ -64,17 +65,15 @@ public DistributionMessage createRequestMessage(Set s * Returns normally if succeeded to get data, otherwise throws an exception Have to wait outside * this function and when getResult() is called. For the time being get the correct results. */ - public ResultCollector getPartitionedDataFrom( + public ResultCollector getPartitionedDataFrom( Map recipMap, PartitionedRegion pr, - AbstractExecution execution) { + AbstractExecution execution) { if (recipMap.isEmpty()) { return rc; } - Set recipientsSet = new HashSet<>(); - for (InternalDistributedMember member : recipMap.keySet()) { - recipientsSet.add(member); - } + + Set recipientsSet = new HashSet<>(recipMap.keySet()); recipients = recipientsSet; PRFunctionStreamingResultCollector processor = new PRFunctionStreamingResultCollector(this, @@ -93,10 +92,7 @@ public ResultCollector getPartitionedDataFrom( protected PartitionMessage createRequestMessage(InternalDistributedMember recipient, ReplyProcessor21 processor, FunctionRemoteContext context) { - PartitionedRegionFunctionStreamingMessage msg = - new PartitionedRegionFunctionStreamingMessage(recipient, regionId, processor, context); - - return msg; + return new PartitionedRegionFunctionStreamingMessage(recipient, regionId, processor, context); } /** diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/RegionFunctionContextImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/RegionFunctionContextImpl.java index 30fb196cb8d4..05f4b6049176 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/RegionFunctionContextImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/RegionFunctionContextImpl.java @@ -30,7 +30,7 @@ /** * Context available to data dependent functions. When function is executed using - * {@link FunctionService#onRegion(Region)}, the FunctionContext can be type casted to + * {@link FunctionService#onRegion(Region)}, the FunctionContext can be type cast to * RegionFunctionContext. 
Methods provided to retrieve the Region and filter passed to the function * execution * @@ -39,14 +39,14 @@ * * @see FunctionContextImpl */ -public class RegionFunctionContextImpl extends FunctionContextImpl - implements InternalRegionFunctionContext { +public class RegionFunctionContextImpl extends FunctionContextImpl + implements InternalRegionFunctionContext { - private final Region dataSet; + private final Region dataSet; private final Set filter; - private final Map colocatedLocalDataMap; + private final Map> colocatedLocalDataMap; private final int[] localBucketArray; @@ -55,20 +55,20 @@ public class RegionFunctionContextImpl extends FunctionContextImpl private final Object principal; public RegionFunctionContextImpl(final Cache cache, final String functionId, - final Region dataSet, final Object args, final Set routingObjects, - final Map colocatedLocalDataMap, int[] localBucketArray, + final Region dataSet, final T args, final Set filter, + final Map> colocatedLocalDataMap, int[] localBucketArray, ResultSender resultSender, boolean isPossibleDuplicate) { - this(cache, functionId, dataSet, args, routingObjects, colocatedLocalDataMap, localBucketArray, + this(cache, functionId, dataSet, args, filter, colocatedLocalDataMap, localBucketArray, resultSender, isPossibleDuplicate, null); } public RegionFunctionContextImpl(final Cache cache, final String functionId, - final Region dataSet, final Object args, final Set routingObjects, - final Map colocatedLocalDataMap, int[] localBucketArray, + final Region dataSet, final T args, final Set filter, + final Map> colocatedLocalDataMap, int[] localBucketArray, ResultSender resultSender, boolean isPossibleDuplicate, Object principal) { super(cache, functionId, args, resultSender); this.dataSet = dataSet; - filter = routingObjects; + this.filter = filter; this.colocatedLocalDataMap = colocatedLocalDataMap; this.localBucketArray = localBucketArray; this.isPossibleDuplicate = isPossibleDuplicate; @@ -83,7 +83,7 @@ public RegionFunctionContextImpl(final Cache cache, final String functionId, private void setFunctionContexts() { if (colocatedLocalDataMap != null) { - for (LocalDataSet ls : colocatedLocalDataMap.values()) { + for (LocalDataSet ls : colocatedLocalDataMap.values()) { ls.setFunctionContext(this); } } @@ -96,9 +96,10 @@ private void setFunctionContexts() { * * @return Returns the Region on which function is executed */ + @SuppressWarnings("unchecked") @Override public Region getDataSet() { - return dataSet; + return (Region) dataSet; } /** @@ -117,31 +118,27 @@ public Set getFilter() { @Override public String toString() { return "[RegionFunctionContextImpl:" - + "dataSet=" - + dataSet - + ";filter=" - + filter - + ";args=" - + getArguments() - + ";principal=" - + getPrincipal() + + "dataSet=" + dataSet + + ";filter=" + filter + + ";args=" + getArguments() + + ";principal=" + getPrincipal() + ']'; } + @SuppressWarnings("unchecked") @Override - public Region getLocalDataSet(Region r) { + public Region getLocalDataSet(Region region) { if (colocatedLocalDataMap != null) { - return colocatedLocalDataMap.get(r.getFullPath()); + return (Region) colocatedLocalDataMap.get(region.getFullPath()); } else { return null; } } @Override - public Map getColocatedLocalDataSets() { + public Map> getColocatedLocalDataSets() { if (colocatedLocalDataMap != null) { - HashMap ret = - new HashMap<>(colocatedLocalDataMap); + final HashMap> ret = new HashMap<>(colocatedLocalDataMap); ret.remove(dataSet.getFullPath()); return Collections.unmodifiableMap(ret); } else { 
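The getLocalDataSet and getColocatedLocalDataSets plumbing above is what backs PartitionRegionHelper on the public API side. A minimal sketch of a data-aware function that relies on it, assuming the public Function and PartitionRegionHelper APIs (the class name and function id are hypothetical):

import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.RegionFunctionContext;
import org.apache.geode.cache.partition.PartitionRegionHelper;

// When executed via FunctionService.onRegion(...), the context can be cast to
// RegionFunctionContext; the local data set then covers only the buckets (or filter
// keys) routed to this member, as the javadoc above describes.
class LocalEntryCountFunction implements Function<Void> {
  @Override
  public void execute(FunctionContext<Void> context) {
    RegionFunctionContext regionContext = (RegionFunctionContext) context;
    Region<Object, Object> localData =
        PartitionRegionHelper.getLocalDataForContext(regionContext);
    context.getResultSender().lastResult(localData.size());
  }

  @Override
  public String getId() {
    return "example-local-entry-count"; // hypothetical id
  }
}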
diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java index a937090784e3..74fd6734a9e4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerFunctionExecutor.java @@ -15,6 +15,8 @@ package org.apache.geode.internal.cache.execute; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; @@ -33,12 +35,13 @@ import org.apache.geode.cache.execute.FunctionException; import org.apache.geode.cache.execute.FunctionService; import org.apache.geode.cache.execute.ResultCollector; +import org.apache.geode.distributed.DistributedMember; import org.apache.geode.internal.cache.TXManagerImpl; import org.apache.geode.internal.cache.execute.metrics.FunctionStats; import org.apache.geode.internal.cache.execute.metrics.FunctionStatsManager; import org.apache.geode.internal.cache.execute.util.SynchronizedResultCollector; -public class ServerFunctionExecutor extends AbstractExecution { +public class ServerFunctionExecutor extends AbstractExecution { private PoolImpl pool; @@ -60,7 +63,7 @@ public class ServerFunctionExecutor extends AbstractExecution { this.groups = groups; } - private ServerFunctionExecutor(ServerFunctionExecutor sfe) { + private ServerFunctionExecutor(ServerFunctionExecutor sfe) { super(sfe); if (sfe.pool != null) { pool = sfe.pool; @@ -69,23 +72,26 @@ private ServerFunctionExecutor(ServerFunctionExecutor sfe) { groups = sfe.groups; } - private ServerFunctionExecutor(ServerFunctionExecutor sfe, Object args) { + private ServerFunctionExecutor(ServerFunctionExecutor sfe, IN args) { this(sfe); this.args = args; } - private ServerFunctionExecutor(ServerFunctionExecutor sfe, ResultCollector collector) { + private ServerFunctionExecutor(ServerFunctionExecutor sfe, + ResultCollector collector) { this(sfe); - rc = collector != null ? new SynchronizedResultCollector(collector) : null; + rc = collector != null ? 
new SynchronizedResultCollector<>(collector) : null; } - private ServerFunctionExecutor(ServerFunctionExecutor sfe, MemberMappedArgument argument) { + private ServerFunctionExecutor(ServerFunctionExecutor sfe, + MemberMappedArgument argument) { this(sfe); memberMappedArg = argument; isMemberMappedArgument = true; } - protected ResultCollector executeFunction(final String functionId, boolean result, boolean isHA, + protected ResultCollector executeFunction(final String functionId, boolean result, + boolean isHA, boolean optimizeForWrite, long timeout, TimeUnit unit) { try { if (proxyCache != null) { @@ -100,7 +106,8 @@ protected ResultCollector executeFunction(final String functionId, boolean resul if (result) { hasResult = 1; if (rc == null) { - ResultCollector defaultCollector = new DefaultResultCollector(); + ResultCollector defaultCollector = + uncheckedCast(new DefaultResultCollector<>()); return executeOnServer(functionId, defaultCollector, hasResult, isHA, optimizeForWrite, timeoutMs); } else { @@ -108,7 +115,7 @@ protected ResultCollector executeFunction(final String functionId, boolean resul } } else { executeOnServerNoAck(functionId, hasResult, isHA, optimizeForWrite); - return new NoResult(); + return new NoResult<>(); } } finally { UserAttributes.userAttributes.set(null); @@ -116,7 +123,8 @@ protected ResultCollector executeFunction(final String functionId, boolean resul } @Override - protected ResultCollector executeFunction(final Function function, long timeout, TimeUnit unit) { + protected ResultCollector executeFunction(final Function function, long timeout, + TimeUnit unit) { byte hasResult = 0; try { if (proxyCache != null) { @@ -127,18 +135,18 @@ protected ResultCollector executeFunction(final Function function, long timeout, } if (function.hasResult()) { - hasResult = 1; final int timeoutMs = TimeoutHelper.toMillis(timeout, unit); if (rc == null) { - ResultCollector defaultCollector = new DefaultResultCollector(); - return executeOnServer(function, defaultCollector, hasResult, timeoutMs); + ResultCollector defaultCollector = + uncheckedCast(new DefaultResultCollector<>()); + return executeOnServer(function, defaultCollector, timeoutMs); } else { - return executeOnServer(function, rc, hasResult, timeoutMs); + return executeOnServer(function, rc, timeoutMs); } } else { executeOnServerNoAck(function, hasResult); - return new NoResult(); + return new NoResult<>(); } } finally { UserAttributes.userAttributes.set(null); @@ -146,7 +154,8 @@ protected ResultCollector executeFunction(final Function function, long timeout, } - private ResultCollector executeOnServer(Function function, ResultCollector rc, byte hasResult, + private ResultCollector executeOnServer(Function function, + ResultCollector rc, int timeoutMs) { FunctionStats stats = FunctionStatsManager.getFunctionStats(function.getId()); long start = stats.startFunctionExecution(true); @@ -155,19 +164,19 @@ private ResultCollector executeOnServer(Function function, ResultCollector rc, b final ExecuteFunctionOpImpl executeFunctionOp = new ExecuteFunctionOpImpl(function, args, memberMappedArg, - rc, isFnSerializationReqd, (byte) 0, groups, allServers, isIgnoreDepartedMembers(), + rc, isFunctionSerializationRequired, (byte) 0, groups, allServers, + isIgnoreDepartedMembers(), timeoutMs); final Supplier executeFunctionOpSupplier = () -> new ExecuteFunctionOpImpl(function, args, memberMappedArg, - rc, isFnSerializationReqd, (byte) 0, + rc, isFunctionSerializationRequired, (byte) 0, null/* onGroups does not use single-hop for now 
*/, false, false, timeoutMs); final Supplier reExecuteFunctionOpSupplier = - () -> new ExecuteFunctionOpImpl(function, getArguments(), - getMemberMappedArgument(), rc, - isFnSerializationReqd, (byte) 1, groups, allServers, + () -> new ExecuteFunctionOpImpl(function, getArguments(), getMemberMappedArgument(), rc, + isFunctionSerializationRequired, (byte) 1, groups, allServers, isIgnoreDepartedMembers(), timeoutMs); ExecuteFunctionOp.execute(pool, allServers, @@ -190,7 +199,8 @@ private ResultCollector executeOnServer(Function function, ResultCollector rc, b } } - private ResultCollector executeOnServer(String functionId, ResultCollector rc, byte hasResult, + private ResultCollector executeOnServer(String functionId, ResultCollector rc, + byte hasResult, boolean isHA, boolean optimizeForWrite, int timeoutMs) { FunctionStats stats = FunctionStatsManager.getFunctionStats(functionId); long start = stats.startFunctionExecution(true); @@ -199,19 +209,20 @@ private ResultCollector executeOnServer(String functionId, ResultCollector rc, b final ExecuteFunctionOpImpl executeFunctionOp = new ExecuteFunctionOpImpl(functionId, args, memberMappedArg, hasResult, - rc, isFnSerializationReqd, isHA, optimizeForWrite, (byte) 0, groups, allServers, + rc, isFunctionSerializationRequired, isHA, optimizeForWrite, (byte) 0, groups, + allServers, isIgnoreDepartedMembers(), timeoutMs); final Supplier executeFunctionOpSupplier = () -> new ExecuteFunctionOpImpl(functionId, args, memberMappedArg, hasResult, - rc, isFnSerializationReqd, isHA, optimizeForWrite, (byte) 0, + rc, isFunctionSerializationRequired, isHA, optimizeForWrite, (byte) 0, null/* onGroups does not use single-hop for now */, false, false, timeoutMs); final Supplier reExecuteFunctionOpSupplier = () -> new ExecuteFunctionOpImpl(functionId, args, getMemberMappedArgument(), - hasResult, rc, isFnSerializationReqd, isHA, optimizeForWrite, (byte) 1, + hasResult, rc, isFunctionSerializationRequired, isHA, optimizeForWrite, (byte) 1, groups, allServers, isIgnoreDepartedMembers(), timeoutMs); ExecuteFunctionOp.execute(pool, allServers, @@ -235,13 +246,13 @@ private ResultCollector executeOnServer(String functionId, ResultCollector rc, b } } - private void executeOnServerNoAck(Function function, byte hasResult) { + private void executeOnServerNoAck(Function function, byte hasResult) { FunctionStats stats = FunctionStatsManager.getFunctionStats(function.getId()); long start = stats.startFunctionExecution(false); try { validateExecution(function, null); ExecuteFunctionNoAckOp.execute(pool, function, args, memberMappedArg, allServers, - hasResult, isFnSerializationReqd, groups); + hasResult, isFunctionSerializationRequired, groups); stats.endFunctionExecution(start, false); } catch (FunctionException functionException) { stats.endFunctionExecutionWithException(start, false); @@ -261,7 +272,7 @@ private void executeOnServerNoAck(String functionId, byte hasResult, boolean isH try { validateExecution(null, null); ExecuteFunctionNoAckOp.execute(pool, functionId, args, memberMappedArg, allServers, - hasResult, isFnSerializationReqd, isHA, optimizeForWrite, groups); + hasResult, isFunctionSerializationRequired, isHA, optimizeForWrite, groups); stats.endFunctionExecution(start, false); } catch (FunctionException functionException) { stats.endFunctionExecutionWithException(start, false); @@ -279,69 +290,70 @@ public Pool getPool() { } @Override - public Execution withFilter(Set filter) { + public Execution withFilter(Set filter) { throw new FunctionException( 
String.format("Cannot specify %s for data independent functions", "filter")); } @Override - public InternalExecution withBucketFilter(Set bucketIDs) { + public InternalExecution withBucketFilter(Set bucketIDs) { throw new FunctionException( String.format("Cannot specify %s for data independent functions", "buckets as filter")); } @Override - public Execution setArguments(Object args) { + public Execution setArguments(IN args) { if (args == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "args")); } - return new ServerFunctionExecutor(this, args); + return new ServerFunctionExecutor<>(this, args); } @Override - public Execution withArgs(Object args) { + public Execution withArgs(IN args) { return setArguments(args); } @Override - public Execution withCollector(ResultCollector rs) { + public Execution withCollector(ResultCollector rs) { if (rs == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "Result Collector")); } - return new ServerFunctionExecutor(this, rs); + return new ServerFunctionExecutor<>(this, rs); } @Override - public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { + public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { if (argument == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "MemberMapped Args")); } - return new ServerFunctionExecutor(this, argument); + return new ServerFunctionExecutor<>(this, argument); } @Override - public void validateExecution(Function function, Set targetMembers) { + public void validateExecution(final Function function, + final Set targetMembers) { if (TXManagerImpl.getCurrentTXUniqueId() != TXManagerImpl.NOTX) { throw new UnsupportedOperationException(); } } @Override - public ResultCollector execute(final String functionName, long timeout, TimeUnit unit) { + public ResultCollector execute(final String functionName, long timeout, TimeUnit unit) { if (functionName == null) { throw new FunctionException( "The input function for the execute function request is null"); } - isFnSerializationReqd = false; - Function functionObject = FunctionService.getFunction(functionName); + isFunctionSerializationRequired = false; + Function functionObject = uncheckedCast(FunctionService.getFunction(functionName)); if (functionObject == null) { byte[] functionAttributes = getFunctionAttributes(functionName); if (functionAttributes == null) { @@ -373,7 +385,7 @@ public ResultCollector execute(final String functionName, long timeout, TimeUnit } @Override - public ResultCollector execute(final String functionName) { + public ResultCollector execute(final String functionName) { return execute(functionName, getTimeoutMs(), TimeUnit.MILLISECONDS); } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java index 45f5f3d00baa..95457f563d16 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerRegionFunctionExecutor.java @@ -15,6 +15,8 @@ package org.apache.geode.internal.cache.execute; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.Set; import java.util.concurrent.TimeUnit; @@ -29,6 +31,7 @@ import 
org.apache.geode.cache.execute.FunctionException; import org.apache.geode.cache.execute.FunctionService; import org.apache.geode.cache.execute.ResultCollector; +import org.apache.geode.distributed.DistributedMember; import org.apache.geode.internal.cache.GemFireCacheImpl; import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.LocalRegion; @@ -39,18 +42,18 @@ import org.apache.geode.logging.internal.log4j.api.LogService; /** - * Executes Function with FunctionService#onRegion(Region region) in client server mode. + * Executes Function with {@link FunctionService#onRegion(Region)} in client server mode. * * @see FunctionService#onRegion(Region) * * @since GemFire 5.8 LA */ -public class ServerRegionFunctionExecutor extends AbstractExecution { +public class ServerRegionFunctionExecutor extends AbstractExecution { private static final Logger logger = LogService.getLogger(); private final LocalRegion region; private boolean executeOnBucketSet = false; - ServerRegionFunctionExecutor(Region r, ProxyCache proxyCache) { + ServerRegionFunctionExecutor(Region r, ProxyCache proxyCache) { if (r == null) { throw new IllegalArgumentException( String.format("The input %s for the execute function request is null", @@ -60,8 +63,9 @@ public class ServerRegionFunctionExecutor extends AbstractExecution { this.proxyCache = proxyCache; } - private ServerRegionFunctionExecutor(ServerRegionFunctionExecutor serverRegionFunctionExecutor, - Object args) { + private ServerRegionFunctionExecutor( + ServerRegionFunctionExecutor serverRegionFunctionExecutor, + IN args) { super(serverRegionFunctionExecutor); region = serverRegionFunctionExecutor.region; @@ -71,41 +75,45 @@ private ServerRegionFunctionExecutor(ServerRegionFunctionExecutor serverRegionFu executeOnBucketSet = serverRegionFunctionExecutor.executeOnBucketSet; } - private ServerRegionFunctionExecutor(ServerRegionFunctionExecutor serverRegionFunctionExecutor, - MemberMappedArgument memberMapargs) { + private ServerRegionFunctionExecutor( + ServerRegionFunctionExecutor serverRegionFunctionExecutor, + MemberMappedArgument memberMappedArgument) { super(serverRegionFunctionExecutor); region = serverRegionFunctionExecutor.region; filter.clear(); filter.addAll(serverRegionFunctionExecutor.filter); - memberMappedArg = memberMapargs; + memberMappedArg = memberMappedArgument; executeOnBucketSet = serverRegionFunctionExecutor.executeOnBucketSet; } - private ServerRegionFunctionExecutor(ServerRegionFunctionExecutor serverRegionFunctionExecutor, - ResultCollector rc) { + private ServerRegionFunctionExecutor( + ServerRegionFunctionExecutor serverRegionFunctionExecutor, + ResultCollector rc) { super(serverRegionFunctionExecutor); region = serverRegionFunctionExecutor.region; filter.clear(); filter.addAll(serverRegionFunctionExecutor.filter); - this.rc = rc != null ? new SynchronizedResultCollector(rc) : null; + this.rc = rc != null ? 
new SynchronizedResultCollector<>(rc) : null; executeOnBucketSet = serverRegionFunctionExecutor.executeOnBucketSet; } - private ServerRegionFunctionExecutor(ServerRegionFunctionExecutor serverRegionFunctionExecutor, - Set filter2) { + private ServerRegionFunctionExecutor( + ServerRegionFunctionExecutor serverRegionFunctionExecutor, + Set filter2) { super(serverRegionFunctionExecutor); region = serverRegionFunctionExecutor.region; filter.clear(); - filter.addAll(filter2); + filter.addAll(uncheckedCast(filter2)); executeOnBucketSet = serverRegionFunctionExecutor.executeOnBucketSet; } - private ServerRegionFunctionExecutor(ServerRegionFunctionExecutor serverRegionFunctionExecutor, + private ServerRegionFunctionExecutor( + ServerRegionFunctionExecutor serverRegionFunctionExecutor, Set bucketsAsFilter, boolean executeOnBucketSet) { super(serverRegionFunctionExecutor); @@ -117,28 +125,29 @@ private ServerRegionFunctionExecutor(ServerRegionFunctionExecutor serverRegionFu } @Override - public Execution withFilter(Set fltr) { - if (fltr == null) { + public Execution withFilter(Set filter) { + if (filter == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "filter")); } executeOnBucketSet = false; - return new ServerRegionFunctionExecutor(this, fltr); + return new ServerRegionFunctionExecutor<>(this, filter); } @Override - public InternalExecution withBucketFilter(Set bucketIDs) { + public InternalExecution withBucketFilter(Set bucketIDs) { if (bucketIDs == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "buckets as filter")); } - return new ServerRegionFunctionExecutor(this, bucketIDs, true /* execute on bucketset */); + return new ServerRegionFunctionExecutor<>(this, bucketIDs, true /* execute on bucketset */); } @Override - protected ResultCollector executeFunction(final Function function, long timeout, TimeUnit unit) { + protected ResultCollector executeFunction(final Function function, long timeout, + TimeUnit unit) { byte hasResult = 0; try { if (proxyCache != null) { @@ -152,21 +161,22 @@ protected ResultCollector executeFunction(final Function function, long timeout, final int timeoutMs = TimeoutHelper.toMillis(timeout, unit); hasResult = 1; if (rc == null) { // Default Result Collector - ResultCollector defaultCollector = new DefaultResultCollector(); + ResultCollector defaultCollector = + uncheckedCast(new DefaultResultCollector<>()); return executeOnServer(function, defaultCollector, hasResult, timeoutMs); - } else { // Custome Result COllector + } else { // Custom Result Collector return executeOnServer(function, rc, hasResult, timeoutMs); } } else { // No results executeOnServerNoAck(function, hasResult); - return new NoResult(); + return new NoResult<>(); } } finally { UserAttributes.userAttributes.set(null); } } - protected ResultCollector executeFunction(final String functionId, boolean resultReq, + protected ResultCollector executeFunction(final String functionId, boolean resultReq, boolean isHA, boolean optimizeForWrite, long timeout, TimeUnit unit) { try { if (proxyCache != null) { @@ -180,22 +190,24 @@ protected ResultCollector executeFunction(final String functionId, boolean resul hasResult = 1; final int timeoutMs = TimeoutHelper.toMillis(timeout, unit); if (rc == null) { // Default Result Collector - ResultCollector defaultCollector = new DefaultResultCollector(); + ResultCollector defaultCollector = + uncheckedCast(new DefaultResultCollector<>()); return 
executeOnServer(functionId, defaultCollector, hasResult, isHA, optimizeForWrite, timeoutMs); - } else { // Custome Result COllector + } else { // Custom Result Collector return executeOnServer(functionId, rc, hasResult, isHA, optimizeForWrite, timeoutMs); } } else { // No results executeOnServerNoAck(functionId, hasResult, isHA, optimizeForWrite); - return new NoResult(); + return new NoResult<>(); } } finally { UserAttributes.userAttributes.set(null); } } - private ResultCollector executeOnServer(Function function, ResultCollector collector, + private ResultCollector executeOnServer(Function function, + ResultCollector collector, byte hasResult, int timeoutMs) throws FunctionException { ServerRegionProxy srp = getServerRegionProxy(); FunctionStats stats = @@ -216,7 +228,8 @@ private ResultCollector executeOnServer(Function function, ResultCollector colle } } - private ResultCollector executeOnServer(String functionId, ResultCollector collector, + private ResultCollector executeOnServer(String functionId, + ResultCollector collector, byte hasResult, boolean isHA, boolean optimizeForWrite, int timeoutMs) throws FunctionException { @@ -239,7 +252,8 @@ private ResultCollector executeOnServer(String functionId, ResultCollector colle } - private void executeOnServerNoAck(Function function, byte hasResult) throws FunctionException { + private void executeOnServerNoAck(Function function, byte hasResult) + throws FunctionException { ServerRegionProxy srp = getServerRegionProxy(); FunctionStats stats = FunctionStatsManager.getFunctionStats(function.getId(), region.getSystem()); @@ -277,18 +291,19 @@ private void executeOnServerNoAck(String functionId, byte hasResult, boolean isH } private ServerRegionProxy getServerRegionProxy() throws FunctionException { - ServerRegionProxy srp = region.getServerProxy(); - if (srp != null) { - if (logger.isDebugEnabled()) { - logger.debug("Found server region proxy on region. RegionName: {}", region.getName()); - } - return srp; - } else { - String message = srp + ": " - + "No available connection was found. Server Region Proxy is not available for this region " - + region.getName(); - throw new FunctionException(message); + final ServerRegionProxy srp = region.getServerProxy(); + + if (srp == null) { + throw new FunctionException( + "No available connection was found. Server Region Proxy is not available for this region " + + region.getName()); + } + + if (logger.isDebugEnabled()) { + logger.debug("Found server region proxy on region. 
RegionName: {}", region.getName()); } + + return srp; } public LocalRegion getRegion() { @@ -302,42 +317,43 @@ public String toString() { } @Override - public Execution setArguments(Object args) { + public Execution setArguments(IN args) { if (args == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "args")); } - return new ServerRegionFunctionExecutor(this, args); + return new ServerRegionFunctionExecutor<>(this, args); } @Override - public Execution withArgs(Object args) { + public Execution withArgs(IN args) { return setArguments(args); } @Override - public Execution withCollector(ResultCollector rs) { + public Execution withCollector(ResultCollector rs) { if (rs == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "Result Collector")); } - return new ServerRegionFunctionExecutor(this, rs); + return new ServerRegionFunctionExecutor<>(this, rs); } @Override - public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { + public InternalExecution withMemberMappedArgument(MemberMappedArgument argument) { if (argument == null) { throw new FunctionException( String.format("The input %s for the execute function request is null", "MemberMappedArgument")); } - return new ServerRegionFunctionExecutor(this, argument); + return new ServerRegionFunctionExecutor<>(this, argument); } @Override - public void validateExecution(Function function, Set targetMembers) { + public void validateExecution(final Function function, + final Set targetMembers) { InternalCache cache = GemFireCacheImpl.getInstance(); if (cache != null && cache.getTxManager().getTXState() != null) { TXStateProxyImpl tx = (TXStateProxyImpl) cache.getTxManager().getTXState(); @@ -347,24 +363,23 @@ public void validateExecution(Function function, Set targetMembers) { } @Override - public ResultCollector execute(final String functionName) { + public ResultCollector execute(final String functionName) { return execute(functionName, getTimeoutMs(), TimeUnit.MILLISECONDS); } @Override - public ResultCollector execute(final String functionName, long timeout, TimeUnit unit) { + public ResultCollector execute(final String functionName, long timeout, TimeUnit unit) { if (functionName == null) { throw new FunctionException( "The input function for the execute function request is null"); } - int timeoutInMs = (int) TimeUnit.MILLISECONDS.convert(timeout, unit); - isFnSerializationReqd = false; - Function functionObject = FunctionService.getFunction(functionName); + isFunctionSerializationRequired = false; + Function functionObject = uncheckedCast(FunctionService.getFunction(functionName)); if (functionObject == null) { byte[] functionAttributes = getFunctionAttributes(functionName); if (functionAttributes == null) { - // GEODE-5618: Set authentication properties before executing the internal function. + // Set authentication properties before executing the internal function. 
try { if (proxyCache != null) { if (proxyCache.isClosed()) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerToClientFunctionResultSender.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerToClientFunctionResultSender.java index d471b2a8560a..9e5900f73d82 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerToClientFunctionResultSender.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/ServerToClientFunctionResultSender.java @@ -310,9 +310,8 @@ public synchronized void setException(Throwable exception) { if (lastResultReceived) { return; } - if (logger.isDebugEnabled()) { - logger.debug("ServerToClientFunctionResultSender setting exception:", exception); - } + + logger.debug("ServerToClientFunctionResultSender setting exception:", exception); synchronized (msg) { if (!sc.getTransientFlag(Command.RESPONDED)) { alreadySendException.set(true); @@ -322,12 +321,12 @@ public synchronized void setException(Throwable exception) { } String exceptionMessage = exception.getMessage() != null ? exception.getMessage() : "Exception occurred during function execution"; - logger.warn(String.format("Exception on server while executing function : %s", - fn), - exception); - if (logger.isDebugEnabled()) { - logger.debug("ServerToClientFunctionResultSender sending Function Exception : "); + if (exception.getCause() instanceof InternalFunctionInvocationTargetException) { + logger.debug("Exception on server while executing function: {}", fn, exception); + } else { + logger.warn("Exception on server while executing function: {}", fn, exception); } + logger.debug("ServerToClientFunctionResultSender sending Function Exception : "); writeFunctionExceptionResponse(msg, exceptionMessage, exception); lastResultReceived = true; } catch (IOException ignored) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/StreamingFunctionOperation.java b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/StreamingFunctionOperation.java index 6b61440a3793..24503dd03371 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/execute/StreamingFunctionOperation.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/execute/StreamingFunctionOperation.java @@ -86,8 +86,9 @@ public void processData(Object result, boolean lastMsg, DistributedMember member } } - public ResultCollector getFunctionResultFrom(Set recipients, Function function, - AbstractExecution execution) { + public ResultCollector getFunctionResultFrom(Set recipients, + Function function, + AbstractExecution execution) { if (recipients.isEmpty()) { return rc; } @@ -100,10 +101,10 @@ public ResultCollector getFunctionResultFrom(Set recipients, Function function, if (execution instanceof DistributedRegionFunctionExecutor || execution instanceof MultiRegionFunctionExecutor) { m = createRequestMessage(Collections.singleton(recip), processor, execution.isReExecute(), - execution.isFnSerializationReqd()); + execution.isFunctionSerializationRequired()); } else { m = createRequestMessage(Collections.singleton(recip), processor, false, - execution.isFnSerializationReqd()); + execution.isFunctionSerializationRequired()); } sys.getDistributionManager().putOutgoing(m); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java index 1915384bde1a..b0cca3557029 100755 --- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/HARegionQueue.java @@ -80,7 +80,6 @@ import org.apache.geode.cache.query.internal.cq.InternalCqQuery; import org.apache.geode.cache.server.CacheServer; import org.apache.geode.cache.util.CacheListenerAdapter; -import org.apache.geode.distributed.DistributedMember; import org.apache.geode.distributed.internal.DistributionManager; import org.apache.geode.distributed.internal.InternalDistributedSystem; import org.apache.geode.distributed.internal.membership.InternalDistributedMember; @@ -2777,7 +2776,7 @@ public void run() { QueueRemovalMessage qrm = new QueueRemovalMessage(); qrm.resetRecipients(); List servers = cache.getCacheServers(); - List recipients = new LinkedList(); + List recipients = new LinkedList<>(); for (CacheServer server : servers) { recipients.addAll(((CacheServerImpl) server).getCacheServerAdvisor() .adviseBridgeServers()); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/ThreadIdentifier.java b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/ThreadIdentifier.java index 561f8e958c18..8293408b2d1b 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/ha/ThreadIdentifier.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/ha/ThreadIdentifier.java @@ -27,7 +27,7 @@ /** * Class identifying a Thread uniquely across the distributed system. It is composed of two fields - * 1) A byte array uniquely identifying the distributed system 2) A long value unqiuely identifying + * 1) A byte array uniquely identifying the distributed system 2) A long value uniquely identifying * the thread in the distributed system * * The application thread while operating on the Region gets an EventID object ( contained in @@ -147,7 +147,7 @@ public ThreadIdentifier(final byte[] mid, long threadId) { @Override public boolean equals(Object obj) { - if ((obj == null) || !(obj instanceof ThreadIdentifier)) { + if (!(obj instanceof ThreadIdentifier)) { return false; } ThreadIdentifier other = (ThreadIdentifier) obj; @@ -189,7 +189,6 @@ public static String toDisplayString(long tid) { @Override public String toString() { - return "ThreadId[" + "id=" + membershipID.length + "bytes; " + toDisplayString(threadID) diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/AllBucketProfilesUpdateMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/AllBucketProfilesUpdateMessage.java index 4f45693bafa1..42945daf3081 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/AllBucketProfilesUpdateMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/AllBucketProfilesUpdateMessage.java @@ -33,6 +33,7 @@ import org.apache.geode.distributed.internal.OperationExecutors; import org.apache.geode.distributed.internal.ReplyMessage; import org.apache.geode.distributed.internal.ReplyProcessor21; +import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.cache.BucketAdvisor; import org.apache.geode.internal.cache.PartitionedRegion; import org.apache.geode.internal.serialization.DeserializationContext; @@ -49,8 +50,7 @@ public class AllBucketProfilesUpdateMessage extends DistributionMessage implements MessageWithReply { private static final Logger logger = LogService.getLogger(); - private static final long serialVersionUID = 1L; - private int prId; 
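The `ThreadIdentifier.equals` change above leans on a Java guarantee that makes the old explicit null test redundant: `instanceof` evaluates to `false` when its left operand is `null`, so `(obj == null) || !(obj instanceof T)` collapses to `!(obj instanceof T)`. A toy class (not Geode code) demonstrating the resulting idiom:

```java
final class Point {
  final int x;
  final int y;

  Point(int x, int y) {
    this.x = x;
    this.y = y;
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof Point)) { // also covers obj == null
      return false;
    }
    Point other = (Point) obj;
    return x == other.x && y == other.y;
  }

  @Override
  public int hashCode() {
    return 31 * x + y;
  }
}
```
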
+ private int partitionedRegionId; private int processorId = 0; private Map profiles; @@ -61,11 +61,12 @@ public int getProcessorType() { return OperationExecutors.WAITING_POOL_EXECUTOR; } - private AllBucketProfilesUpdateMessage(Set recipients, int partitionedRegionId, int processorId, + private AllBucketProfilesUpdateMessage(Set recipients, + int partitionedRegionId, int processorId, Map profiles) { setRecipients(recipients); this.processorId = processorId; - prId = partitionedRegionId; + this.partitionedRegionId = partitionedRegionId; this.profiles = profiles; } @@ -77,7 +78,7 @@ public int getProcessorId() { @Override protected void process(ClusterDistributionManager dm) { try { - PartitionedRegion pr = PartitionedRegion.getPRFromId(prId); + PartitionedRegion pr = PartitionedRegion.getPRFromId(partitionedRegionId); for (Map.Entry profile : profiles.entrySet()) { pr.getRegionAdvisor().putBucketProfile(profile.getKey(), profile.getValue()); } @@ -122,15 +123,14 @@ protected void process(ClusterDistributionManager dm) { * @return an instance of reply processor if requireAck is true on which the caller can wait until * the event has finished. */ - public static ReplyProcessor21 send(Set recipients, DistributionManager dm, int prId, + public static ReplyProcessor21 send(Set recipients, + DistributionManager dm, int prId, Map profiles) { if (recipients.isEmpty()) { return null; } - ReplyProcessor21 rp = null; - int procId = 0; - rp = new ReplyProcessor21(dm, recipients); - procId = rp.getProcessorId(); + final ReplyProcessor21 rp = new ReplyProcessor21(dm, recipients); + final int procId = rp.getProcessorId(); AllBucketProfilesUpdateMessage m = new AllBucketProfilesUpdateMessage(recipients, prId, procId, profiles); dm.putOutgoing(m); @@ -146,7 +146,7 @@ public int getDSFID() { public void fromData(DataInput in, DeserializationContext context) throws IOException, ClassNotFoundException { super.fromData(in, context); - prId = in.readInt(); + partitionedRegionId = in.readInt(); processorId = in.readInt(); profiles = DataSerializer.readObject(in); } @@ -155,7 +155,7 @@ public void fromData(DataInput in, public void toData(DataOutput out, SerializationContext context) throws IOException { super.toData(out, context); - out.writeInt(prId); + out.writeInt(partitionedRegionId); out.writeInt(processorId); DataSerializer.writeObject(profiles, out); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java index d968dea73553..312d1d653bff 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BecomePrimaryBucketMessage.java @@ -86,14 +86,13 @@ public static BecomePrimaryBucketResponse send(InternalDistributedMember recipie Assert.assertTrue(recipient != null, "BecomePrimaryBucketMessage NULL recipient"); BecomePrimaryBucketResponse response = - new BecomePrimaryBucketResponse(pr.getSystem(), recipient, pr); + new BecomePrimaryBucketResponse(pr.getSystem(), recipient); BecomePrimaryBucketMessage msg = new BecomePrimaryBucketMessage(recipient, pr.getPRId(), response, bid, isRebalance); msg.setTransactionDistributed(pr.getCache().getTxManager().isDistributed()); Set failures = pr.getDistributionManager().putOutgoing(msg); if (failures != null && failures.size() > 0) { - // throw new ForceReattemptException("Failed 
sending <" + msg + ">"); return null; } pr.getPrStats().incPartitionMessagesSent(); @@ -261,7 +260,7 @@ public static class BecomePrimaryBucketResponse extends PartitionResponse { private volatile boolean success; public BecomePrimaryBucketResponse(InternalDistributedSystem ds, - InternalDistributedMember recipient, PartitionedRegion theRegion) { + InternalDistributedMember recipient) { super(ds, recipient); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java index 54b8d73c42e3..5fc53846cdf4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketBackupMessage.java @@ -24,6 +24,7 @@ import org.apache.geode.cache.CacheException; import org.apache.geode.distributed.internal.ClusterDistributionManager; import org.apache.geode.distributed.internal.OperationExecutors; +import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.Assert; import org.apache.geode.internal.cache.PartitionedRegion; import org.apache.geode.internal.cache.PartitionedRegionDataStore; @@ -51,7 +52,8 @@ public BucketBackupMessage() { super(); } - private BucketBackupMessage(Set recipients, int regionId, int bucketId) { + private BucketBackupMessage(Set recipients, int regionId, + int bucketId) { super(recipients, regionId, null /* no processor */); this.bucketId = bucketId; } @@ -62,7 +64,8 @@ private BucketBackupMessage(Set recipients, int regionId, int bucketId) { * @param recipients the member that the contains keys/value message is sent to * @param r the PartitionedRegion that contains the bucket */ - public static void send(Set recipients, PartitionedRegion r, int bucketId) { + public static void send(Set recipients, PartitionedRegion r, + int bucketId) { Assert.assertTrue(recipients != null, "BucketBackupMessage NULL sender list"); BucketBackupMessage m = new BucketBackupMessage(recipients, r.getPRId(), bucketId); m.setTransactionDistributed(r.getCache().getTxManager().isDistributed()); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketCountLoadProbe.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketCountLoadProbe.java index 261e9c3ca323..3df4f068bff7 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketCountLoadProbe.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketCountLoadProbe.java @@ -27,11 +27,10 @@ import org.apache.geode.internal.serialization.SerializationContext; /** - * A load probe which calculates the load of a pr using the just the number of buckets on a member. + * A load probe which calculates the load of a pr using just the number of buckets on a member. 
* */ public class BucketCountLoadProbe implements LoadProbe, DataSerializableFixedID { - private static final long serialVersionUID = 7040814060882774875L; @Override public PRLoad getLoad(PartitionedRegion pr) { @@ -40,15 +39,13 @@ public PRLoad getLoad(PartitionedRegion pr) { PRLoad prLoad = new PRLoad(configuredBucketCount, pr.getLocalMaxMemory()); // key: bid, value: size - for (Integer bidInt : ds.getAllLocalBucketIds()) { - int bid = bidInt; - - BucketAdvisor bucketAdvisor = pr.getRegionAdvisor().getBucket(bid).getBucketAdvisor(); + for (Integer bucketId : ds.getAllLocalBucketIds()) { + BucketAdvisor bucketAdvisor = pr.getRegionAdvisor().getBucket(bucketId).getBucketAdvisor(); // Wait for a primary to exist for this bucket, because // it might be this member. bucketAdvisor.getPrimary(); - boolean isPrimary = pr.getRegionAdvisor().getBucket(bid).getBucketAdvisor().isPrimary(); - prLoad.addBucket(bid, 1, isPrimary ? 1 : 0); + boolean isPrimary = pr.getRegionAdvisor().getBucket(bucketId).getBucketAdvisor().isPrimary(); + prLoad.addBucket(bucketId, 1, isPrimary ? 1 : 0); } return prLoad; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketProfileUpdateMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketProfileUpdateMessage.java index 4c1f485867d4..55e0942424fa 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketProfileUpdateMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketProfileUpdateMessage.java @@ -32,6 +32,7 @@ import org.apache.geode.distributed.internal.OperationExecutors; import org.apache.geode.distributed.internal.ReplyMessage; import org.apache.geode.distributed.internal.ReplyProcessor21; +import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.cache.BucketAdvisor; import org.apache.geode.internal.cache.BucketAdvisor.BucketProfile; import org.apache.geode.internal.cache.PartitionedRegion; @@ -49,7 +50,7 @@ public class BucketProfileUpdateMessage extends DistributionMessage implements M private static final Logger logger = LogService.getLogger(); private static final long serialVersionUID = 1L; - private int prId; + private int partitionedRegionId; private int bucketId; private int processorId = 0; private BucketAdvisor.BucketProfile profile; @@ -61,11 +62,12 @@ public int getProcessorType() { return OperationExecutors.WAITING_POOL_EXECUTOR; } - private BucketProfileUpdateMessage(Set recipients, int partitionedRegionId, int processorId, + private BucketProfileUpdateMessage(Set recipients, + int partitionedRegionId, int processorId, int bucketId, BucketProfile profile) { setRecipients(recipients); this.processorId = processorId; - prId = partitionedRegionId; + this.partitionedRegionId = partitionedRegionId; this.bucketId = bucketId; this.profile = profile; } @@ -83,7 +85,7 @@ public boolean sendViaUDP() { @Override protected void process(ClusterDistributionManager dm) { try { - PartitionedRegion pr = PartitionedRegion.getPRFromId(prId); + PartitionedRegion pr = PartitionedRegion.getPRFromId(partitionedRegionId); // pr.waitOnBucketInitialization(); // While PR doesn't directly do GII, wait on this for // bucket initialization -- mthomas 5/17/2007 pr.getRegionAdvisor().putBucketProfile(bucketId, profile); @@ -130,7 +132,8 @@ protected void process(ClusterDistributionManager dm) { * @return an instance of reply processor if requireAck is true on which the caller can wait 
until * the event has finished. */ - public static ReplyProcessor21 send(Set recipients, DistributionManager dm, int prId, + public static ReplyProcessor21 send(Set recipients, + DistributionManager dm, int prId, int bucketId, BucketProfile bp, boolean requireAck) { if (recipients.isEmpty()) { return null; @@ -156,7 +159,7 @@ public int getDSFID() { public void fromData(DataInput in, DeserializationContext context) throws IOException, ClassNotFoundException { super.fromData(in, context); - prId = in.readInt(); + partitionedRegionId = in.readInt(); bucketId = in.readInt(); processorId = in.readInt(); profile = DataSerializer.readObject(in); @@ -166,7 +169,7 @@ public void fromData(DataInput in, public void toData(DataOutput out, SerializationContext context) throws IOException { super.toData(out, context); - out.writeInt(prId); + out.writeInt(partitionedRegionId); out.writeInt(bucketId); out.writeInt(processorId); DataSerializer.writeObject(profile, out); @@ -174,12 +177,8 @@ public void toData(DataOutput out, @Override public String toString() { - StringBuilder buff = new StringBuilder(); - String className = getClass().getName(); - String shortName = - className.substring(className.lastIndexOf('.', className.lastIndexOf('.') - 1) + 1); // partition. - return buff.append(shortName).append("(prid=").append(prId).append("; bucketid=") - .append(bucketId).append("; sender=").append(getSender()).append("]; processorId=") - .append(processorId).append("; profile=").append(profile).append(")").toString(); + return "BucketProfileUpdateMessage(partitionedRegionId=" + partitionedRegionId + "; bucketId=" + + bucketId + "; sender=" + getSender() + "]; processorId=" + processorId + "; profile=" + + profile + ")"; } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java index 726786433cc6..49fdca784b48 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/BucketSizeMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -55,7 +57,7 @@ public class BucketSizeMessage extends PartitionMessage { private int bucketId; /** - * Empty contstructor provided for {@link org.apache.geode.DataSerializer} + * Empty constructor provided for {@link org.apache.geode.DataSerializer} */ public BucketSizeMessage() { super(); @@ -87,10 +89,9 @@ public static BucketSizeResponse send(InternalDistributedMember recipient, Parti BucketSizeResponse p = new BucketSizeResponse(r.getSystem(), Collections.singleton(recipient)); BucketSizeMessage m = new BucketSizeMessage(recipient, r.getPRId(), p, bucketId); m.setTransactionDistributed(r.getCache().getTxManager().isDistributed()); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { - throw new ForceReattemptException( - String.format("Failed sending < %s >", m)); + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { + throw new ForceReattemptException(String.format("Failed sending < %s >", m)); } return p; @@ -161,7 +162,8 @@ private BucketSizeReplyMessage(int processorId, long size) { /** Send an ack */ public static void 
send(InternalDistributedMember recipient, int processorId, DistributionManager dm, long size) { - Assert.assertTrue(recipient != null, "PRDistribuedGetReplyMessage NULL reply message"); + Assert.assertTrue(recipient != null, + "PRDistributedBucketSizeReplyMessage NULL reply message"); BucketSizeReplyMessage m = new BucketSizeReplyMessage(processorId, size); m.setRecipient(recipient); dm.putOutgoing(m); @@ -220,7 +222,7 @@ public void toData(DataOutput out, @Override public String toString() { - return "PRDistributedBucketSizeReplyMessage " + "processorid=" + return "PRDistributedBucketSizeReplyMessage " + "processorId=" + processorId + " reply to sender " + getSender() + " returning numEntries=" + getSize(); } @@ -238,7 +240,8 @@ public long getSize() { public static class BucketSizeResponse extends ReplyProcessor21 { private volatile long returnValue; - public BucketSizeResponse(InternalDistributedSystem ds, Set recipients) { + public BucketSizeResponse(InternalDistributedSystem ds, + Set recipients) { super(ds, recipients); } @@ -259,7 +262,7 @@ public void process(DistributionMessage msg) { } /** - * @return Set the keys associated with the bucketid of the {@link BucketSizeMessage} + * @return Set the keys associated with the bucketId of the {@link BucketSizeMessage} * @throws ForceReattemptException if the peer is no longer available */ public long waitForSize() throws ForceReattemptException { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java index 48df9bdd28af..3e90ed8d5642 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ContainsKeyValueMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -83,7 +85,7 @@ public ContainsKeyValueMessage(InternalDistributedMember recipient, int regionId public static ContainsKeyValueResponse send(InternalDistributedMember recipient, PartitionedRegion r, Object key, Integer bucketId, boolean valueCheck) throws ForceReattemptException { - Assert.assertTrue(recipient != null, "PRDistribuedContainsKeyValueMessage NULL reply message"); + Assert.assertTrue(recipient != null, "PRDistributedContainsKeyValueMessage NULL reply message"); ContainsKeyValueResponse p = new ContainsKeyValueResponse(r.getSystem(), Collections.singleton(recipient), key); @@ -91,10 +93,9 @@ public static ContainsKeyValueResponse send(InternalDistributedMember recipient, new ContainsKeyValueMessage(recipient, r.getPRId(), p, key, bucketId, valueCheck); m.setTransactionDistributed(r.getCache().getTxManager().isDistributed()); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { - throw new ForceReattemptException( - String.format("Failed sending < %s >", m)); + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { + throw new ForceReattemptException(String.format("Failed sending < %s >", m)); } return p; } @@ -118,9 +119,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part replyVal = ds.containsKeyLocally(bucketId, key); } } catch (PRLocallyDestroyedException pde) { - throw new 
ForceReattemptException( - "Enountered PRLocallyDestroyedException", - pde); + throw new ForceReattemptException("Encountered PRLocallyDestroyedException", pde); } r.getPrStats().endPartitionMessagesProcessing(startTime); @@ -136,8 +135,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part throw fre; } - // Unless there was an exception thrown, this message handles sending the - // response + // Unless there was an exception thrown, this message handles sending the response return false; } @@ -240,7 +238,7 @@ public void toData(DataOutput out, @Override public String toString() { - return "ContainsKeyValueReplyMessage " + "processorid=" + processorId + return "ContainsKeyValueReplyMessage " + "processorId=" + processorId + " returning " + doesItContainKeyValue(); } @@ -260,7 +258,8 @@ public static class ContainsKeyValueResponse extends PartitionResponse { private volatile boolean returnValueReceived; final Object key; - public ContainsKeyValueResponse(InternalDistributedSystem ds, Set recipients, Object key) { + public ContainsKeyValueResponse(InternalDistributedSystem ds, + Set recipients, Object key) { super(ds, recipients, false); this.key = key; } @@ -283,7 +282,7 @@ public void process(DistributionMessage msg) { } /** - * @return Set the keys associated with the bucketid of the {@link ContainsKeyValueMessage} + * @return Set the keys associated with the bucketId of the {@link ContainsKeyValueMessage} * @throws ForceReattemptException if the peer is no longer available * @throws PrimaryBucketException if the instance of the bucket that received this operation was * not primary diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java index 9fcb5b24195e..194f86f71648 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/CreateBucketMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -109,8 +111,8 @@ public static NodeResponse send(InternalDistributedMember recipient, Partitioned p.enableSevereAlertProcessing(); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { throw new ForceReattemptException("Failed sending <" + m + ">"); } @@ -290,8 +292,7 @@ public void fromData(DataInput in, @Override public String toString() { - return "CreateBucketReplyMessage " + "processorid=" - + processorId; + return "CreateBucketReplyMessage " + "processorId=" + processorId; } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java index 65b153f2d1d2..8182b536186f 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DeposePrimaryBucketMessage.java @@ -77,14 +77,13 @@ public static DeposePrimaryBucketResponse send(InternalDistributedMember recipie Assert.assertTrue(recipient != null, 
"DeposePrimaryBucketMessage NULL recipient"); DeposePrimaryBucketResponse response = - new DeposePrimaryBucketResponse(region.getSystem(), recipient, region); + new DeposePrimaryBucketResponse(region.getSystem(), recipient); DeposePrimaryBucketMessage msg = new DeposePrimaryBucketMessage(recipient, region.getPRId(), response, bucketId); msg.setTransactionDistributed(region.getCache().getTxManager().isDistributed()); Set failures = region.getDistributionManager().putOutgoing(msg); if (failures != null && failures.size() > 0) { - // throw new ForceReattemptException("Failed sending <" + msg + ">"); return null; } region.getPrStats().incPartitionMessagesSent(); @@ -213,7 +212,7 @@ public void fromData(DataInput in, @Override public String toString() { - return "DeposePrimaryBucketReplyMessage " + "processorid=" + processorId + return "DeposePrimaryBucketReplyMessage " + "processorId=" + processorId + " reply to sender " + getSender(); } } @@ -224,7 +223,7 @@ public String toString() { public static class DeposePrimaryBucketResponse extends PartitionResponse { public DeposePrimaryBucketResponse(InternalDistributedSystem ds, - InternalDistributedMember recipient, PartitionedRegion theRegion) { + InternalDistributedMember recipient) { super(ds, recipient); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java index 8611ff7aa9bd..c392f739b5c1 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DestroyMessage.java @@ -21,6 +21,7 @@ import java.util.Set; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.DataSerializer; import org.apache.geode.cache.CacheWriterException; @@ -195,7 +196,7 @@ public static Set notifyListeners(Set cacheOpReceivers, Set adjunctRecipients, * @return the processor used to await the potential {@link org.apache.geode.cache.CacheException} * @throws ForceReattemptException if the peer is no longer available */ - public static DestroyResponse send(DistributedMember recipient, PartitionedRegion r, + public static @NotNull DestroyResponse send(DistributedMember recipient, PartitionedRegion r, EntryEventImpl event, Object expectedOldValue) throws ForceReattemptException { // Assert.assertTrue(recipient != null, "DestroyMessage NULL recipient"); recipient may be null // for event notification @@ -462,7 +463,7 @@ public void setFilterInfo(FilterRoutingInfo filterInfo) { @Override protected boolean mayNotifySerialGatewaySender(ClusterDistributionManager dm) { - return notifiesSerialGatewaySender(dm); + return notifiesSerialGatewaySender(); } public static class DestroyReplyMessage extends ReplyMessage { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java index afdd65396ce0..294aaa7af9c3 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/DumpB2NRegion.java @@ -64,14 +64,16 @@ public class DumpB2NRegion extends PartitionMessage { public DumpB2NRegion() {} - private DumpB2NRegion(Set recipients, int regionId, ReplyProcessor21 processor, int bId, - boolean justPrimaryInfo) { + private DumpB2NRegion(Set 
recipients, int regionId, + ReplyProcessor21 processor, int bucketId, + boolean onlyReturnPrimaryInfo) { super(recipients, regionId, processor); - bucketId = bId; - onlyReturnPrimaryInfo = justPrimaryInfo; + this.bucketId = bucketId; + this.onlyReturnPrimaryInfo = onlyReturnPrimaryInfo; } - public static DumpB2NResponse send(Set recipients, PartitionedRegion r, int bId, + public static DumpB2NResponse send(Set recipients, PartitionedRegion r, + int bId, boolean justPrimaryInfo) { DumpB2NResponse p = new DumpB2NResponse(r.getSystem(), recipients); DumpB2NRegion m = new DumpB2NRegion(recipients, r.getPRId(), p, bId, justPrimaryInfo); @@ -90,7 +92,6 @@ public void process(final ClusterDistributionManager dm) { for (;;) { dm.getCancelCriterion().checkCancelInProgress(null); - // pr = null; (redundant assignment) pr = PartitionedRegion.getPRFromId(regionId); if (pr != null) { @@ -122,14 +123,8 @@ public void process(final ClusterDistributionManager dm) { // OK, now it's safe to process this. super.process(dm); - } catch (CancelException e) { - sendReply(sender, processorId, dm, new ReplyException(e), pr, 0); - } catch (PRLocallyDestroyedException e) { + } catch (CancelException | PRLocallyDestroyedException | RegionDestroyedException e) { sendReply(sender, processorId, dm, new ReplyException(e), pr, 0); - return; - } catch (RegionDestroyedException rde) { - sendReply(sender, processorId, dm, new ReplyException(rde), pr, 0); - return; } } @@ -137,14 +132,14 @@ public void process(final ClusterDistributionManager dm) { @Override protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion pr, long startTime) throws CacheException { - PrimaryInfo pinfo = null; + PrimaryInfo primaryInfo = null; if (onlyReturnPrimaryInfo) { - pinfo = new PrimaryInfo(pr.getRegionAdvisor().getBucket(bucketId).isHosting(), + primaryInfo = new PrimaryInfo(pr.getRegionAdvisor().getBucket(bucketId).isHosting(), pr.getRegionAdvisor().isPrimaryForBucket(bucketId), ""); } else { pr.dumpB2NForBucket(bucketId); } - DumpB2NReplyMessage.send(getSender(), getProcessorId(), dm, pinfo); + DumpB2NReplyMessage.send(getSender(), getProcessorId(), dm, primaryInfo); return false; } @@ -177,17 +172,17 @@ public static class DumpB2NReplyMessage extends ReplyMessage { public DumpB2NReplyMessage() {} - private DumpB2NReplyMessage(int procid, PrimaryInfo pinfo) { + private DumpB2NReplyMessage(int processorId, PrimaryInfo primaryInfo) { super(); - setProcessorId(procid); - primaryInfo = pinfo; + setProcessorId(processorId); + this.primaryInfo = primaryInfo; } public static void send(InternalDistributedMember recipient, int processorId, - DistributionManager dm, PrimaryInfo pinfo) { - DumpB2NReplyMessage m = new DumpB2NReplyMessage(processorId, pinfo); + DistributionManager distributionManager, PrimaryInfo primaryInfo) { + DumpB2NReplyMessage m = new DumpB2NReplyMessage(processorId, primaryInfo); m.setRecipient(recipient); - dm.putOutgoing(m); + distributionManager.putOutgoing(m); } @@ -257,7 +252,8 @@ public String toString() { public static class DumpB2NResponse extends PartitionResponse { public final ArrayList primaryInfos = new ArrayList<>(); - public DumpB2NResponse(InternalDistributedSystem dm, Set initMembers) { + public DumpB2NResponse(InternalDistributedSystem dm, + Set initMembers) { super(dm, initMembers); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/EndBucketCreationMessage.java 
b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/EndBucketCreationMessage.java index 277ccb4ce49b..93a7b84af6e6 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/EndBucketCreationMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/EndBucketCreationMessage.java @@ -58,7 +58,7 @@ private EndBucketCreationMessage(Collection recipient * Sends a message to make the recipient primary for the bucket. * * - * @param newPrimary the member to to become primary + * @param newPrimary the member to become primary * @param pr the PartitionedRegion of the bucket * @param bid the bucket to become primary for */ @@ -81,8 +81,7 @@ public EndBucketCreationMessage(DataInput in) throws IOException, ClassNotFoundE @Override public int getProcessorType() { - // use the waiting pool because operateOnPartitionedRegion will - // try to get a dlock + // use the waiting pool because operateOnPartitionedRegion will try to get a dlock return OperationExecutors.WAITING_POOL_EXECUTOR; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java index 4939e5c64f8d..73d614a7c0e4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchBulkEntriesMessage.java @@ -15,6 +15,9 @@ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.io.ByteArrayInputStream; import java.io.DataInput; import java.io.DataInputStream; @@ -44,6 +47,7 @@ import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.Assert; import org.apache.geode.internal.HeapDataOutputStream; +import org.apache.geode.internal.InternalDataSerializer; import org.apache.geode.internal.cache.BucketDump; import org.apache.geode.internal.cache.BucketRegion; import org.apache.geode.internal.cache.ForceReattemptException; @@ -68,14 +72,14 @@ public class FetchBulkEntriesMessage extends PartitionMessage { private static final Logger logger = LogService.getLogger(); - private HashSet bucketIds; + private Set bucketIds; private String regex; /** * Map of bucket-id as key and set of keys as value. 
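`FetchBulkEntriesMessage` gets the same treatment, retyping `bucketIds` above (and `bucketKeys` just below) from concrete `HashSet`/`HashMap` to the collection interfaces, keeping the implementation choice at the construction site only. A small illustrative sketch of that convention, assuming a `Map<Integer, Set<Object>>`-shaped index like the field's javadoc ("bucket-id as key, set of keys as value") describes — the class and method names here are hypothetical:

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class InterfaceTypingSketch {
  // declare against the interfaces; pick the implementation only when constructing
  private final Map<Integer, Set<Object>> bucketKeys = new HashMap<>();
  private final Set<Integer> bucketIds = new HashSet<>();

  void track(int bucketId, Object key) {
    bucketIds.add(bucketId);
    bucketKeys.computeIfAbsent(bucketId, id -> new HashSet<>()).add(key);
  }

  public static void main(String[] args) {
    InterfaceTypingSketch sketch = new InterfaceTypingSketch();
    sketch.track(1, "k1");
    sketch.track(1, "k2");
    System.out.println(sketch.bucketKeys); // {1=[k1, k2]}
  }
}
```
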
*/ - private HashMap bucketKeys; + private Map> bucketKeys; private static final byte ALL_KEYS = (byte) 0; @@ -90,7 +94,8 @@ public class FetchBulkEntriesMessage extends PartitionMessage { public FetchBulkEntriesMessage() {} private FetchBulkEntriesMessage(InternalDistributedMember recipient, int regionId, - ReplyProcessor21 processor, HashMap bucketKeys, HashSet bucketIds, + ReplyProcessor21 processor, Map> bucketKeys, + Set bucketIds, String regex, boolean allowTombstones) { super(recipient, regionId, processor); this.bucketKeys = bucketKeys; @@ -111,18 +116,17 @@ private FetchBulkEntriesMessage(InternalDistributedMember recipient, int regionI * @throws ForceReattemptException if the peer is no longer available */ public static FetchBulkEntriesResponse send(InternalDistributedMember recipient, - PartitionedRegion r, HashMap bucketKeys, HashSet bucketIds, + PartitionedRegion r, Map> bucketKeys, Set bucketIds, String regex, boolean allowTombstones) throws ForceReattemptException { Assert.assertTrue(recipient != null, "FetchBulkEntriesMessage NULL reply message"); - FetchBulkEntriesResponse p = new FetchBulkEntriesResponse(r.getSystem(), r, recipient); + FetchBulkEntriesResponse p = new FetchBulkEntriesResponse(r.getSystem(), recipient); FetchBulkEntriesMessage m = new FetchBulkEntriesMessage(recipient, r.getPRId(), p, bucketKeys, bucketIds, regex, allowTombstones); m.setTransactionDistributed(r.getCache().getTxManager().isDistributed()); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { - throw new ForceReattemptException( - String.format("Failed sending < %s >", m)); + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { + throw new ForceReattemptException(String.format("Failed sending < %s >", m)); } return p; } @@ -136,7 +140,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part } FetchBulkEntriesReplyMessage.sendReply(pr, getSender(), getProcessorId(), dm, bucketKeys, - bucketIds, regex, allowTombstones, startTime); + bucketIds, regex, allowTombstones); return false; } @@ -160,7 +164,7 @@ public void fromData(DataInput in, if (keys == KEY_LIST) { bucketKeys = DataSerializer.readHashMap(in); } else if (keys == ALL_KEYS) { - bucketIds = DataSerializer.readHashSet(in); + bucketIds = InternalDataSerializer.readSet(in); } regex = DataSerializer.readString(in); allowTombstones = DataSerializer.readPrimitiveBoolean(in); @@ -174,7 +178,7 @@ public void toData(DataOutput out, if (keys == KEY_LIST) { DataSerializer.writeHashMap(bucketKeys, out); } else if (keys == ALL_KEYS) { - DataSerializer.writeHashSet(bucketIds, out); + InternalDataSerializer.writeSet(bucketIds, out); } DataSerializer.writeString(regex, out); DataSerializer.writePrimitiveBoolean(allowTombstones, out); @@ -214,8 +218,8 @@ private FetchBulkEntriesReplyMessage(InternalDistributedMember dest, int process public static void sendReply(PartitionedRegion pr, final InternalDistributedMember recipient, final int processorId, final DistributionManager dm, - final HashMap bucketKeys, final HashSet bucketIds, String regex, - boolean allowTombstones, long startTime) throws ForceReattemptException { + final Map> bucketKeys, final Set bucketIds, + String regex, boolean allowTombstones) throws ForceReattemptException { PartitionedRegionDataStore ds = pr.getDataStore(); if (ds == null) { @@ -224,7 +228,7 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb ArrayList maps = new 
ArrayList<>(); HashSet failedBuckets = new HashSet<>(); - Set bucketIdSet = null; + final Set bucketIdSet; if (bucketKeys != null) { bucketIdSet = bucketKeys.keySet(); } else { // bucketIds != null @@ -244,7 +248,6 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb new HeapDataOutputStream(InitialImageOperation.CHUNK_SIZE_IN_BYTES + 2048, Versioning.getKnownVersionOrDefault(recipient.getVersion(), KnownVersion.CURRENT))) { Iterator mapsIterator = maps.iterator(); - Iterator it = null; boolean keepGoing = true; boolean writeFooter = false; @@ -257,7 +260,7 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb try { map.releaseDestroyLock(); // instead take a bucketCreationLock.getWriteLock() or pr.BucketLock? - } catch (CancelException ignored1) { + } catch (CancelException ignored) { } finally { lockAcquired = false; } @@ -278,15 +281,16 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb } try { + final Iterator it; if (bucketKeys != null) { it = bucketKeys.get(map.getId()).iterator(); } else { // bucketIds != null if (regex == null) { - it = new HashSet(map.keySet(allowTombstones)).iterator(); + it = new HashSet<>(uncheckedCast(map.keySet(allowTombstones))).iterator(); } else { - it = - map.getKeysWithInterest(InterestType.REGULAR_EXPRESSION, regex, allowTombstones) - .iterator(); + it = map + .getKeysWithInterest(InterestType.REGULAR_EXPRESSION, regex, allowTombstones) + .iterator(); } } @@ -304,7 +308,7 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb int entrySize = mos.size(); DataSerializer.writeObject(key, mos); - VersionTag versionTag = clientEvent.getVersionTag(); + VersionTag versionTag = clientEvent.getVersionTag(); if (versionTag != null) { versionTag.replaceNullIDs(map.getVersionMember()); } @@ -334,8 +338,8 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb if (lastMsg) { reply.failedBucketIds = failedBuckets; } - Set failures = dm.putOutgoing(reply); - keepGoing = (failures == null) || (failures.size() == 0); + Set failures = dm.putOutgoing(reply); + keepGoing = isEmpty(failures); if (lastMsg && keepGoing) { lastMsgSent = true; } @@ -355,7 +359,7 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb if (lockAcquired) { try { map.releaseDestroyLock(); - } catch (CancelException ignored1) { + } catch (CancelException ignored) { } finally { lockAcquired = false; } @@ -387,8 +391,8 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb FetchBulkEntriesReplyMessage reply = new FetchBulkEntriesReplyMessage(recipient, processorId, mos, msgNum, true); reply.failedBucketIds = failedBuckets; - Set failures = dm.putOutgoing(reply); - if (failures != null && failures.size() > 0) { + Set failures = dm.putOutgoing(reply); + if (!isEmpty(failures)) { throw new ForceReattemptException("Failed to send response"); } } @@ -397,8 +401,7 @@ public static void sendReply(PartitionedRegion pr, final InternalDistributedMemb if (lockAcquired) { try { map.releaseDestroyLock(); - } catch (CancelException e) { - // ignore + } catch (CancelException ignored) { } finally { lockAcquired = false; } @@ -459,7 +462,7 @@ public void fromData(DataInput in, @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("FetchBulkEntriesReplyMessage ").append("processorid=").append(processorId); + sb.append("FetchBulkEntriesReplyMessage 
").append("processorId=").append(processorId); if (getSender() != null) { sb.append(",sender=").append(getSender()); } @@ -484,11 +487,9 @@ public String toString() { */ public static class FetchBulkEntriesResponse extends ReplyProcessor21 { - private final PartitionedRegion pr; - private final HashMap> returnValue; - private final HashMap> returnVersions = new HashMap(); - private final Map canonicalMembers = + private final HashMap>> returnVersions = new HashMap<>(); + private final Map, VersionSource> canonicalMembers = new ConcurrentHashMap<>(); /** lock used to synchronize chunk processing */ @@ -508,10 +509,9 @@ public static class FetchBulkEntriesResponse extends ReplyProcessor21 { private final InternalDistributedMember recipient; - public FetchBulkEntriesResponse(InternalDistributedSystem ds, final PartitionedRegion pr, + public FetchBulkEntriesResponse(InternalDistributedSystem ds, final InternalDistributedMember recipient) { super(ds, Collections.singleton(recipient)); - this.pr = pr; this.recipient = recipient; returnValue = new HashMap<>(); } @@ -541,10 +541,10 @@ void processChunkResponse(FetchBulkEntriesReplyMessage msg) { if (key != null) { deserializingKey = false; Object value = DataSerializer.readObject(in); - VersionTag versionTag = DataSerializer.readObject(in); + VersionTag> versionTag = DataSerializer.readObject(in); if (versionTag != null) { - // Fix for 47260 - canonicalize the member ids to avoid an OOME + // canonicalize the member ids to avoid java.lang.OutOfMemoryError if (canonicalMembers.containsKey(versionTag.getMemberID())) { versionTag.setMemberID(canonicalMembers.get(versionTag.getMemberID())); } else { @@ -554,7 +554,7 @@ void processChunkResponse(FetchBulkEntriesReplyMessage msg) { synchronized (returnValue) { HashMap valueMap = returnValue.get(currentId); - HashMap versionMap = returnVersions.get(currentId); + HashMap> versionMap = returnVersions.get(currentId); if (valueMap != null) { valueMap.put(key, value); } else { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java index e9a6eda75259..3589c1d594db 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessage.java @@ -14,6 +14,9 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.io.ByteArrayInputStream; import java.io.DataInput; import java.io.DataInputStream; @@ -32,6 +35,7 @@ import org.apache.geode.DataSerializer; import org.apache.geode.annotations.Immutable; import org.apache.geode.cache.CacheException; +import org.apache.geode.cache.Region; import org.apache.geode.distributed.internal.ClusterDistributionManager; import org.apache.geode.distributed.internal.DistributionManager; import org.apache.geode.distributed.internal.DistributionMessage; @@ -92,14 +96,13 @@ private FetchEntriesMessage(InternalDistributedMember recipient, int regionId, public static FetchEntriesResponse send(InternalDistributedMember recipient, PartitionedRegion r, int bucketId) throws ForceReattemptException { Assert.assertTrue(recipient != null, "FetchEntriesMessage NULL reply message"); - FetchEntriesResponse p = new FetchEntriesResponse(r.getSystem(), r, recipient, bucketId); 
+ FetchEntriesResponse p = new FetchEntriesResponse(r.getSystem(), recipient, bucketId); FetchEntriesMessage m = new FetchEntriesMessage(recipient, r.getPRId(), p, bucketId); m.setTransactionDistributed(r.getCache().getTxManager().isDistributed()); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { - throw new ForceReattemptException( - String.format("Failed sending < %s >", m)); + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { + throw new ForceReattemptException(String.format("Failed sending < %s >", m)); } return p; @@ -217,9 +220,9 @@ public static void send(final InternalDistributedMember recipient, final int pro final int numSeries = 1; final int seriesNum = 0; - final RegionVersionVector rvv = keys.getVersionVector(); + final RegionVersionVector rvv = keys.getVersionVector(); if (rvv != null) { - RegionVersionVector clone = rvv.getCloneForTransmission(); + RegionVersionVector clone = rvv.getCloneForTransmission(); ReplyMessage.send(recipient, processorId, clone, dm); } @@ -244,9 +247,8 @@ public boolean executeWith(Object a, int b) { HeapDataOutputStream chunk = (HeapDataOutputStream) a; last = b > 0; try { - boolean okay = sendChunk(recipient, processorId, bucketId, dm, chunk, seriesNum, + return sendChunk(recipient, processorId, bucketId, dm, chunk, seriesNum, msgNum++, numSeries, last, rvv != null); - return okay; } catch (CancelException e) { return false; } @@ -270,14 +272,14 @@ static boolean sendChunk(InternalDistributedMember recipient, int processorId, i int numSeries, boolean lastInSeries, boolean hasRVV) { FetchEntriesReplyMessage reply = new FetchEntriesReplyMessage(recipient, processorId, bucketId, chunk, seriesNum, msgNum, numSeries, lastInSeries, hasRVV); - Set failures = dm.putOutgoing(reply); - return (failures == null) || (failures.size() == 0); + Set failures = dm.putOutgoing(reply); + return isEmpty(failures); } /** * Serialize the given map's entries into byte[] chunks, calling proc for each one. proc args: * the byte[] chunk and an int indicating whether it is the last chunk (positive means last - * chunk, zero othewise). The return value of proc indicates whether to continue to the next + * chunk, otherwise zero). The return value of proc indicates whether to continue to the next * chunk (true) or abort (false). * * @return true if finished all chunks, false if stopped early @@ -285,15 +287,16 @@ static boolean sendChunk(InternalDistributedMember recipient, int processorId, i static boolean chunkMap(InternalDistributedMember receiver, BucketRegion map, int CHUNK_SIZE_IN_BYTES, boolean includeValues, ObjectIntProcedure proc) throws IOException { - Iterator it = map.entrySet().iterator(); + @SuppressWarnings("unchecked") + final Iterator> it = map.entrySet().iterator(); - boolean keepGoing = true; - boolean sentLastChunk = false; + boolean sentLastChunk; // always write at least one chunk try (HeapDataOutputStream mos = new HeapDataOutputStream( InitialImageOperation.CHUNK_SIZE_IN_BYTES + 2048, Versioning .getKnownVersionOrDefault(receiver.getVersion(), KnownVersion.CURRENT))) { + boolean keepGoing; do { mos.reset(); @@ -317,8 +320,8 @@ static boolean chunkMap(InternalDistributedMember receiver, BucketRegion map, if (Token.isInvalid(value)) { value = null; } - VersionStamp stamp = re.getVersionStamp(); - VersionTag versionTag = stamp != null ? stamp.asVersionTag() : null; + VersionStamp stamp = re.getVersionStamp(); + VersionTag versionTag = stamp != null ? 
stamp.asVersionTag() : null; if (versionTag != null) { versionTag.replaceNullIDs(map.getVersionMember()); } @@ -414,7 +417,7 @@ public void fromData(DataInput in, @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("FetchEntriesReplyMessage ").append("processorid=").append(processorId) + sb.append("FetchEntriesReplyMessage ").append("processorId=").append(processorId) .append(",bucketId=").append(bucketId); if (getSender() != null) { sb.append(",sender=").append(getSender()); @@ -441,12 +444,10 @@ public String toString() { */ public static class FetchEntriesResponse extends ReplyProcessor21 { - private final PartitionedRegion pr; - - protected volatile RegionVersionVector returnRVV; + protected volatile RegionVersionVector> returnRVV; protected final HashMap returnValue; - protected final HashMap returnVersions = new HashMap(); - private final Map canonicalMembers = + protected final HashMap> returnVersions = new HashMap<>(); + private final Map, VersionSource> canonicalMembers = new ConcurrentHashMap<>(); /** lock used to synchronize chunk processing */ @@ -465,10 +466,9 @@ public static class FetchEntriesResponse extends ReplyProcessor21 { private final InternalDistributedMember recipient; - public FetchEntriesResponse(InternalDistributedSystem ds, final PartitionedRegion pr, + public FetchEntriesResponse(InternalDistributedSystem ds, final InternalDistributedMember recipient, final int bucketId) { super(ds, Collections.singleton(recipient)); - this.pr = pr; this.bucketId = bucketId; this.recipient = recipient; returnValue = new HashMap() { @@ -489,7 +489,7 @@ public void process(DistributionMessage msg) { ReplyMessage reply = (ReplyMessage) msg; Object returnValue = reply.getReturnValue(); if (returnValue instanceof RegionVersionVector) { - returnRVV = (RegionVersionVector) returnValue; + returnRVV = uncheckedCast(returnValue); synchronized (endLock) { if (allMessagesReceived(true)) { super.process(msg); @@ -502,7 +502,7 @@ public void process(DistributionMessage msg) { } void processChunk(FetchEntriesReplyMessage msg) { - // this processing algorighm won't work well if there are multiple recipients. currently the + // this processing algorithm won't work well if there are multiple recipients. The // retry logic for failed recipients is in PartitionedRegion. If we parallelize the sending // of this message, we'll need to handle failover in this processor class and track results // differently. @@ -526,10 +526,10 @@ void processChunk(FetchEntriesReplyMessage msg) { if (key != null) { deserializingKey = false; Object value = DataSerializer.readObject(in); - VersionTag versionTag = DataSerializer.readObject(in); + VersionTag> versionTag = DataSerializer.readObject(in); - // Fix for 47260 - canonicalize the mebmer ids to avoid an OOME - VersionSource id = versionTag == null ? null : versionTag.getMemberID(); + // canonicalize the member ids to avoid java.lang.OutOfMemoryError + VersionSource id = versionTag == null ? 
null : versionTag.getMemberID(); if (id != null) { if (canonicalMembers.containsKey(id)) { versionTag.setMemberID(canonicalMembers.get(id)); @@ -593,7 +593,7 @@ private boolean allMessagesReceived(boolean hasRVV) { } /** - * @return Set the keys associated with the bucketid of the {@link FetchKeysMessage} + * @return Set the keys associated with the bucketId of the {@link FetchKeysMessage} * @throws ForceReattemptException if the peer is no longer available */ public BucketDump waitForEntries() throws ForceReattemptException { @@ -619,10 +619,9 @@ public BucketDump waitForEntries() throws ForceReattemptException { throw new ForceReattemptException( "No replies received"); } - // Deserialize all CachedDeserializable here so we have access to applications thread context + // Deserialize all CachedDeserializable here, so we have access to applications thread context // class loader - for (final Map.Entry objectObjectEntry : returnValue.entrySet()) { - Map.Entry entry = (Map.Entry) objectObjectEntry; + for (final Map.Entry entry : returnValue.entrySet()) { Object value = entry.getValue(); if (value instanceof CachedDeserializable) { entry.setValue(((CachedDeserializable) value).getDeserializedValue(null, null)); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java index 597486d69ce2..edbdedbb5833 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FetchKeysMessage.java @@ -164,7 +164,8 @@ public static FetchKeysResponse sendInterestQuery(InternalDistributedMember reci // override processor type @Override - PartitionResponse createReplyProcessor(PartitionedRegion r, Set recipients) { + PartitionResponse createReplyProcessor(PartitionedRegion r, + Set recipients) { return new FetchKeysResponse(r.getSystem(), recipients); } @@ -305,7 +306,7 @@ public static void send(final @NotNull InternalDistributedMember recipient, // chunkEntries returns false if didn't finish if (logger.isDebugEnabled()) { - logger.debug("Starting pr keys chunking for {} kets to member {}", keys.size(), recipient); + logger.debug("Starting pr keys chunking for {} keys to member {}", keys.size(), recipient); } try { boolean finished = chunkSet(recipient, keys, InitialImageOperation.CHUNK_SIZE_IN_BYTES, @@ -358,16 +359,16 @@ static boolean sendChunk(InternalDistributedMember recipient, int processorId, } /** - * Serialize the given set's elments into byte[] chunks, calling proc for each one. proc args: + * Serialize the given set's elements into byte[] chunks, calling proc for each one. proc args: * the byte[] chunk and an int indicating whether it is the last chunk (positive means last - * chunk, zero othewise). The return value of proc indicates whether to continue to the next + * chunk, otherwise zero). The return value of proc indicates whether to continue to the next * chunk (true) or abort (false). 
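The chunkMap/chunkSet javadoc above describes a callback-driven contract: serialize elements into byte[] chunks, hand each chunk to proc together with a flag that is positive only on the last chunk, and stop early if proc returns false. A simplified, self-contained sketch of that contract, where plain byte[] items and a ByteArrayOutputStream stand in for region entries and Geode's HeapDataOutputStream:

```java
import java.io.ByteArrayOutputStream;
import java.util.Iterator;
import java.util.List;

class ChunkingSketch {
  interface ObjectIntProcedure {
    // chunk payload, plus an int that is positive only for the final chunk
    boolean executeWith(byte[] chunk, int lastFlag);
  }

  /** Returns true if all chunks were delivered, false if proc aborted early. */
  static boolean chunk(List<byte[]> items, int chunkSizeInBytes, ObjectIntProcedure proc) {
    Iterator<byte[]> it = items.iterator();
    ByteArrayOutputStream buffer = new ByteArrayOutputStream(chunkSizeInBytes);
    boolean sentLastChunk = false;
    boolean keepGoing;
    do { // always emit at least one chunk, even for an empty input
      buffer.reset();
      while (it.hasNext() && buffer.size() < chunkSizeInBytes) {
        byte[] item = it.next();
        buffer.write(item, 0, item.length); // in-memory write, no IOException
      }
      boolean last = !it.hasNext();
      keepGoing = proc.executeWith(buffer.toByteArray(), last ? 1 : 0);
      if (last && keepGoing) {
        sentLastChunk = true;
      }
    } while (keepGoing && it.hasNext());
    return sentLastChunk;
  }
}
```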
* * @return true if finished all chunks, false if stopped early */ static boolean chunkSet(final @NotNull InternalDistributedMember recipient, Set set, - final int chunkSizeInBytes, - final boolean includeValues, final @NotNull ObjectIntProcedure proc) throws IOException { + final int chunkSizeInBytes, final boolean includeValues, + final @NotNull ObjectIntProcedure proc) throws IOException { Iterator it = set.iterator(); boolean keepGoing; @@ -469,7 +470,7 @@ public void fromData(DataInput in, @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("FetchKeysReplyMessage ").append("processorid=").append(processorId); + sb.append("FetchKeysReplyMessage ").append("processorId=").append(processorId); if (getSender() != null) { sb.append(",sender=").append(getSender()); } @@ -517,13 +518,14 @@ public static class FetchKeysResponse extends PartitionResponse { */ private volatile boolean lastChunkReceived; - public FetchKeysResponse(InternalDistributedSystem ds, Set recipients) { + public FetchKeysResponse(InternalDistributedSystem ds, + Set recipients) { super(ds, recipients); returnValue = new HashSet<>(); } void processChunk(FetchKeysReplyMessage msg) { - // this processing algorighm won't work well if there are multiple recipients. currently the + // this processing algorithm won't work well if there are multiple recipients. The // retry logic for failed recipients is in PartitionedRegion. If we parallelize the sending // of this message, we'll need to handle failover in this processor class and track results // differently. @@ -579,7 +581,7 @@ void processChunk(FetchKeysReplyMessage msg) { } /** - * @return Set the keys associated with the bucketid of the {@link FetchKeysMessage} + * @return Set the keys associated with the bucketId of the {@link FetchKeysMessage} * @throws ForceReattemptException if the peer is no longer available */ public Set waitForKeys() throws ForceReattemptException { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FlushMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FlushMessage.java index 7f54275521f6..9d3de91a9dc5 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FlushMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/FlushMessage.java @@ -51,8 +51,6 @@ public class FlushMessage extends SerialDistributionMessage implements MessageWithReply { private static final Logger logger = LogService.getLogger(); - private static final long serialVersionUID = 1L; - int prId; int bucketId; int processorId; @@ -68,7 +66,7 @@ private FlushMessage(int prId, int bucketId, int processorId, } /* - * Used both for the receipt of a FlushMessage and the reply to a Flushmessage + * Used both for the receipt of a FlushMessage and the reply to a FlushMessage */ @Override protected void process(ClusterDistributionManager dm) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java index e209f373f2e5..7643fd1429db 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/InvalidateMessage.java @@ -21,6 +21,7 @@ import java.util.Set; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.DataSerializer; import 
org.apache.geode.cache.CacheException; @@ -128,7 +129,7 @@ public static Set notifyListeners(Set cacheOpReceivers, Set adjunctRecipients, * {@link org.apache.geode.cache.CacheException} * @throws ForceReattemptException if the peer is no longer available */ - public static InvalidateResponse send(DistributedMember recipient, PartitionedRegion r, + public static @NotNull InvalidateResponse send(DistributedMember recipient, PartitionedRegion r, EntryEventImpl event) throws ForceReattemptException { // Assert.assertTrue(recipient != null, "InvalidateMessage NULL recipient"); recipient may be // null for remote notifications diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBackupBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBackupBucketMessage.java index 0cc93b8ec974..b0f9e8fa910e 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBackupBucketMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBackupBucketMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -120,8 +122,8 @@ public static NodeResponse send(InternalDistributedMember recipient, Partitioned p.enableSevereAlertProcessing(); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { throw new ForceReattemptException("Failed sending <" + m + ">"); } @@ -402,7 +404,7 @@ public void process(DistributionMessage m) { ManageBackupBucketReplyMessage reply = (ManageBackupBucketReplyMessage) m; msg = reply; if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) { - logger.trace(LogMarker.DM_VERBOSE, "NodeResponse return value is {} isInitializng={}", + logger.trace(LogMarker.DM_VERBOSE, "NodeResponse return value is {} isInitializing={}", reply.acceptedBucket, reply.notYetInitialized); } } else { @@ -455,14 +457,6 @@ public boolean waitForAcceptance() throws ForceReattemptException { } return (msg != null) && msg.acceptedBucket; } - - /** - * After a response has been returned from waitForAcceptance, this method may be used to see if - * the other vm rejected the bucket because it was still initializing. 
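Several send(...) factories gain a @NotNull return annotation in this patch. A tiny sketch of what the annotation buys (IDE and static-analysis enforcement that callers never need a null check); the class and method below are invented for illustration, not Geode API:

```java
import org.jetbrains.annotations.NotNull;

class ResponseFactorySketch {
  // Annotating the return type documents the "never returns null" contract
  // that the method body already guarantees, so tools can flag redundant
  // null checks at call sites.
  static @NotNull String send(String recipient) {
    return "response-for-" + recipient;
  }

  public static void main(String[] args) {
    System.out.println(send("member-1")); // safe to dereference directly
  }
}
```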
- */ - public boolean rejectedDueToInitialization() { - return (msg != null) && msg.notYetInitialized; - } } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBucketMessage.java index fcb343511895..c89f6726e1b7 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBucketMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/ManageBucketMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -71,11 +73,11 @@ public class ManageBucketMessage extends PartitionMessage { public ManageBucketMessage() {} private ManageBucketMessage(InternalDistributedMember recipient, int regionId, - ReplyProcessor21 processor, int bucketId, int bucketSize, boolean hostItNow) { + ReplyProcessor21 processor, int bucketId, int bucketSize, boolean forceCreation) { super(recipient, regionId, processor); this.bucketId = bucketId; this.bucketSize = bucketSize; - forceCreation = hostItNow; + this.forceCreation = forceCreation; } public ManageBucketMessage(DataInput in) throws IOException, ClassNotFoundException { @@ -108,10 +110,9 @@ public static NodeResponse send(InternalDistributedMember recipient, Partitioned p.enableSevereAlertProcessing(); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { - throw new ForceReattemptException( - String.format("Failed sending < %s >", m)); + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { + throw new ForceReattemptException(String.format("Failed sending < %s >", m)); } return p; @@ -136,17 +137,16 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part } // This is to ensure that initialization is complete before bucket creation request is - // serviced. BUGFIX for 35888 + // serviced. r.waitOnInitialization(); - - r.checkReadiness(); // Don't allow closed PartitionedRegions that have datastores to host - // buckets + // Don't allow closed PartitionedRegions that have data stores to host buckets + r.checkReadiness(); PartitionedRegionDataStore prDs = r.getDataStore(); boolean managingBucket = prDs.handleManageBucketRequest(bucketId, bucketSize, sender, forceCreation); r.getPrStats().endPartitionMessagesProcessing(startTime); if (managingBucket) { - // fix for bug 39356 - If the sender died while we were creating the bucket + // If the sender died while we were creating the bucket // notify other nodes that they should invoke grabBackupBuckets to // make copies of this bucket. Normally the sender would be responsible // for creating those copies. 
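In ManageBucketMessage the constructor parameter is renamed to match the field and assigned through this., removing the hostItNow/forceCreation mismatch. A minimal sketch of the before/after shape, with names trimmed down for illustration:

```java
class ForceCreationSketch {
  private final boolean forceCreation;

  // Before: ForceCreationSketch(boolean hostItNow) { forceCreation = hostItNow; }
  // After: parameter and field share a name, so the assignment must be (and
  // now visibly is) qualified with "this.".
  ForceCreationSketch(boolean forceCreation) {
    this.forceCreation = forceCreation;
  }

  boolean isForceCreation() {
    return forceCreation;
  }
}
```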
@@ -192,12 +192,6 @@ public void toData(DataOutput out, out.writeBoolean(forceCreation); } - - /** - * Assists the toString method in reporting the contents of this message - * - * @see PartitionMessage#toString() - */ @Override protected void appendFields(StringBuilder buff) { super.appendFields(buff); @@ -335,7 +329,7 @@ public void fromData(DataInput in, @Override public String toString() { - return "ManageBucketReplyMessage " + "processorid=" + return "ManageBucketReplyMessage " + "processorId=" + processorId + " accepted bucket=" + acceptedBucket + " isInitializing=" + notYetInitialized; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/MoveBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/MoveBucketMessage.java index b8f5b607dc67..532b638f821e 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/MoveBucketMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/MoveBucketMessage.java @@ -84,7 +84,7 @@ public static MoveBucketResponse send(InternalDistributedMember recipient, Assert.assertTrue(recipient != null, "MoveBucketMessage NULL recipient"); - MoveBucketResponse response = new MoveBucketResponse(region.getSystem(), recipient, region); + MoveBucketResponse response = new MoveBucketResponse(region.getSystem(), recipient); MoveBucketMessage msg = new MoveBucketMessage(recipient, region.getPRId(), response, bucketId, source); msg.setTransactionDistributed(region.getCache().getTxManager().isDistributed()); @@ -225,7 +225,7 @@ public void fromData(DataInput in, @Override public String toString() { - return "MoveBucketReplyMessage " + "processorid=" + processorId + return "MoveBucketReplyMessage " + "processorId=" + processorId + " moved=" + moved + " reply to sender " + getSender(); } @@ -238,8 +238,7 @@ public static class MoveBucketResponse extends PartitionResponse { private volatile boolean moved = false; - public MoveBucketResponse(InternalDistributedSystem ds, InternalDistributedMember recipient, - PartitionedRegion theRegion) { + public MoveBucketResponse(InternalDistributedSystem ds, InternalDistributedMember recipient) { super(ds, recipient); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetails.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetails.java index 06698efa1e23..a49ce4f45598 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetails.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetails.java @@ -16,7 +16,6 @@ import java.io.DataInput; import java.io.DataOutput; -import java.io.IOException; import java.util.Collections; import java.util.Set; @@ -34,10 +33,10 @@ public Set getOfflineMembers(int bucketId) { } @Override - public void fromData(DataInput in) throws IOException, ClassNotFoundException {} + public void fromData(DataInput in) {} @Override - public void toData(DataOutput out) throws IOException {} + public void toData(DataOutput out) {} }; Set getOfflineMembers(int bucketId); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetailsImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetailsImpl.java index 5fd4a9bbc63b..f1db50420fbd 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetailsImpl.java +++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/OfflineMemberDetailsImpl.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -38,19 +40,15 @@ public OfflineMemberDetailsImpl(Set[] offlineMembers) { this.offlineMembers = offlineMembers; } - - @Override public Set getOfflineMembers(int bucketId) { return offlineMembers[bucketId]; } - - @Override public void fromData(DataInput in) throws IOException, ClassNotFoundException { int offlineMembersLength = in.readInt(); - offlineMembers = new Set[offlineMembersLength]; + offlineMembers = uncheckedCast(new Set[offlineMembersLength]); for (int i = 0; i < offlineMembersLength; i++) { int setSize = in.readInt(); Set set = new HashSet<>(setSize); @@ -63,7 +61,6 @@ public void fromData(DataInput in) throws IOException, ClassNotFoundException { } } - @Override public void toData(DataOutput out) throws IOException { out.writeInt(offlineMembers.length); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java index 74e14f76e83b..c039ae152e2d 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRFunctionStreamingResultCollector.java @@ -103,7 +103,7 @@ public Object getResultInternal() throws FunctionException { clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -128,7 +128,7 @@ public Object getResultInternal() throws FunctionException { clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -154,7 +154,7 @@ public Object getResultInternal() throws FunctionException { clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -180,7 +180,7 @@ public Object getResultInternal() throws FunctionException { clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -206,7 +206,7 @@ public Object getResultInternal() throws FunctionException { clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -250,7 +250,7 @@ public Object getResultInternal(long timeout, TimeUnit unit) clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if 
(execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -272,7 +272,7 @@ public Object getResultInternal(long timeout, TimeUnit unit) clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -296,7 +296,7 @@ public Object getResultInternal(long timeout, TimeUnit unit) clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -320,7 +320,7 @@ public Object getResultInternal(long timeout, TimeUnit unit) clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); @@ -344,7 +344,7 @@ public Object getResultInternal(long timeout, TimeUnit unit) clearResults(); execution = execution.setIsReExecute(); ResultCollector newRc = null; - if (execution.isFnSerializationReqd()) { + if (execution.isFunctionSerializationRequired()) { newRc = execution.execute(fn); } else { newRc = execution.execute(fn.getId()); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRLoad.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRLoad.java index 10f4a72a8a09..d4b140656e4b 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRLoad.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRLoad.java @@ -61,7 +61,7 @@ public PRLoad() { /** * Constructs a new PRLoad. Please use {@link #addBucket(int, float, float)} to add bucket loads. 
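PRFunctionStreamingResultCollector repeats the same retry branch at every call site: after setIsReExecute(), re-run with the Function object when it must be serialized to peers, otherwise by id so peers resolve it from their local function registry. A simplified stand-in for that dispatch; the interfaces below are sketches, not Geode's real Execution/Function types:

```java
interface Function {
  String getId();
}

interface Execution {
  Execution setIsReExecute();
  boolean isFunctionSerializationRequired();
  Object execute(Function fn); // ship the whole function object
  Object execute(String functionId); // ship only the id
}

class ReExecuteSketch {
  static Object reExecute(Execution execution, Function fn) {
    Execution retry = execution.setIsReExecute();
    // Send the Function itself only when peers cannot look it up locally.
    return retry.isFunctionSerializationRequired()
        ? retry.execute(fn)
        : retry.execute(fn.getId());
  }
}
```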
* - * @param numBuckets the number of buckets in the the PR + * @param numBuckets the number of buckets in the PR * @param weight the weight of the PR */ public PRLoad(int numBuckets, float weight) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRUpdateEntryVersionMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRUpdateEntryVersionMessage.java index cb031ded17c8..0b44febed3e8 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRUpdateEntryVersionMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PRUpdateEntryVersionMessage.java @@ -23,6 +23,7 @@ import java.util.Set; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.cache.CacheException; import org.apache.geode.cache.EntryNotFoundException; @@ -251,7 +252,7 @@ public void waitForResult() throws CacheException, ForceReattemptException { } } - public static UpdateEntryVersionResponse send(InternalDistributedMember recipient, + public static @NotNull UpdateEntryVersionResponse send(InternalDistributedMember recipient, PartitionedRegion r, EntryEventImpl event) throws ForceReattemptException { Set recipients = Collections.singleton(recipient); UpdateEntryVersionResponse p = diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMemberInfoImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMemberInfoImpl.java index 9c5e81136288..ebc633721e92 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMemberInfoImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMemberInfoImpl.java @@ -47,8 +47,6 @@ public PartitionMemberInfoImpl(DistributedMember distributedMember, long configu public PartitionMemberInfoImpl(DistributedMember distributedMember, long configuredMaxMemory, long size, int bucketCount, int primaryCount, PRLoad prLoad, long[] bucketSizes) { - // TODO rebalance disabling this unit bug 39868 is fixed. - // Assert.assertTrue(size >= 0); this.distributedMember = distributedMember; this.configuredMaxMemory = configuredMaxMemory; this.size = size; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java index 8ebc4da46970..761952bf116f 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionMessage.java @@ -23,7 +23,6 @@ import org.apache.logging.log4j.Logger; import org.apache.geode.CancelException; -import org.apache.geode.InternalGemFireError; import org.apache.geode.InternalGemFireException; import org.apache.geode.SystemFailure; import org.apache.geode.annotations.Immutable; @@ -139,7 +138,7 @@ public PartitionMessage() {} public PartitionMessage(InternalDistributedMember recipient, int regionId, ReplyProcessor21 processor) { - Assert.assertTrue(recipient != null, "PartitionMesssage recipient can not be null"); + Assert.assertTrue(recipient != null, "PartitionMessage recipient can not be null"); setRecipient(recipient); this.regionId = regionId; processorId = processor == null ? 
0 : processor.getProcessorId(); @@ -232,7 +231,7 @@ public int getRegionId() { } /** - * @return the {@link ReplyProcessor21}id associated with the message, null if no acknowlegement + * @return the {@link ReplyProcessor21}id associated with the message, null if no acknowledgement * is required. */ @Override @@ -242,7 +241,7 @@ public int getProcessorId() { /** * @param processorId1 the {@link org.apache.geode.distributed.internal.ReplyProcessor21} id - * associated with the message, null if no acknowlegement is required. + * associated with the message, null if no acknowledgement is required. */ public void registerProcessor(int processorId1) { processorId = processorId1; @@ -252,7 +251,8 @@ public void registerProcessor(int processorId1) { * @return return the message that should be sent to listeners, or null if this message should not * be relayed */ - public PartitionMessage getMessageForRelayToListeners(EntryEventImpl event, Set recipients) { + public PartitionMessage getMessageForRelayToListeners(EntryEventImpl event, + Set recipients) { return null; } @@ -438,11 +438,11 @@ protected Throwable processCheckForPR(PartitionedRegion pr, DistributionManager distributionManager) { if ((pr == null || !pr.getDistributionAdvisor().isInitialized()) && failIfRegionMissing()) { // if the distributed system is disconnecting, don't send a reply saying - // the partitioned region can't be found (bug 36585) - Throwable thr = new ForceReattemptException( + // the partitioned region can't be found + return new ForceReattemptException( String.format("%s : could not find partitioned region with Id %s", distributionManager.getDistributionManagerId(), regionId)); - return thr; // reply sent in finally block below + // reply sent in finally block below } return null; } @@ -485,14 +485,16 @@ protected boolean failIfRegionMissing() { * @param r the region being operated on * @param processor the reply processor to be notified */ - public Set relayToListeners(Set cacheOpRecipients, Set adjunctRecipients, + public Set relayToListeners( + Set cacheOpRecipients, + Set adjunctRecipients, FilterRoutingInfo filterRoutingInfo, EntryEventImpl event, PartitionedRegion r, DirectReplyProcessor processor) { processorId = processor == null ? 0 : processor.getProcessorId(); notificationOnly = true; setFilterInfo(filterRoutingInfo); - Set failures1 = null; + Set failures1 = null; if (!adjunctRecipients.isEmpty()) { if (logger.isTraceEnabled(LogMarker.DM_VERBOSE)) { logger.trace(LogMarker.DM_VERBOSE, @@ -510,16 +512,11 @@ public Set relayToListeners(Set cacheOpRecipients, Set adjunctRecipients, * return a new reply processor for this class, for use in relaying a response. This must * be an instance method so subclasses can override it properly. 
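The comment above spells out why createReplyProcessor must be an instance method: subclasses override it to substitute a specialized response processor when a message is relayed. A skeletal sketch of that factory-method pattern, with invented class names:

```java
class PartitionResponseSketch {}

class FetchKeysResponseSketch extends PartitionResponseSketch {}

class BaseMessageSketch {
  // An instance method, not static, precisely so subclasses can override it.
  PartitionResponseSketch createReplyProcessor() {
    return new PartitionResponseSketch();
  }
}

class FetchKeysLikeMessage extends BaseMessageSketch {
  @Override
  PartitionResponseSketch createReplyProcessor() {
    return new FetchKeysResponseSketch(); // specialized processor
  }
}
```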
*/ - PartitionResponse createReplyProcessor(PartitionedRegion r, Set recipients) { + PartitionResponse createReplyProcessor(PartitionedRegion r, + Set recipients) { return new PartitionResponse(r.getSystem(), recipients); } - - protected boolean operateOnRegion(ClusterDistributionManager dm, PartitionedRegion pr) { - throw new InternalGemFireError( - "Sorry, use operateOnPartitionedRegion for PR messages"); - } - /** * An operation upon the messages partitioned region which each subclassing message must implement * @@ -629,8 +626,6 @@ protected short computeCompressedShort(short s) { public String toString() { StringBuilder buff = new StringBuilder(); String className = getClass().getName(); - // className.substring(className.lastIndexOf('.', className.lastIndexOf('.') - 1) + 1); // - // partition. more generic version buff.append(className.substring(className.indexOf(PN_TOKEN) + PN_TOKEN.length())); // partition. buff.append("(prid="); // make sure this is the first one buff.append(regionId); @@ -643,8 +638,6 @@ public String toString() { name = pr.getFullPath(); } } catch (Exception ignore) { - /* ignored */ - name = null; } if (name != null) { buff.append(" (name = \"").append(name).append("\")"); @@ -723,16 +716,14 @@ public boolean canParticipateInTransaction() { return true; } - protected boolean notifiesSerialGatewaySender(ClusterDistributionManager dm) { + protected boolean notifiesSerialGatewaySender() { try { PartitionedRegion pr = PartitionedRegion.getPRFromId(regionId); if (pr == null) { return false; } return pr.notifiesSerialGatewaySender(); - } catch (PRLocallyDestroyedException ignore) { - return false; - } catch (RuntimeException ignore) { + } catch (PRLocallyDestroyedException | RuntimeException ignore) { return false; } } @@ -760,11 +751,13 @@ public static class PartitionResponse extends DirectReplyProcessor { */ boolean responseRequired; - public PartitionResponse(InternalDistributedSystem dm, Set initMembers) { + public PartitionResponse(InternalDistributedSystem dm, + Set initMembers) { this(dm, initMembers, true); } - public PartitionResponse(InternalDistributedSystem dm, Set initMembers, boolean register) { + public PartitionResponse(InternalDistributedSystem dm, + Set initMembers, boolean register) { super(dm, initMembers); if (register) { register(); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionFunctionStreamingMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionFunctionStreamingMessage.java index d3a15b6cf630..5e30c7d77058 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionFunctionStreamingMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionFunctionStreamingMessage.java @@ -49,8 +49,6 @@ public class PartitionedRegionFunctionStreamingMessage extends PartitionMessage private int replyMsgNum; - private Object result; - private FunctionRemoteContext context; public PartitionedRegionFunctionStreamingMessage() { @@ -153,7 +151,6 @@ protected void sendReply(InternalDistributedMember member, int procId, Distribut boolean lastResult, boolean sendResultsInOrder) { // if there was an exception, then throw out any data if (ex != null) { - this.result = null; replyMsgNum = 0; replyLastMsg = true; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionObserverAdapter.java 
b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionObserverAdapter.java index c99173c2dd07..1c844706d368 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionObserverAdapter.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionObserverAdapter.java @@ -17,18 +17,11 @@ import org.apache.geode.internal.cache.PartitionedRegion; /** - * This class provides 'do-nothing' implementations of all of the methods of interface + * This class provides 'do-nothing' implementations of all the methods of interface * PartitionedRegionObserver. See the documentation for class PartitionedRegionObserverHolder for * details. - * */ - public class PartitionedRegionObserverAdapter implements PartitionedRegionObserver { - - /** - * This callback is called just before calculating starting bucket id on datastore - */ - @Override public void beforeCalculatingStartingBucketId() {} diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionRebalanceOp.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionRebalanceOp.java index ade143a761b5..b470beb69149 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionRebalanceOp.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PartitionedRegionRebalanceOp.java @@ -116,7 +116,7 @@ public PartitionedRegionRebalanceOp(PartitionedRegion region, boolean simulate, /** * Create a rebalance operation for a single region. * - * @param region the region to rebalance + * @param targetRegion the region to rebalance * @param simulate true to only simulate rebalancing, without actually doing anything * @param replaceOfflineData true to replace offline copies of buckets with new live copies of * buckets @@ -126,16 +126,16 @@ public PartitionedRegionRebalanceOp(PartitionedRegion region, boolean simulate, * value to true then the rebalance will be cancelled * @param stats the ResourceManagerStats to use for rebalancing stats */ - public PartitionedRegionRebalanceOp(PartitionedRegion region, boolean simulate, + public PartitionedRegionRebalanceOp(PartitionedRegion targetRegion, boolean simulate, RebalanceDirector director, boolean replaceOfflineData, boolean isRebalance, AtomicBoolean cancelled, ResourceManagerStats stats) { - PartitionedRegion leader = ColocationHelper.getLeaderRegion(region); - Assert.assertTrue(leader != null); + PartitionedRegion leaderRegion = ColocationHelper.getLeaderRegion(targetRegion); + Assert.assertTrue(leaderRegion != null); // set the region we are rebalancing to be leader of the colocation group. - leaderRegion = leader; - targetRegion = region; + this.leaderRegion = leaderRegion; + this.targetRegion = targetRegion; this.simulate = simulate; this.director = director; this.cancelled = cancelled; @@ -188,14 +188,14 @@ public Set execute() { // TODO rebalance - we should really add a membership listener to ALL of // the colocated regions. 
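The notifiesSerialGatewaySender change above collapses two identical catch blocks into one multi-catch. A self-contained sketch of the same collapse; the two runtime exception types below are stand-ins for PRLocallyDestroyedException and RuntimeException:

```java
class MultiCatchSketch {
  static boolean notifies() {
    try {
      return lookup();
    } catch (IllegalStateException | UnsupportedOperationException ignore) {
      // Both failure modes share the same fallback, so one handler suffices.
      return false;
    }
  }

  static boolean lookup() {
    throw new IllegalStateException("region locally destroyed");
  }

  public static void main(String[] args) {
    System.out.println(notifies()); // false
  }
}
```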
leaderRegion.getRegionAdvisor().addMembershipListener(listener); - PartitionedRegionLoadModel loadModel = null; InternalCache cache = leaderRegion.getCache(); Map detailsMap = fetchDetails(cache); BucketOperatorWrapper serialOperator = getBucketOperator(detailsMap); ParallelBucketOperator parallelOperator = new ParallelBucketOperator(MAX_PARALLEL_OPERATIONS, cache.getDistributionManager().getExecutors().getWaitingThreadPool(), serialOperator); - loadModel = buildModel(parallelOperator, detailsMap, resourceManager); + PartitionedRegionLoadModel loadModel = + buildModel(parallelOperator, detailsMap, resourceManager); for (PartitionRebalanceDetailsImpl details : serialOperator.getDetailSet()) { details.setPartitionMemberDetailsBefore( loadModel.getPartitionedMemberDetails(details.getRegionPath())); @@ -349,18 +349,17 @@ public Set executeFPA() { // for primary and secondary. We are not creating extra bucket for any of peers // who goes down. - PartitionedRegionLoadModel model = null; Map detailsMap = fetchDetails(cache); BucketOperatorWrapper operator = getBucketOperator(detailsMap); - model = buildModel(operator, detailsMap, resourceManager); + PartitionedRegionLoadModel model = buildModel(operator, detailsMap, resourceManager); for (PartitionRebalanceDetailsImpl details : operator.getDetailSet()) { details.setPartitionMemberDetailsBefore( model.getPartitionedMemberDetails(details.getRegionPath())); } /* - * We haen't taken the distributed recovery lock as only this node is creating bucket for + * We haven't taken the distributed recovery lock as only this node is creating bucket for * itself. It will take bucket creation lock anyway. To move primary too, it has to see what * all bucket it can host as primary and make them. We don't need to do all the calculation * for fair balance between the nodes as this is a fixed partitioned region. @@ -420,9 +419,7 @@ private BucketOperatorWrapper getBucketOperator( } BucketOperator operator = simulate ? new SimulatedBucketOperator() : new BucketOperatorImpl(this); - BucketOperatorWrapper wrapper = - new BucketOperatorWrapper(operator, rebalanceDetails, stats, leaderRegion); - return wrapper; + return new BucketOperatorWrapper(operator, rebalanceDetails, stats, leaderRegion); } /** @@ -434,8 +431,6 @@ private PartitionedRegionLoadModel buildModel(BucketOperator operator, Map detailsMap, InternalResourceManager resourceManager) { PartitionedRegionLoadModel model; - final boolean isDebugEnabled = logger.isDebugEnabled(); - final DistributionManager dm = leaderRegion.getDistributionManager(); AddressComparor comparor = new AddressComparor() { @@ -456,7 +451,6 @@ public boolean enforceUniqueZones() { int totalNumberOfBuckets = leaderRegion.getTotalNumberOfBuckets(); Set criticalMembers = resourceManager.getResourceAdvisor().adviseCriticalMembers(); - boolean removeOverRedundancy = true; debug("Building Model for rebalancing " + leaderRegion + ". 
redundantCopies=" + redundantCopies + ", totalNumBuckets=" + totalNumberOfBuckets + ", criticalMembers=" + criticalMembers diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PersistentBucketRecoverer.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PersistentBucketRecoverer.java index 8789cc4f87e9..797ecbfcab33 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PersistentBucketRecoverer.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PersistentBucketRecoverer.java @@ -57,7 +57,7 @@ public class PersistentBucketRecoverer extends RecoveryRunnable implements Persi /** * True when one or more buckets have reported a change in status. */ - private volatile boolean membershipChanged = true; + private volatile boolean membershipChanged; /** * Sleep period between posting log entries. @@ -328,7 +328,7 @@ private PersistentMemberID createPersistentMemberID(PartitionedRegion region) { */ private Map> getMembersToWaitFor(boolean offlineOnly) throws RegionDestroyedException { - Map> waitingForMembers = + final Map> waitingForMembers = new HashMap<>(); @@ -352,12 +352,7 @@ private Map> getMembersToWaitFor(boolean offlin if (missingMembers != null) { for (PersistentMemberID missingMember : missingMembers) { - Set buckets = waitingForMembers.get(missingMember); - if (buckets == null) { - buckets = new TreeSet<>(); - waitingForMembers.put(missingMember, buckets); - } - buckets.add(bucketId); + waitingForMembers.computeIfAbsent(missingMember, k -> new TreeSet<>()).add(bucketId); } } } @@ -438,7 +433,7 @@ else if (!loggedDoneMessage) { } /** - * Get a consolodated set of all buckets that are waiting. + * Get a consolidated set of all buckets that are waiting. 
*/ private Set getAllWaitingBuckets( Map> offlineMembers) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PrimaryRequestMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PrimaryRequestMessage.java index a40c0ca5f753..ed84b02aa1c6 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PrimaryRequestMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PrimaryRequestMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -51,8 +53,6 @@ public class PrimaryRequestMessage extends PartitionMessage { private static final Logger logger = LogService.getLogger(); - private static final long serialVersionUID = 1L; - /** The bucketId needing primary */ private int bucketId; @@ -61,22 +61,22 @@ public class PrimaryRequestMessage extends PartitionMessage { * * @param recipients those members which own the bucket * @param r the Partitioned Region which uses/owns the bucket - * @param bucketId the idenity of the bucket + * @param bucketId the identity of the bucket * @return a response object on which the caller waits for acknowledgement of which member is the * primary * @throws ForceReattemptException if the message was unable to be sent */ - public static PrimaryResponse send(Set recipients, PartitionedRegion r, int bucketId) + public static PrimaryResponse send(Set recipients, PartitionedRegion r, + int bucketId) throws ForceReattemptException { Assert.assertTrue(recipients != null, "PrimaryRequestMessage NULL recipient"); PrimaryResponse p = new PrimaryResponse(r.getSystem(), recipients); PrimaryRequestMessage m = new PrimaryRequestMessage(recipients, r.getPRId(), p, bucketId); m.setTransactionDistributed(r.getCache().getTxManager().isDistributed()); - Set failures = r.getDistributionManager().putOutgoing(m); - if (failures != null && failures.size() > 0) { - throw new ForceReattemptException( - String.format("Failed sending < %s >", m)); + Set failures = r.getDistributionManager().putOutgoing(m); + if (!isEmpty(failures)) { + throw new ForceReattemptException(String.format("Failed sending < %s >", m)); } return p; @@ -84,9 +84,10 @@ public static PrimaryResponse send(Set recipients, PartitionedRegion r, int buck public PrimaryRequestMessage() {} - private PrimaryRequestMessage(Set recipients, int regionId, ReplyProcessor21 processor, int bId) { + private PrimaryRequestMessage(Set recipients, int regionId, + ReplyProcessor21 processor, int bucketId) { super(recipients, regionId, processor); - bucketId = bId; + this.bucketId = bucketId; } @Override @@ -104,11 +105,9 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part } pr.checkReadiness(); - final boolean isPrimary; - // TODO, I am sure if this is the method to call to elect the primary -- mthomas 4/19/2007 - isPrimary = dm.getId().equals(pr.getBucketPrimary(bucketId)); - + final boolean isPrimary = dm.getId().equals(pr.getBucketPrimary(bucketId)); PrimaryRequestReplyMessage.sendReply(getSender(), getProcessorId(), isPrimary, dm); + return false; } @@ -137,11 +136,9 @@ public int getProcessorType() { } /** - * The reply to a PrimarRequestMessage, indicating if the sender is the primary + * The reply to a PrimaryRequestMessage, indicating if the sender is the primary */ public static class PrimaryRequestReplyMessage 
extends ReplyMessage { - private static final long serialVersionUID = 1L; - public volatile boolean isPrimary; protected static void sendReply(InternalDistributedMember member, int procId, boolean isPrimary, @@ -186,7 +183,8 @@ public void toData(DataOutput out, public static class PrimaryResponse extends ReplyProcessor21 { private volatile PrimaryRequestReplyMessage msg; - protected PrimaryResponse(InternalDistributedSystem ds, Set recipients) { + protected PrimaryResponse(InternalDistributedSystem ds, + Set recipients) { super(ds, recipients); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java index bf8dd2138ffb..e30fe12b749d 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutAllPRMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -32,8 +34,6 @@ import org.apache.geode.cache.EntryExistsException; import org.apache.geode.cache.Operation; import org.apache.geode.cache.RegionDestroyedException; -import org.apache.geode.cache.client.PoolFactory; -import org.apache.geode.distributed.DistributedMember; import org.apache.geode.distributed.internal.ClusterDistributionManager; import org.apache.geode.distributed.internal.DirectReplyProcessor; import org.apache.geode.distributed.internal.DistributionManager; @@ -64,6 +64,7 @@ import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID; import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList; import org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException; +import org.apache.geode.internal.cache.versions.VersionSource; import org.apache.geode.internal.cache.versions.VersionTag; import org.apache.geode.internal.logging.log4j.LogMarker; import org.apache.geode.internal.offheap.annotations.Released; @@ -99,8 +100,6 @@ public class PutAllPRMessage extends PartitionMessageWithDirectReply { protected static final short HAS_BRIDGE_CONTEXT = UNRESERVED_FLAGS_START; protected static final short SKIP_CALLBACKS = (HAS_BRIDGE_CONTEXT << 1); - private transient InternalDistributedSystem internalDs; - /** whether direct-acknowledgement is desired */ private transient boolean directAck = false; @@ -131,9 +130,9 @@ public void addEntry(PutAllEntryData entry) { putAllPRData[putAllPRDataSize++] = entry; } - public void initMessage(PartitionedRegion r, Set recipients, boolean notifyOnly, + public void initMessage(PartitionedRegion r, Set recipients, + boolean notifyOnly, DirectReplyProcessor p) { - setInternalDs(r.getSystem()); setDirectAck(false); resetRecipients(); if (recipients != null) { @@ -167,8 +166,8 @@ public int getSize() { return putAllPRDataSize; } - public Set getKeys() { - Set keys = new HashSet(getSize()); + public Set getKeys() { + Set keys = new HashSet<>(getSize()); for (final PutAllEntryData putAllPRDatum : putAllPRData) { if (putAllPRDatum != null) { keys.add(putAllPRDatum.getKey()); @@ -186,11 +185,9 @@ public Set getKeys() { * indicate that no acknowledgement will be sent * @throws ForceReattemptException if the peer is no longer available */ - public PartitionResponse send(DistributedMember recipient, PartitionedRegion r) + public 
PartitionResponse send(InternalDistributedMember recipient, PartitionedRegion r) throws ForceReattemptException { - // Assert.assertTrue(recipient != null, "PutAllPRMessage NULL recipient"); recipient can be null - // for event notifications - Set recipients = Collections.singleton(recipient); + Set recipients = Collections.singleton(recipient); PutAllResponse p = new PutAllResponse(r.getSystem(), recipients); initMessage(r, recipients, false, p); setTransactionDistributed(r.getCache().getTxManager().isDistributed()); @@ -198,8 +195,8 @@ public PartitionResponse send(DistributedMember recipient, PartitionedRegion r) logger.debug("PutAllPRMessage.send: recipient is {}, msg is {}", recipient, this); } - Set failures = r.getDistributionManager().putOutgoing(this); - if (failures != null && failures.size() > 0) { + Set failures = r.getDistributionManager().putOutgoing(this); + if (!isEmpty(failures)) { throw new ForceReattemptException("Failed sending <" + this + ">"); } return p; @@ -267,7 +264,7 @@ public void toData(DataOutput out, hasTags = true; } - VersionTag tag = putAllPRData[i].versionTag; + VersionTag> tag = putAllPRData[i].versionTag; versionTags.add(tag); putAllPRData[i].versionTag = null; putAllPRData[i].toData(out, context); @@ -319,7 +316,6 @@ public EventID getEventID() { @Override protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion pr, long startTime) throws EntryExistsException, DataLocationException { - boolean sendReply = true; InternalDistributedMember eventSender = getSender(); @@ -331,9 +327,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part return false; } - if (sendReply) { - sendReply(getSender(), getProcessorId(), dm, null, pr, startTime); - } + sendReply(getSender(), getProcessorId(), dm, null, pr, startTime); return false; } @@ -371,11 +365,9 @@ protected Object clone() throws CloneNotSupportedException { public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender, long lastModified) throws EntryExistsException, DataLocationException { - boolean didPut = false; - long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT; if (r.hasServerProxy()) { - clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout(); if (logger.isDebugEnabled()) { + final long clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout(); logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut); } } @@ -383,11 +375,10 @@ public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember even DistributedPutAllOperation dpao = null; @Released EntryEventImpl baseEvent = null; - BucketRegion bucketRegion = null; - PartitionedRegionDataStore ds = r.getDataStore(); - InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId(); + final PartitionedRegionDataStore ds = r.getDataStore(); + final InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId(); try { - + BucketRegion bucketRegion = null; if (!notificationOnly) { // bucketRegion is not null only when !notificationOnly bucketRegion = ds.getInitializedBucketForId(null, bucketId); @@ -441,8 +432,8 @@ public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember even } locked = bucketRegion.waitUntilLocked(keys); boolean lockedForPrimary = false; - final HashMap succeeded = new HashMap(); - PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize); + final HashMap succeeded = new HashMap<>(); + final 
PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize); Object key = keys[0]; try { bucketRegion.doLockForPrimary(false); @@ -470,6 +461,7 @@ public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember even // ev will be added into dpao in putLocally() // oldValue and real operation will be modified into ev in putLocally() // then in basicPutPart3(), the ev is added into dpao + boolean didPut; try { didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false, lastModified, true); @@ -641,19 +633,10 @@ public static EntryEventImpl getEventFromEntry(InternalRegion r, InternalDistrib } } - // override reply processor type from PartitionMessage - PartitionResponse createReplyProcessor(PartitionedRegion r, Set recipients, Object key) { - return new PutAllResponse(r.getSystem(), recipients); - } - // override reply message type from PartitionMessage @Override protected void sendReply(InternalDistributedMember member, int procId, DistributionManager dm, ReplyException ex, PartitionedRegion pr, long startTime) { - // if (!result && getOperation().isCreate()) { - // System.err.println("DEBUG: put returning false. ifNew=" + ifNew - // +" ifOld="+ifOld + " message=" + this); - // } if (pr != null) { if (startTime > 0) { pr.getPrStats().endPartitionMessagesProcessing(startTime); @@ -683,17 +666,13 @@ protected void appendFields(StringBuilder buff) { } } - public void setInternalDs(InternalDistributedSystem internalDs) { - this.internalDs = internalDs; - } - public void setDirectAck(boolean directAck) { this.directAck = directAck; } @Override protected boolean mayNotifySerialGatewaySender(ClusterDistributionManager dm) { - return notifiesSerialGatewaySender(dm); + return notifiesSerialGatewaySender(); } @Override @@ -712,8 +691,6 @@ public String toString() { name = pr.getFullPath(); } } catch (Exception ignore) { - /* ignored */ - name = null; } if (name != null) { buff.append(" (name = \"").append(name).append("\")"); @@ -813,7 +790,7 @@ public void toData(DataOutput out, @Override public String toString() { - return "PutAllReplyMessage " + "processorid=" + processorId + return "PutAllReplyMessage " + "processorId=" + processorId + " returning " + result + " exception=" + getException() + " versions= " + versions; } @@ -829,7 +806,7 @@ public static class PutAllResponse extends PartitionResponse { private volatile boolean returnValue; private VersionedObjectList versions; - public PutAllResponse(InternalDistributedSystem ds, Set recipients) { + public PutAllResponse(InternalDistributedSystem ds, Set recipients) { super(ds, recipients, false); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java index 1fe87af841d5..07aa92372ae9 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/PutMessage.java @@ -24,6 +24,7 @@ import java.util.Set; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; import org.apache.geode.DataSerializer; import org.apache.geode.InvalidDeltaException; @@ -366,7 +367,7 @@ protected Object clone() throws CloneNotSupportedException { * indicate that no acknowledgement will be sent * @throws ForceReattemptException if the peer is no longer available */ - public static PartitionResponse send(DistributedMember recipient, PartitionedRegion r, + public static 
@NotNull PartitionResponse send(DistributedMember recipient, PartitionedRegion r, EntryEventImpl event, final long lastModified, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue) throws ForceReattemptException { // Assert.assertTrue(recipient != null, "PutMessage NULL recipient"); recipient can be null for @@ -839,7 +840,7 @@ public void setInternalDs(InternalDistributedSystem internalDs) { @Override protected boolean mayNotifySerialGatewaySender(ClusterDistributionManager dm) { - return notifiesSerialGatewaySender(dm); + return notifiesSerialGatewaySender(); } public static class PutReplyMessage extends ReplyMessage implements OldValueImporter { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/QueryMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/QueryMessage.java index 885f2568a552..16a7d10e9734 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/QueryMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/QueryMessage.java @@ -46,6 +46,7 @@ import org.apache.geode.distributed.internal.ReplyProcessor21; import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.distributed.internal.streaming.StreamingOperation.StreamingReplyMessage; +import org.apache.geode.internal.InternalDataSerializer; import org.apache.geode.internal.NanoTimer; import org.apache.geode.internal.cache.ForceReattemptException; import org.apache.geode.internal.cache.PRQueryProcessor; @@ -63,13 +64,13 @@ public class QueryMessage extends StreamingPartitionOperation.StreamingPartition private volatile String queryString; private volatile boolean cqQuery; private volatile Object[] parameters; - private volatile List buckets; + private volatile List buckets; private volatile boolean isPdxSerialized; private volatile boolean traceOn; - private final List resultCollector = new ArrayList<>(); - private Iterator currentResultIterator; - private Iterator currentSelectResultIterator; + private final List> resultCollector = new ArrayList<>(); + private Iterator currentResultIterator; + private Iterator> currentSelectResultIterator; private boolean isTraceInfoIteration = false; private boolean isStructType = false; @@ -81,7 +82,7 @@ public QueryMessage() { } public QueryMessage(InternalDistributedMember recipient, int regionId, ReplyProcessor21 processor, - DefaultQuery query, Object[] parameters, final List buckets) { + DefaultQuery query, Object[] parameters, final List buckets) { super(recipient, regionId, processor); queryString = query.getQueryString(); this.buckets = buckets; @@ -91,11 +92,11 @@ public QueryMessage(InternalDistributedMember recipient, int regionId, ReplyProc } /** - * Provide results to send back to requestor. terminate by returning END_OF_STREAM token object + * Provide results to send back to requester. 
terminate by returning END_OF_STREAM token object */ @Override protected Object getNextReplyObject(PartitionedRegion pr) - throws CacheException, ForceReattemptException, InterruptedException { + throws CacheException, InterruptedException { final boolean isDebugEnabled = logger.isDebugEnabled(); if (QueryMonitor.isLowMemory()) { @@ -113,7 +114,7 @@ protected Object getNextReplyObject(PartitionedRegion pr) if (isTraceInfoIteration && currentResultIterator != null) { isTraceInfoIteration = false; } - Collection results = currentSelectResultIterator.next(); + Collection results = currentSelectResultIterator.next(); if (isDebugEnabled) { logger.debug("Query result size: {}", results.size()); } @@ -182,7 +183,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part query.setRemoteQuery(true); QueryObserver indexObserver = query.startTrace(); boolean isQueryTraced = false; - List queryTraceList = null; + List queryTraceList = null; try { query.setIsCqQuery(cqQuery); @@ -208,7 +209,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part } isStructType = qp.executeQuery(resultCollector); - // Add the trace info list object after the NWayMergeResults is created so as to exclude it + // Add the trace info list object after the NWayMergeResults is created to exclude it // from the sorted collection of NWayMergeResults if (isQueryTraced) { resultCollector.add(0, queryTraceList); @@ -216,7 +217,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part currentSelectResultIterator = resultCollector.iterator(); // If trace is enabled, we will generate a trace object to send back. The time info will be - // slightly different than the one logged on this node due to generating the trace object + // slightly different from the one logged on this node due to generating the trace object // information here rather than the finally block. 
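The getNextReplyObject contract above streams result batches back to the requester and signals completion with a sentinel rather than an extra "has more" round trip. A minimal sketch of that token-terminated pattern; the class and member names here (TokenTerminatedStream, nextReplyObject, the List-based batch source) are illustrative stand-ins, not Geode APIs:

```java
import java.util.Iterator;
import java.util.List;

public class TokenTerminatedStream {
  /** Sentinel marking the end of the reply stream. */
  static final Object END_OF_STREAM = new Object();

  private final Iterator<List<Object>> batches;

  TokenTerminatedStream(Iterator<List<Object>> batches) {
    this.batches = batches;
  }

  /** Next batch of results, or END_OF_STREAM once the source is exhausted. */
  Object nextReplyObject() {
    return batches.hasNext() ? batches.next() : END_OF_STREAM;
  }

  public static void main(String[] args) {
    TokenTerminatedStream stream = new TokenTerminatedStream(
        List.of(List.<Object>of("a", "b"), List.<Object>of("c")).iterator());
    Object reply;
    // The consumer simply loops until it sees the sentinel.
    while ((reply = stream.nextReplyObject()) != END_OF_STREAM) {
      System.out.println("send batch: " + reply);
    }
  }
}
```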
if (isQueryTraced) { if (DefaultQuery.testHook != null) { @@ -234,13 +235,14 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part // created the indexes used string if (indexObserver instanceof IndexTrackingQueryObserver) { - Map indexesUsed = ((IndexTrackingQueryObserver) indexObserver).getUsedIndexes(); + Map indexesUsed = ((IndexTrackingQueryObserver) indexObserver).getUsedIndexes(); StringBuilder sb = new StringBuilder(); sb.append(" indexesUsed(").append(indexesUsed.size()).append(")"); if (indexesUsed.size() > 0) { sb.append(":"); - for (Iterator itr = indexesUsed.entrySet().iterator(); itr.hasNext();) { - Map.Entry entry = (Map.Entry) itr.next(); + for (Iterator> itr = indexesUsed.entrySet().iterator(); itr + .hasNext();) { + Map.Entry entry = itr.next(); sb.append(entry.getKey()).append(entry.getValue()); if (itr.hasNext()) { sb.append(","); @@ -277,7 +279,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part @Override protected void appendFields(StringBuilder buff) { super.appendFields(buff); - buff.append("; query=").append(queryString).append("; bucketids=").append(buckets); + buff.append("; query=").append(queryString).append("; buckets=").append(buckets); } @Override @@ -314,7 +316,7 @@ public void fromData(DataInput in, DeserializationContext context) throws IOException, ClassNotFoundException { super.fromData(in, context); queryString = DataSerializer.readString(in); - buckets = DataSerializer.readArrayList(in); + buckets = InternalDataSerializer.readList(in); parameters = DataSerializer.readObjectArray(in); cqQuery = DataSerializer.readBoolean(in); isPdxSerialized = DataSerializer.readBoolean(in); @@ -326,7 +328,7 @@ public void toData(DataOutput out, SerializationContext context) throws IOException { super.toData(out, context); DataSerializer.writeString(queryString, out); - DataSerializer.writeArrayList((ArrayList) buckets, out); + InternalDataSerializer.writeList(buckets, out); DataSerializer.writeObjectArray(parameters, out); DataSerializer.writeBoolean(cqQuery, out); DataSerializer.writeBoolean(true, out); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java index b19c263cb1d6..17554c46bd32 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RegionAdvisor.java @@ -38,6 +38,8 @@ import java.util.function.Predicate; import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import org.apache.geode.DataSerializable; import org.apache.geode.DataSerializer; @@ -129,15 +131,15 @@ public synchronized void initializeRegionAdvisor() { } PartitionedRegion p = getPartitionedRegion(); int numBuckets = p.getAttributes().getPartitionAttributes().getTotalNumBuckets(); - ProxyBucketRegion[] bucs = new ProxyBucketRegion[numBuckets]; + ProxyBucketRegion[] buckets = new ProxyBucketRegion[numBuckets]; InternalRegionArguments args = new InternalRegionArguments(); args.setPartitionedRegionAdvisor(this); - for (int i = 0; i < bucs.length; i++) { - bucs[i] = new ProxyBucketRegion(i, p, args); - bucs[i].initialize(); + for (int i = 0; i < buckets.length; i++) { + buckets[i] = new ProxyBucketRegion(i, p, args); + buckets[i].initialize(); } - buckets = bucs; + this.buckets = buckets; } /** @@ -147,12 +149,11 
@@ public synchronized void initializeRegionAdvisor() { */ public void processProfilesQueuedDuringInitialization() { synchronized (preInitQueueMonitor) { - Iterator pi = preInitQueue.iterator(); + Iterator pi = preInitQueue.iterator(); boolean finishedInitQueue = false; try { while (pi.hasNext()) { - Object o = pi.next(); - QueuedBucketProfile qbp = (QueuedBucketProfile) o; + QueuedBucketProfile qbp = pi.next(); if (!qbp.isRemoval) { if (logger.isTraceEnabled(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE)) { logger.trace(LogMarker.DISTRIBUTION_ADVISOR_VERBOSE, @@ -238,10 +239,9 @@ public Map> getAllClientBucketProfiles() { Map> bucketToServerLocations = new HashMap<>(); for (Integer bucketId : clientBucketProfilesMap.keySet()) { ArrayList clientBucketProfiles = new ArrayList<>(); - for (BucketProfile profile : clientBucketProfilesMap.get(bucketId)) { + for (ServerBucketProfile profile : clientBucketProfilesMap.get(bucketId)) { if (profile.isHosting) { - ServerBucketProfile cProfile = (ServerBucketProfile) profile; - Set bucketServerLocations = cProfile.getBucketServerLocations(); + Set bucketServerLocations = profile.getBucketServerLocations(); // Either we can make BucketServeLocation having ServerLocation with them // Or we can create bucketServerLocation as it is by iterating over the set of servers clientBucketProfiles.addAll(bucketServerLocations); @@ -296,7 +296,7 @@ public ConcurrentHashMap> getAllClientBucketPr } } if (logger.isDebugEnabled()) { - logger.debug("This maps is sksk {} and size is {}", map, map.keySet().size()); + logger.debug("This maps is sksk {} and size is {}", map, map.size()); } return map; } @@ -459,7 +459,7 @@ public void markBucketsOnMember(DistributedMember member, boolean sick) { return; } for (int i = 0; i < buckets.length; i++) { - if (sick && !buckets[i].getBucketOwners().contains(member)) { + if (sick && !buckets[i].getBucketOwners().contains((InternalDistributedMember) member)) { continue; } buckets[i].setBucketSick(member, sick); @@ -476,7 +476,7 @@ public void updateBucketStatus(int bucketId, DistributedMember member, boolean p } else { ResourceAdvisor advisor = getPartitionedRegion().getCache().getResourceAdvisor(); - boolean sick = advisor.adviseCriticalMembers().contains(member); + boolean sick = advisor.adviseCriticalMembers().contains((InternalDistributedMember) member); if (logger.isDebugEnabled()) { logger.debug("updateBucketStatus:({}):member:{}:sick:{}", getPartitionedRegion().bucketStringForLogs(bucketId), member, sick); @@ -669,11 +669,11 @@ public void waitForProfileStatus(int status) { int memberNum; String regionName = getPartitionedRegion().getFullPath(); do { - Region pr = getPartitionedRegion().getCache().getRegion(regionName); + Region pr = getPartitionedRegion().getCache().getRegion(regionName); if (pr == null || pr.isDestroyed()) { break; } - Set members = adviseNotAtShutDownAllStatus(status); + Set members = adviseNotAtShutDownAllStatus(status); memberNum = members.size(); if (memberNum > 0) { if (logger.isDebugEnabled()) { @@ -863,7 +863,7 @@ public Set adviseAllPRNodes() { }); } - Set adviseAllServersWithInterest() { + Set adviseAllServersWithInterest() { return adviseFilter(profile -> { CacheProfile prof = (CacheProfile) profile; return prof.hasCacheServer && prof.filterProfile != null @@ -1114,7 +1114,7 @@ public int size() { } @Override - public Iterator iterator() { + public @NotNull Iterator iterator() { return new BucketSetIterator(); } @@ -1299,7 +1299,7 @@ public void notPrimary(int bucketId, InternalDistributedMember 
wasPrimary) { * * @return set of InternalDistributedMember ids */ - public Set advisePrimaryOwners() { + public Set advisePrimaryOwners() { ProxyBucketRegion[] bucs = buckets; HashSet hs = new HashSet<>(); for (int i = 0; i < bucs.length; i++) { @@ -1513,7 +1513,8 @@ public int getCreatedBucketsCount() { * @return a list of BucketProfileAndId instances; may be null * @since GemFire 5.5 */ - public ArrayList getBucketRegionProfiles() { + @Nullable + public List getBucketRegionProfiles() { final ProxyBucketRegion[] bucs = buckets; if (bucs == null) { return null; @@ -1539,7 +1540,7 @@ public ArrayList getBucketRegionProfiles() { * * @since GemFire 5.5 */ - public void putBucketRegionProfiles(ArrayList l) { + public void putBucketRegionProfiles(List l) { for (BucketProfileAndId bp : l) { int id = bp.getId(); getBucket(id).getBucketAdvisor().putProfile(bp.getBucketProfile()); @@ -1633,7 +1634,7 @@ private class ProfileShutdownListener implements ProfileListener { private boolean profileChanged = false; void waitForChange() { - Region pr = getPartitionedRegion(); + PartitionedRegion pr = getPartitionedRegion(); synchronized (this) { while (!profileChanged && pr != null && !pr.isDestroyed()) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java index ccf37197d212..dd2366529679 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveAllPRMessage.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned; +import static org.apache.commons.lang3.ObjectUtils.isEmpty; + import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -32,8 +34,6 @@ import org.apache.geode.cache.EntryNotFoundException; import org.apache.geode.cache.Operation; import org.apache.geode.cache.RegionDestroyedException; -import org.apache.geode.cache.client.PoolFactory; -import org.apache.geode.distributed.DistributedMember; import org.apache.geode.distributed.internal.ClusterDistributionManager; import org.apache.geode.distributed.internal.DirectReplyProcessor; import org.apache.geode.distributed.internal.DistributionManager; @@ -64,6 +64,7 @@ import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID; import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList; import org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException; +import org.apache.geode.internal.cache.versions.VersionSource; import org.apache.geode.internal.cache.versions.VersionTag; import org.apache.geode.internal.logging.log4j.LogMarker; import org.apache.geode.internal.offheap.annotations.Released; @@ -130,7 +131,8 @@ public void addEntry(RemoveAllEntryData entry) { removeAllPRData[removeAllPRDataSize++] = entry; } - public void initMessage(PartitionedRegion r, Set recipients, boolean notifyOnly, + public void initMessage(PartitionedRegion r, Set recipients, + boolean notifyOnly, DirectReplyProcessor p) { setInternalDs(r.getSystem()); setDirectAck(false); @@ -161,8 +163,8 @@ public int getSize() { return removeAllPRDataSize; } - public Set getKeys() { - Set keys = new HashSet(getSize()); + public Set getKeys() { + Set keys = new HashSet<>(getSize()); for (final RemoveAllEntryData removeAllPRDatum : removeAllPRData) { if (removeAllPRDatum != null) { keys.add(removeAllPRDatum.getKey()); @@ 
-180,11 +182,11 @@ public Set getKeys() { * that no acknowledgement will be sent * @throws ForceReattemptException if the peer is no longer available */ - public PartitionResponse send(DistributedMember recipient, PartitionedRegion r) + public PartitionResponse send(InternalDistributedMember recipient, PartitionedRegion r) throws ForceReattemptException { // Assert.assertTrue(recipient != null, "RemoveAllPRMessage NULL recipient"); recipient can be // null for event notifications - Set recipients = Collections.singleton(recipient); + Set recipients = Collections.singleton(recipient); RemoveAllResponse p = new RemoveAllResponse(r.getSystem(), recipients); initMessage(r, recipients, false, p); setTransactionDistributed(r.getCache().getTxManager().isDistributed()); @@ -192,8 +194,8 @@ public PartitionResponse send(DistributedMember recipient, PartitionedRegion r) logger.debug("RemoveAllPRMessage.send: recipient is {}, msg is {}", recipient, this); } - Set failures = r.getDistributionManager().putOutgoing(this); - if (failures != null && failures.size() > 0) { + Set failures = r.getDistributionManager().putOutgoing(this); + if (!isEmpty(failures)) { throw new ForceReattemptException("Failed sending <" + this + ">"); } return p; @@ -260,7 +262,7 @@ public void toData(DataOutput out, hasTags = true; } - VersionTag tag = removeAllPRData[i].versionTag; + VersionTag> tag = removeAllPRData[i].versionTag; versionTags.add(tag); removeAllPRData[i].versionTag = null; removeAllPRData[i].serializeTo(out, context); @@ -312,9 +314,7 @@ public EventID getEventID() { @Override protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion r, long startTime) throws EntryExistsException, DataLocationException { - boolean sendReply = true; - - InternalDistributedMember eventSender = getSender(); + final InternalDistributedMember eventSender = getSender(); try { result = doLocalRemoveAll(r, eventSender, true); @@ -323,9 +323,7 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part return false; } - if (sendReply) { - sendReply(getSender(), getProcessorId(), dm, null, r, startTime); - } + sendReply(getSender(), getProcessorId(), dm, null, r, startTime); return false; } @@ -356,18 +354,16 @@ protected Object clone() throws CloneNotSupportedException { * * @param r partitioned region * @param eventSender the endpoint server who received request from client - * @param cacheWrite if true invoke cacheWriter before desrtoy + * @param cacheWrite if true invoke cacheWriter before destroy * @return If succeeds, return true, otherwise, throw exception */ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IMSE_DONT_CATCH_IMSE") public boolean doLocalRemoveAll(PartitionedRegion r, InternalDistributedMember eventSender, boolean cacheWrite) throws EntryExistsException, DataLocationException { - boolean didRemove = false; - long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT; if (r.hasServerProxy()) { - clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout(); if (logger.isDebugEnabled()) { + long clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout(); logger.debug("RemoveAllPRMessage: doLocalRemoveAll: clientReadTimeOut is {}", clientReadTimeOut); } @@ -376,11 +372,11 @@ public boolean doLocalRemoveAll(PartitionedRegion r, InternalDistributedMember e DistributedRemoveAllOperation op = null; @Released EntryEventImpl baseEvent = null; - BucketRegion bucketRegion = null; - PartitionedRegionDataStore ds = r.getDataStore(); - 
InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId(); + final PartitionedRegionDataStore ds = r.getDataStore(); + final InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId(); try { + BucketRegion bucketRegion = null; if (!notificationOnly) { // bucketRegion is not null only when !notificationOnly bucketRegion = ds.getInitializedBucketForId(null, bucketId); @@ -458,6 +454,7 @@ public boolean doLocalRemoveAll(PartitionedRegion r, InternalDistributedMember e // ev will be added into the op in removeLocally() // real operation will be modified into ev in removeLocally() // then in basicPutPart3(), the ev is added into op + boolean didRemove; try { r.getDataView().destroyOnRemote(ev, cacheWrite, null); didRemove = true; @@ -478,7 +475,7 @@ public boolean doLocalRemoveAll(PartitionedRegion r, InternalDistributedMember e if (ev.getVersionTag() == null) { if (logger.isDebugEnabled()) { logger.debug( - "doLocalRemoveAll:RemoveAll encoutered EntryNotFoundException: event={}", + "doLocalRemoveAll:RemoveAll encountered EntryNotFoundException: event={}", ev); } } @@ -632,11 +629,6 @@ public static EntryEventImpl getEventFromEntry(InternalRegion r, InternalDistrib } } - // override reply processor type from PartitionMessage - PartitionResponse createReplyProcessor(PartitionedRegion r, Set recipients, Object key) { - return new RemoveAllResponse(r.getSystem(), recipients); - } - // override reply message type from PartitionMessage @Override protected void sendReply(InternalDistributedMember member, int procId, DistributionManager dm, @@ -664,7 +656,7 @@ protected void appendFields(StringBuilder buff) { buff.append("; directAck=").append(directAck); for (int i = 0; i < removeAllPRDataSize; i++) { - buff.append("; entry" + i + ":").append(removeAllPRData[i].getKey()).append(",") + buff.append("; entry").append(i).append(":").append(removeAllPRData[i].getKey()).append(",") .append(removeAllPRData[i].versionTag); } } @@ -683,7 +675,7 @@ public void setDirectAck(boolean directAck) { @Override protected boolean mayNotifySerialGatewaySender(ClusterDistributionManager dm) { - return notifiesSerialGatewaySender(dm); + return notifiesSerialGatewaySender(); } public static class RemoveAllReplyMessage extends ReplyMessage { @@ -769,7 +761,7 @@ public void toData(DataOutput out, @Override public String toString() { - return "RemoveAllReplyMessage " + "processorid=" + processorId + return "RemoveAllReplyMessage " + "processorId=" + processorId + " returning " + result + " exception=" + getException() + " versions= " + versions; } @@ -785,7 +777,8 @@ public static class RemoveAllResponse extends PartitionResponse { private volatile boolean returnValue; private VersionedObjectList versions; - public RemoveAllResponse(InternalDistributedSystem ds, Set recipients) { + public RemoveAllResponse(InternalDistributedSystem ds, + Set recipients) { super(ds, recipients, false); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveBucketMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveBucketMessage.java index 8bd2aa8c4a85..59f5e3c1f923 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveBucketMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/RemoveBucketMessage.java @@ -80,7 +80,7 @@ public static RemoveBucketResponse send(InternalDistributedMember recipient, Assert.assertTrue(recipient != null, 
"RemoveBucketMessage NULL recipient"); - RemoveBucketResponse response = new RemoveBucketResponse(region.getSystem(), recipient, region); + RemoveBucketResponse response = new RemoveBucketResponse(region.getSystem(), recipient); RemoveBucketMessage msg = new RemoveBucketMessage(recipient, region.getPRId(), response, bucketId, forceRemovePrimary); msg.setTransactionDistributed(region.getCache().getTxManager().isDistributed()); @@ -220,7 +220,7 @@ public void fromData(DataInput in, @Override public String toString() { - return "RemoveBucketReplyMessage " + "processorid=" + processorId + return "RemoveBucketReplyMessage " + "processorId=" + processorId + " removed=" + removed + " reply to sender " + getSender(); } @@ -233,8 +233,7 @@ public static class RemoveBucketResponse extends PartitionResponse { private volatile boolean removed = false; - public RemoveBucketResponse(InternalDistributedSystem ds, InternalDistributedMember recipient, - PartitionedRegion theRegion) { + public RemoveBucketResponse(InternalDistributedSystem ds, InternalDistributedMember recipient) { super(ds, recipient); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java index 3b3c8bb1de89..fe1c333530d8 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizeMessage.java @@ -68,7 +68,7 @@ public SizeMessage() {} private transient boolean estimate; /** - * The message sent to a set of {@link InternalDistributedMember}s to caculate the number of + * The message sent to a set of {@link InternalDistributedMember}s to calculate the number of * Entries in each of their buckets * * @param recipients members to receive the message @@ -76,7 +76,8 @@ public SizeMessage() {} * @param processor the reply processor used to wait on the response * @param bucketIds the list of bucketIds to get the size for or null for all buckets */ - private SizeMessage(Set recipients, int regionId, ReplyProcessor21 processor, + private SizeMessage(Set recipients, int regionId, + ReplyProcessor21 processor, ArrayList bucketIds, boolean estimate) { super(recipients, regionId, processor); if (bucketIds != null && bucketIds.isEmpty()) { @@ -95,7 +96,8 @@ private SizeMessage(Set recipients, int regionId, ReplyProcessor21 processor, * @param r the local PartitionedRegion instance * @param bucketIds the buckets to look for, or null for all buckets */ - public static SizeResponse send(Set recipients, PartitionedRegion r, ArrayList bucketIds, + public static SizeResponse send(Set recipients, PartitionedRegion r, + ArrayList bucketIds, boolean estimate) { Assert.assertTrue(recipients != null, "SizeMessage NULL recipients set"); SizeResponse p = new SizeResponse(r.getSystem(), recipients); @@ -138,11 +140,12 @@ protected short computeCompressedShort(short s) { } @Override - protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, PartitionedRegion r, + protected boolean operateOnPartitionedRegion(ClusterDistributionManager distributionManager, + PartitionedRegion region, long startTime) throws CacheException, ForceReattemptException { Map sizes; - if (r != null) { - PartitionedRegionDataStore ds = r.getDataStore(); + if (region != null) { + PartitionedRegionDataStore ds = region.getDataStore(); if (ds != null) { // datastore exists if (bucketIds != null) { if (estimate) { @@ -157,15 +160,14 
@@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part sizes = ds.getSizeForLocalBuckets(); } } - r.getPrStats().endPartitionMessagesProcessing(startTime); - SizeReplyMessage.send(getSender(), getProcessorId(), dm, sizes); + region.getPrStats().endPartitionMessagesProcessing(startTime); + SizeReplyMessage.send(getSender(), getProcessorId(), distributionManager, sizes); } // datastore exists else { logger.warn("SizeMessage: data store not configured for this member"); ReplyMessage.send(getSender(), getProcessorId(), - new ReplyException(new ForceReattemptException( - "no datastore here")), - dm, r.isInternalRegion()); + new ReplyException(new ForceReattemptException("no datastore here")), + distributionManager, region.isInternalRegion()); } } else { if (logger.isDebugEnabled()) { @@ -176,8 +178,8 @@ protected boolean operateOnPartitionedRegion(ClusterDistributionManager dm, Part ReplyMessage.send(getSender(), getProcessorId(), new ReplyException(new ForceReattemptException( String.format("%s : could not find partitioned region with Id %s", - dm.getDistributionManagerId(), regionId))), - dm, r != null && r.isInternalRegion()); + distributionManager.getDistributionManagerId(), regionId))), + distributionManager, false); } // Unless there was an exception thrown, this message handles sending the // response @@ -281,7 +283,7 @@ public void fromData(DataInput in, @Override public String toString() { - return getClass().getName() + " processorid=" + processorId + return getClass().getName() + " processorId=" + processorId + " reply to sender " + getSender() + " returning bucketSizes.size=" + getBucketSizes().size(); } @@ -300,12 +302,13 @@ public Map getBucketSizes() { public static class SizeResponse extends ReplyProcessor21 { private final HashMap returnValue = new HashMap<>(); - public SizeResponse(InternalDistributedSystem ds, Set recipients) { - super(ds, recipients); + public SizeResponse(InternalDistributedSystem distributedSystem, + Set recipients) { + super(distributedSystem, recipients); } /** - * The SizeResponse processor ignores remote exceptions by implmenting this method. Ignoring + * The SizeResponse processor ignores remote exceptions by implementing this method. Ignoring * remote exceptions is acceptable since the SizeMessage is sent to all Nodes and all * {@link SizeMessage.SizeReplyMessage}s are processed for each individual bucket size. The hope * is that any failure due to an exception will be covered by healthy Nodes. diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizedBasedLoadProbe.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizedBasedLoadProbe.java index 576fbaaf21c1..5f8b78869cd8 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizedBasedLoadProbe.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/SizedBasedLoadProbe.java @@ -34,7 +34,6 @@ */ public class SizedBasedLoadProbe implements LoadProbe, DataSerializableFixedID { private static final long serialVersionUID = 7040814060882774875L; - // TODO rebalancing come up with a better threshold for minumum bucket size? 
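The MIN_BUCKET_SIZE field that follows is resolved through Integer.getInteger with the `gemfire.` system-property prefix, the same mechanism the properties.md table in this patch documents. A minimal sketch of that lookup pattern, assuming a hard-coded prefix in place of GeodeGlossary.GEMFIRE_PREFIX and a hypothetical demo class name:

```java
public class PropertyDefaultDemo {
  // stand-in for GeodeGlossary.GEMFIRE_PREFIX
  private static final String GEMFIRE_PREFIX = "gemfire.";

  // Integer.getInteger reads the system property if set, else returns the default (1).
  public static final int MIN_BUCKET_SIZE =
      Integer.getInteger(GEMFIRE_PREFIX + "MIN_BUCKET_SIZE", 1);

  public static void main(String[] args) {
    // e.g. java -Dgemfire.MIN_BUCKET_SIZE=1024 PropertyDefaultDemo
    System.out.println("MIN_BUCKET_SIZE = " + MIN_BUCKET_SIZE);
  }
}
```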
public static final int MIN_BUCKET_SIZE = Integer.getInteger(GeodeGlossary.GEMFIRE_PREFIX + "MIN_BUCKET_SIZE", 1); @@ -78,7 +77,6 @@ public int getDSFID() { @Override public KnownVersion[] getSerializationVersions() { - // TODO Auto-generated method stub return null; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperator.java index adc17a835e16..09e77bfd46e6 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperator.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperator.java @@ -27,7 +27,7 @@ public interface BucketOperator { /** * Create a redundancy copy of a bucket on a given node. This call may be asynchronous, it will - * notify the completion when the the operation is done. + * notify the completion when the operation is done. * * Note that the completion is not required to be threadsafe, so implementors should ensure the * completion is invoked by the calling thread of createRedundantBucket, usually by invoking the @@ -73,12 +73,12 @@ boolean movePrimary(InternalDistributedMember source, InternalDistributedMember /** * Wait for any pending asynchronous operations that this thread submitted earlier to complete. - * Currently only createRedundantBucket may be asynchronous. + * Only createRedundantBucket may be asynchronous. */ void waitForOperations(); /** - * Callbacks for asnychonous operations. These methods will be invoked when an ansynchronous + * Callbacks for asynchronous operations. These methods will be invoked when an asynchronous * operation finishes. * * The completions are NOT THREADSAFE. diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperatorWrapper.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperatorWrapper.java index 1697eaabc954..42d9312f07c3 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperatorWrapper.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/BucketOperatorWrapper.java @@ -110,7 +110,7 @@ public void onSuccess() { for (PartitionRebalanceDetailsImpl details : detailSet) { String regionPath = details.getRegionPath(); Long lrb = colocatedRegionBytes.get(regionPath); - if (lrb != null) { // region could have gone away - esp during shutdow + if (lrb != null) { // region could have gone away - esp during shutdown long regionBytes = lrb; // Only add the elapsed time to the leader region. details.incCreates(regionBytes, details.getRegion().equals(leaderRegion) ? elapsed : 0); @@ -167,7 +167,7 @@ public boolean removeBucket(InternalDistributedMember targetMember, int i, for (PartitionRebalanceDetailsImpl details : detailSet) { String regionPath = details.getRegionPath(); Long lrb = colocatedRegionBytes.get(regionPath); - if (lrb != null) { // region could have gone away - esp during shutdow + if (lrb != null) { // region could have gone away - esp during shutdown long regionBytes = lrb; // Only add the elapsed time to the leader region. details.incRemoves(regionBytes, details.getRegion().equals(leaderRegion) ? 
elapsed : 0); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ExplicitMoveDirector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ExplicitMoveDirector.java index 5879d63d77a2..2b581fb1c440 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ExplicitMoveDirector.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ExplicitMoveDirector.java @@ -33,7 +33,7 @@ public class ExplicitMoveDirector extends RebalanceDirectorAdapter { private final InternalDistributedMember source; private final InternalDistributedMember target; private final Object key; - private final InternalDistributedSystem ds; + private final InternalDistributedSystem distributedSystem; public ExplicitMoveDirector(Object key, int bucketId, DistributedMember source, @@ -42,7 +42,7 @@ public ExplicitMoveDirector(Object key, int bucketId, DistributedMember source, this.bucketId = bucketId; this.source = (InternalDistributedMember) source; this.target = (InternalDistributedMember) target; - ds = (InternalDistributedSystem) distributedSystem; + this.distributedSystem = (InternalDistributedSystem) distributedSystem; } @Override @@ -89,14 +89,15 @@ public boolean nextStep() { if (reason.willAccept()) { if (!model.moveBucket(new Move(sourceMember, targetMember, bucket))) { // Double check to see if the source or destination have left the DS - Set allMembers = ds.getDistributionManager().getDistributionManagerIdsIncludingAdmin(); - if (!allMembers.contains(sourceMember)) { + Set allMembers = + distributedSystem.getDistributionManager().getDistributionManagerIdsIncludingAdmin(); + if (!allMembers.contains(sourceMember.getDistributedMember())) { throw new IllegalStateException( String.format( "Source member does not exist or is not a data store for the partitioned region %s: %s", model.getName(), source)); } - if (!allMembers.contains(targetMember)) { + if (!allMembers.contains(targetMember.getDistributedMember())) { throw new IllegalStateException( String.format( "Target member does not exist or is not a data store for the partitioned region %s: %s", diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ParallelBucketOperator.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ParallelBucketOperator.java index 2b4fee434a3f..2a3fef476bb6 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ParallelBucketOperator.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/ParallelBucketOperator.java @@ -46,16 +46,14 @@ public class ParallelBucketOperator implements BucketOperator { private final ExecutorService executor; private final Semaphore operationSemaphore; private final int maxParallelOperations; - private final ConcurrentLinkedQueue pendingSuccess = - new ConcurrentLinkedQueue<>(); - private final ConcurrentLinkedQueue pendingFailure = - new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue pendingSuccess = new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue pendingFailure = new ConcurrentLinkedQueue<>(); /** * Create a parallel bucket operator * - * @param maxParallelOperations The number of operations that can execute concurrently. Futher + * @param maxParallelOperations The number of operations that can execute concurrently. Additional * calls to createRedundantBucket will block. 
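The maxParallelOperations contract just described (further createRedundantBucket calls block once the limit is reached, and the non-threadsafe completions are handed back to the submitting thread) can be realized with a counting semaphore plus a completion queue. A sketch under those assumptions, with hypothetical names; it is not the patched ParallelBucketOperator itself, and rejection handling is omitted for brevity:

```java
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Semaphore;

public class BoundedAsyncRunner {
  private final Semaphore permits;
  private final ExecutorService executor;
  private final ConcurrentLinkedQueue<Runnable> pendingCompletions =
      new ConcurrentLinkedQueue<>();

  public BoundedAsyncRunner(int maxParallelOperations, ExecutorService executor) {
    permits = new Semaphore(maxParallelOperations);
    this.executor = executor;
  }

  /** Blocks once maxParallelOperations tasks are in flight. */
  public void submit(Runnable operation, Runnable completion) throws InterruptedException {
    permits.acquire(); // throttle: the caller blocks here at the limit
    executor.execute(() -> {
      try {
        operation.run();
        pendingCompletions.add(completion); // deferred; run on the submitting thread
      } finally {
        permits.release();
      }
    });
  }

  /** Called from the submitting thread, so completions never run concurrently. */
  public void drainCompletions() {
    Runnable next;
    while ((next = pendingCompletions.poll()) != null) {
      next.run();
    }
  }
}
```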
* @param executor the executor to submit tasks to. This executor should be able to create at * least maxParallelOperations threads. @@ -98,10 +96,7 @@ public void onFailure() { pendingFailure.add(completion); } }); - } catch (CancelException e) { - // ignore - } catch (RegionDestroyedException e) { - // ignore + } catch (CancelException | RegionDestroyedException ignored) { } finally { operationSemaphore.release(); } @@ -129,7 +124,7 @@ public boolean movePrimary(InternalDistributedMember source, InternalDistributed } public void drainCompletions() { - Completion next = null; + Completion next; while ((next = pendingSuccess.poll()) != null) { next.onSuccess(); } @@ -141,7 +136,7 @@ public void drainCompletions() { } /** - * Wait for any pending operations, and notify the the completions that the operations and done. + * Wait for any pending operations, and notify the completions that the operations are done. */ @Override public void waitForOperations() { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/PercentageMoveDirector.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/PercentageMoveDirector.java index c066028cf643..2e51e010cc51 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/PercentageMoveDirector.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/PercentageMoveDirector.java @@ -33,7 +33,8 @@ * This uses a first fit decreasing strategy to choose which buckets to move. It sorts the buckets * by size, and then moves the largest bucket that is below the load we are trying to move. * - * An improvement would be find the bucket that can be moved with the least cost for the most load + * An improvement would be to find the bucket that can be moved with the least cost for the most + * load * change, but because the load probe currently uses the same value for load and cost, there's no * need to complicate things now. * @@ -124,7 +125,7 @@ public boolean nextStep() { float load = bucket.getLoad(); - // See if we can move this bucket to the taret node. + // See if we can move this bucket to the target node. if (targetMember.willAcceptBucket(bucket, sourceMember, model.enforceUniqueZones()) .willAccept()) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/PartitionedRegionLoadModel.java b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/PartitionedRegionLoadModel.java index b8d1542b340d..9f6a8b4b49e4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/PartitionedRegionLoadModel.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/partitioned/rebalance/model/PartitionedRegionLoadModel.java @@ -14,6 +14,8 @@ */ package org.apache.geode.internal.cache.partitioned.rebalance.model; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; + import java.util.Collection; import java.util.Comparator; import java.util.HashMap; @@ -43,23 +45,23 @@ import org.apache.geode.logging.internal.log4j.api.LogService; /** - * A model of the load on all of the members for a partitioned region. This model is used to find - * the best members to create buckets on or move buckets or primaries too. All of the actual work of + * A model of the load on all the members for a partitioned region. This model is used to find * the best members to create buckets on or move buckets or primaries to.
All the actual work of * creating a copy, moving a primary, etc., is performed by the BucketOperator that is passed to the * constructor. * * To use, create a model and populate it using the addMember method. addMember takes a region - argument, to indicate which region the data is for. All of the regions added to a single model + argument, to indicate which region the data is for. All the regions added to a single model * are assumed to be colocated, and the model adds together the load from each of the individual - regions to balance all of the regions together. + regions to balance all the regions together. * * Rebalancing operations are performed by repeatedly calling model.nextStep until it returns false. * Each call to nextStep should perform another operation. The model will make callbacks to the - BucketOperator you provide to the contructor perform the actual create or move. + BucketOperator you provide to the constructor to perform the actual create or move. * * While creating redundant copies or moving buckets, this model tries to minimize the standard * deviation in the weighted loads for the members. The weighted load for the member is the sum of - the load for all of the buckets on the member divided by that members weight. + the load for all the buckets on the member divided by that member's weight. * * This model is not threadsafe. * @@ -179,8 +181,8 @@ public void addRegion(String region, OfflineMemberDetails offlineDetails, boolean enforceLocalMaxMemory) { allColocatedRegions.add(region); // build up a list of members and an array of buckets for this - // region. Each bucket has a reference to all of the members - // that host it and each member has a reference to all of the buckets + // region. Each bucket has a reference to all the members + // that host it and each member has a reference to all the buckets // it hosts Map regionMember = new HashMap<>(); Bucket[] regionBuckets = new Bucket[buckets.length]; @@ -234,7 +236,7 @@ public void addRegion(String region, for (int i = 0; i < buckets.length; i++) { if (regionBuckets[i] == null) { // do nothing, this bucket is not hosted for this region. - // [sumedh] remove from buckets array too to be consistent since + // remove from buckets array too to be consistent since // this method will be invoked repeatedly for all colocated regions, // and then we may miss some colocated regions for a bucket leading // to all kinds of issues later @@ -247,7 +249,7 @@ public void addRegion(String region, buckets[i] = new BucketRollup(i); } - // Add all of the members hosting the bucket to the rollup + // Add all the members hosting the bucket to the rollup for (Member member : regionBuckets[i].getMembersHosting()) { InternalDistributedMember memberId = member.getDistributedMember(); buckets[i].addMember(members.get(memberId)); @@ -463,7 +465,7 @@ private void identifyOverRedundantBuckets() { /** * Determine if the passed in bucket is on more than one member in a zone and mark it as - * overredundant. If by marking a bucket over redundant, that would make the redundancy + * over-redundant. If by marking a bucket over redundant, that would make the redundancy * insufficient, add the bucket to lowRedundancy as well so a member in a different zone * can host it.
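The class comment above prescribes the driver loop: populate the model, then call nextStep repeatedly until it returns false. A compact sketch of that loop; the LoadModelSteps interface is a hypothetical stand-in for the model's stepping API, not the real class:

```java
// hypothetical stand-in for the model's public stepping API
interface LoadModelSteps {
  boolean nextStep(); // performs one create/move per call; false when balanced
}

final class RebalanceDriver {
  static int rebalance(LoadModelSteps model) {
    int operations = 0;
    // Each call performs one operation via the BucketOperator callbacks.
    while (model.nextStep()) {
      operations++;
    }
    return operations;
  }
}
```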
* @@ -488,7 +490,7 @@ private void determineOverRedundancyInZones( lowRedundancyBuckets.add(bucketRollup); } } else { - // otherwise add the redundancy zone to the list of redundancy zones + // otherwise, add the redundancy zone to the list of redundancy zones redundancyZonesFound.add(redundancyZone); } } @@ -603,11 +605,11 @@ public Move findBestRemove(Bucket bucket) { public Move findBestTargetForFPR(Bucket bucket, boolean checkIPAddress) { InternalDistributedMember targetMemberID; Member targetMember; - List fpas = + List attributes = partitionedRegion.getFixedPartitionAttributesImpl(); - if (fpas != null) { - for (FixedPartitionAttributesImpl fpaImpl : fpas) { + if (attributes != null) { + for (FixedPartitionAttributesImpl fpaImpl : attributes) { if (fpaImpl.hasBucket(bucket.getId())) { targetMemberID = partitionedRegion.getDistributionManager().getDistributionManagerId(); @@ -615,7 +617,7 @@ public Move findBestTargetForFPR(Bucket bucket, boolean checkIPAddress) { targetMember = members.get(targetMemberID); if (targetMember.willAcceptBucket(bucket, null, checkIPAddress).willAccept()) { // We should have just one move for creating - // all the buckets for a FPR on this node. + // all the buckets for an FPR on this node. return new Move(null, targetMember, bucket); } } @@ -639,7 +641,7 @@ public boolean movePrimary(Move bestMove) { boolean entryAdded = attemptedPrimaryMoves.add(bestMove); Assert.assertTrue(entryAdded, - "PartitionedRegionLoadModel.movePrimarys - excluded set is not growing, so we probably would have an infinite loop here"); + "PartitionedRegionLoadModel.movePrimary - excluded set is not growing, so we probably would have an infinite loop here"); return successfulMove; } @@ -706,7 +708,7 @@ private float getAverageLoad() { } /** - * Calculate the minimum improvement in variance that will we consider worth while. Currently this + * Calculate the minimum improvement in variance that will we consider worthwhile. This * is calculated as the improvement in variance that would occur by removing the smallest bucket * from the member with the largest weight. */ @@ -734,7 +736,7 @@ private double getMinPrimaryImprovement() { } /** - * Calculate the minimum improvement in variance that will we consider worth while. Currently this + * Calculate the minimum improvement in variance that will we consider worthwhile. This * is calculated as the improvement in variance that would occur by removing the smallest bucket * from the member with the largest weight. */ @@ -856,7 +858,7 @@ public boolean moveBucket(Move bestMove) { * @return a set of partitioned member details. 
*/ public Set getPartitionedMemberDetails(String region) { - TreeSet result = new TreeSet<>(); + TreeSet result = new TreeSet<>(); for (MemberRollup member : members.values()) { Member colocatedMember = member.getColocatedMember(region); if (colocatedMember != null) { @@ -865,7 +867,7 @@ public Set getPartitionedMemberDetails(String region) { colocatedMember.getBucketCount(), colocatedMember.getPrimaryCount())); } } - return result; + return uncheckedCast(result); } /** diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/properties.md b/geode-core/src/main/java/org/apache/geode/internal/cache/properties.md index 6da71389ceb9..e94559b79bb6 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/properties.md +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/properties.md @@ -62,7 +62,7 @@ DistributionConfigImpl constructor prepends `gemfire.` to each valid attribute n | AdminDistributedSystemImpl.TIMEOUT_MS | Integer | `60000` | See `org.apache.geode.admin.internal.AdminDistributedSystemImpl#TIMEOUT_MS`. | | AvailablePort.fastRandom | Boolean | `false` | See `org.apache.geode.internal.AvailablePort`.

If true, an instance of `java.util.Random` is used instead of `java.security.SecureRandom` to randomly select a port.

This property is available in case there is a performance issue involved with random port selection.

| | AvailablePort.timeout | Integer | `2000` | See `org.apache.geode.internal.AvailablePort#isPortAvailable`.

When establishing a locator, this sets the `SO_TIMEOUT` characteristic on the UDP port that we attempt to test.

Units are in milliseconds.

| -| BridgeServer.HANDSHAKE_POOL_SIZE | Integer | `4` | See `org.apache.geode.internal.cache.tier.sockets.AcceptorImpl#HANDSHAKE_POOL_SIZE`. | +| BridgeServer.HANDSHAKE_POOL_SIZE | Integer | `50` | See `org.apache.geode.internal.cache.tier.sockets.AcceptorImpl#HANDSHAKE_POOL_SIZE`. | | BridgeServer.MAXIMUM_CHUNK_SIZE | Integer | `100` | See `org.apache.geode.internal.cache.tier.sockets.BaseCommand#MAXIMUM_CHUNK_SIZE`. | | BridgeServer.MAX_INCOMING_DATA | Integer | `-1` | See `org.apache.geode.internal.cache.tier.sockets.BaseCommand#MAX_INCOMING_DATA`.

Maximum number of concurrent incoming client message bytes that a cache server will allow. Once a server is working on this number, additional incoming client messages will wait until one of them completes or fails. The bytes are computed based on the size sent in the incoming message header.

| | BridgeServer.MAX_INCOMING_MSGS | Integer | `-1` | See `org.apache.geode.internal.cache.tier.sockets.BaseCommand#MAX_INCOMING_MSGS`.

Maximum number of concurrent incoming client messages that a cache server will allow. Once a server is working on this number additional incoming client messages will wait until one of them completes or fails. | diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java index f7e55b4e7f25..4da2df14b2e8 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/AcceptorImpl.java @@ -117,7 +117,7 @@ public class AcceptorImpl implements Acceptor, Runnable { private static final Logger logger = LogService.getLogger(); private static final boolean isJRockit = System.getProperty("java.vm.name").contains("JRockit"); - private static final int HANDSHAKER_DEFAULT_POOL_SIZE = 4; + private static final int HANDSHAKER_DEFAULT_POOL_SIZE = 50; private static final int CLIENT_QUEUE_INITIALIZATION_POOL_SIZE = 16; private final CacheServerStats stats; diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java index 9000a5503c00..1137f044dfa9 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/BaseCommand.java @@ -1252,7 +1252,7 @@ public static void appendNewRegisterInterestResponseChunkFromLocal( public static void appendNewRegisterInterestResponseChunk(final @NotNull LocalRegion region, final @NotNull VersionedObjectList values, final @NotNull Object riKeys, - final @NotNull Set> set, + final @NotNull Set>> set, final @NotNull ServerConnection servConn) throws IOException { for (Entry entry : set) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/VersionedObjectList.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/VersionedObjectList.java index 7495939a5b55..d50733a38f1a 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/VersionedObjectList.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/VersionedObjectList.java @@ -235,7 +235,7 @@ public void addVersion(VersionTag tag) { * save the current key/tag pairs in the given map * */ - public void saveVersions(Map vault) { + public void saveVersions(Map>> vault) { Iterator it = iterator(); while (it.hasNext()) { Entry e = it.next(); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java index 722904ee7c21..fdcbed176cd9 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66.java @@ -14,7 +14,9 @@ */ package org.apache.geode.internal.cache.tier.sockets.command; +import static java.lang.String.format; import static org.apache.geode.internal.cache.execute.ServerFunctionExecutor.DEFAULT_CLIENT_FUNCTION_TIMEOUT; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import java.io.IOException; import java.util.HashSet; @@ -71,18 +73,19 @@ public static Command getCommand() { public void 
cmdExecute(final @NotNull Message clientMessage, final @NotNull ServerConnection serverConnection, final @NotNull SecurityService securityService, long start) throws IOException { - String regionName = null; Object function = null; - Object args = null; - MemberMappedArgument memberMappedArg = null; + int functionTimeout = DEFAULT_CLIENT_FUNCTION_TIMEOUT; + byte hasResult = 0; + + final String regionName; + final Object args; + final MemberMappedArgument memberMappedArg; final boolean isBucketsAsFilter; final byte isReExecute; - Set filter = null; - byte hasResult = 0; - Set removedNodesSet = null; - int partNumber = 0; - byte functionState = 0; - int functionTimeout = DEFAULT_CLIENT_FUNCTION_TIMEOUT; + final Set filter; + final Set removedNodesSet; + final int partNumber; + final byte functionState; try { byte[] bytes = clientMessage.getPart(0).getSerializedForm(); functionState = bytes[0]; @@ -118,11 +121,10 @@ public void cmdExecute(final @NotNull Message clientMessage, partNumber = 7 + filterSize; int removedNodesSize = clientMessage.getPart(partNumber).getInt(); - removedNodesSet = - populateRemovedNodes(clientMessage, removedNodesSize, partNumber); + removedNodesSet = populateRemovedNodes(clientMessage, removedNodesSize, partNumber); } catch (ClassNotFoundException exception) { - logger.warn(String.format("Exception on server while executing function : %s", + logger.warn(format("Exception on server while executing function : %s", function), exception); if (hasResult == 1) { @@ -140,11 +142,11 @@ public void cmdExecute(final @NotNull Message clientMessage, return; } - CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper(); - Region region = crHelper.getRegion(regionName); + final CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper(); + final Region region = crHelper.getRegion(regionName); if (region == null) { String message = - String.format("The region named %s was not found during execute Function request.", + format("The region named %s was not found during execute Function request.", regionName); logger.warn("{}: {}", serverConnection.getName(), message); sendError(hasResult, clientMessage, message, serverConnection); @@ -164,7 +166,7 @@ public void cmdExecute(final @NotNull Message clientMessage, return; } } else { - functionObject = (Function) function; + functionObject = (Function) function; } // check if the caller is authorized to do this operation on server @@ -181,7 +183,7 @@ public void cmdExecute(final @NotNull Message clientMessage, new ServerToClientFunctionResultSender65(m, MessageType.EXECUTE_REGION_FUNCTION_RESULT, serverConnection, functionObject, executeContext); - AbstractExecution execution = + AbstractExecution execution = createExecution(args, memberMappedArg, isBucketsAsFilter, filter, removedNodesSet, region, resultSender); @@ -206,7 +208,7 @@ public void cmdExecute(final @NotNull Message clientMessage, writeReply(clientMessage, serverConnection); } } catch (IOException ioe) { - logger.warn(String.format("Exception on server while executing function : %s", + logger.warn(format("Exception on server while executing function : %s", function), ioe); final String message = "Server could not send the reply"; @@ -220,7 +222,7 @@ public void cmdExecute(final @NotNull Message clientMessage, resultSender.setException(fe); } else { if (setLastResultReceived(resultSender)) { - logger.warn(String.format("Exception on server while executing function : %s", + logger.warn(format("Exception on server while executing function : %s", function), 
fe); sendException(hasResult, clientMessage, message, serverConnection, fe); @@ -228,7 +230,7 @@ public void cmdExecute(final @NotNull Message clientMessage, } } catch (Exception e) { if (setLastResultReceived(resultSender)) { - logger.warn(String.format("Exception on server while executing function : %s", + logger.warn(format("Exception on server while executing function : %s", function), e); String message = e.getMessage(); @@ -243,42 +245,38 @@ public void cmdExecute(final @NotNull Message clientMessage, void logFunctionExceptionCause(Object function, Function functionObject, FunctionException fe, String message, Object cause) { if (cause instanceof InternalFunctionInvocationTargetException) { - // Fix for #44709: User should not be aware of + // User should not be aware of // InternalFunctionInvocationTargetException. No instance of // InternalFunctionInvocationTargetException is giving useful // information to user to take any corrective action hence logging // this at fine level logging // 1> When bucket is moved - // 2> Incase of HA FucntionInvocationTargetException thrown. Since - // it is HA, fucntion will be reexecuted on right node + // 2> In case of HA FunctionInvocationTargetException thrown. Since + // it is HA, function will be reexecuted on right node // 3> Multiple target nodes found for single hop operation // 4> in case of HA member departed if (logger.isDebugEnabled()) { - logger.debug(String.format("Exception on server while executing function: %s", - function), - fe); + logger.debug("Exception on server while executing function: {}", function, fe); } } else if (functionObject.isHA()) { - logger.warn("Exception on server while executing function : {}", - function + " :" + message); + logger.warn("Exception on server while executing function : {} :{}", function, message); } else { - logger.warn(String.format("Exception on server while executing function : %s", - function), - fe); + logger.warn("Exception on server while executing function : {}", function, fe); } } - AbstractExecution createExecution(Object args, MemberMappedArgument memberMappedArg, + AbstractExecution createExecution(IN args, + MemberMappedArgument memberMappedArg, boolean isBucketsAsFilter, Set filter, - Set removedNodesSet, Region region, + Set removedNodesSet, Region region, ServerToClientFunctionResultSender resultSender) { - AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region); + AbstractExecution execution = uncheckedCast(FunctionService.onRegion(region)); if (execution instanceof PartitionedRegionFunctionExecutor) { - execution = new PartitionedRegionFunctionExecutor((PartitionedRegion) region, filter, args, + execution = new PartitionedRegionFunctionExecutor<>((PartitionedRegion) region, filter, args, memberMappedArg, resultSender, removedNodesSet, isBucketsAsFilter); } else { - execution = new DistributedRegionFunctionExecutor((DistributedRegion) region, filter, args, + execution = new DistributedRegionFunctionExecutor<>((DistributedRegion) region, filter, args, memberMappedArg, resultSender); } return execution; @@ -289,7 +287,7 @@ boolean validateFunctionObject(Message clientMessage, ServerConnection serverCon Function functionObject) throws IOException { if (functionObject == null) { String message = - String.format("The function, %s, has not been registered", + format("The function, %s, has not been registered", function); logger.warn("{}: {}", serverConnection.getName(), message); sendError(hasResult, clientMessage, message, serverConnection); @@ -303,7 
+301,7 @@ boolean validateFunctionObject(Message clientMessage, ServerConnection serverCon } if (functionStateOnServerSide != functionState) { String message = - String.format("Function attributes at client and server don't match: %s", + format("Function attributes at client and server don't match: %s", function); logger.warn("{}: {}", serverConnection.getName(), message); sendError(hasResult, clientMessage, message, serverConnection); @@ -336,15 +334,14 @@ MemberMappedArgument extractMemberMappedArgument(Part part) return memberMappedArg; } - Set populateRemovedNodes(Message clientMessage, int removedNodesSize, int partNumber) + Set populateRemovedNodes(Message clientMessage, int removedNodesSize, int partNumber) throws IOException, ClassNotFoundException { - Set removedNodesSet = null; + Set removedNodesSet = null; if (removedNodesSize != 0) { removedNodesSet = new HashSet<>(); partNumber = partNumber + 1; - for (int i = 0; i < removedNodesSize; i++) { - removedNodesSet.add(clientMessage.getPart(partNumber + i).getStringOrObject()); + removedNodesSet.add((String) clientMessage.getPart(partNumber + i).getStringOrObject()); } } return removedNodesSet; @@ -379,12 +376,10 @@ ExecuteFunctionOperationContext getAuthorizedExecuteFunctionOperationContext(Obj } void executeFunctionNoResult(Object function, byte functionState, - Function functionObject, AbstractExecution execution) { + Function functionObject, AbstractExecution execution) { if (function instanceof String) { switch (functionState) { case AbstractExecution.NO_HA_NO_HASRESULT_NO_OPTIMIZEFORWRITE: - execution.execute((String) function); - break; case AbstractExecution.NO_HA_NO_HASRESULT_OPTIMIZEFORWRITE: execution.execute((String) function); break; @@ -395,18 +390,12 @@ void executeFunctionNoResult(Object function, byte functionState, } void executeFunctionWithResult(Object function, byte functionState, - Function functionObject, AbstractExecution execution) { + Function functionObject, AbstractExecution execution) { if (function instanceof String) { switch (functionState) { case AbstractExecution.NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE: - execution.execute((String) function).getResult(); - break; case AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE: - execution.execute((String) function).getResult(); - break; case AbstractExecution.HA_HASRESULT_OPTIMIZEFORWRITE: - execution.execute((String) function).getResult(); - break; case AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE: execution.execute((String) function).getResult(); break; @@ -447,7 +436,7 @@ protected static void writeFunctionResponseException(Message origMsg, MessageTyp String message, ServerConnection serverConnection, Throwable e) throws IOException { ChunkedMessage functionResponseMsg = serverConnection.getFunctionResponseMessage(); ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage(); - int numParts = 0; + final int numParts; if (functionResponseMsg.headerHasBeenSent()) { if (e instanceof FunctionException && e.getCause() instanceof InternalFunctionInvocationTargetException) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18.java index cdff19034eb0..e2a52c779823 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18.java +++ 
b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18.java @@ -36,23 +36,18 @@ private ExecuteRegionFunctionGeode18() {} @Override void executeFunctionWithResult(Object function, byte functionState, - Function functionObject, AbstractExecution execution) { + Function functionObject, AbstractExecution execution) { if (function instanceof String) { switch (functionState) { case AbstractExecution.NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE: + case AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE: execution.setWaitOnExceptionFlag(true); execution.execute((String) function).getResult(); break; case AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE: - execution.execute((String) function).getResult(); - break; case AbstractExecution.HA_HASRESULT_OPTIMIZEFORWRITE: execution.execute((String) function).getResult(); break; - case AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE: - execution.setWaitOnExceptionFlag(true); - execution.execute((String) function).getResult(); - break; } } else { if (!functionObject.isHA()) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java index c13a08808d85..509699acdd59 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionSingleHop.java @@ -78,9 +78,7 @@ public void cmdExecute(final @NotNull Message clientMessage, Set buckets = null; byte hasResult = 0; byte functionState = 0; - int removedNodesSize = 0; - Set removedNodesSet = null; - int filterSize = 0, bucketIdsSize = 0, partNumber = 0; + Set removedNodesSet = null; CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper(); int functionTimeout = DEFAULT_CLIENT_FUNCTION_TIMEOUT; try { @@ -109,9 +107,10 @@ public void cmdExecute(final @NotNull Message clientMessage, } } isExecuteOnAllBuckets = clientMessage.getPart(5).getSerializedForm()[0]; + int partNumber; if (isExecuteOnAllBuckets == 1) { - filter = new HashSet(); - bucketIdsSize = clientMessage.getPart(6).getInt(); + filter = new HashSet<>(); + int bucketIdsSize = clientMessage.getPart(6).getInt(); if (bucketIdsSize != 0) { buckets = new HashSet<>(); partNumber = 7; @@ -121,7 +120,7 @@ public void cmdExecute(final @NotNull Message clientMessage, } partNumber = 7 + bucketIdsSize; } else { - filterSize = clientMessage.getPart(6).getInt(); + int filterSize = clientMessage.getPart(6).getInt(); if (filterSize != 0) { filter = new HashSet<>(); partNumber = 7; @@ -132,15 +131,14 @@ public void cmdExecute(final @NotNull Message clientMessage, partNumber = 7 + filterSize; } - - removedNodesSize = clientMessage.getPart(partNumber).getInt(); + int removedNodesSize = clientMessage.getPart(partNumber).getInt(); if (removedNodesSize != 0) { - removedNodesSet = new HashSet<>(); + removedNodesSet = new HashSet<>(removedNodesSize); partNumber = partNumber + 1; for (int i = 0; i < removedNodesSize; i++) { - removedNodesSet.add(clientMessage.getPart(partNumber + i).getStringOrObject()); + removedNodesSet.add((String) clientMessage.getPart(partNumber + i).getStringOrObject()); } } @@ -171,7 +169,7 @@ public void cmdExecute(final @NotNull Message clientMessage, return; } - Region region = crHelper.getRegion(regionName); + Region region = 
crHelper.getRegion(regionName); if (region == null) { String message = String.format("The region named %s was not found during execute Function request.", @@ -208,7 +206,7 @@ public void cmdExecute(final @NotNull Message clientMessage, } } } else { - functionObject = (Function) function; + functionObject = (Function) function; } // check if the caller is authorized to do this operation on server @@ -223,7 +221,7 @@ public void cmdExecute(final @NotNull Message clientMessage, } // Construct execution - AbstractExecution execution = (AbstractExecution) FunctionService.onRegion(region); + final AbstractExecution execution; ChunkedMessage m = serverConnection.getFunctionResponseMessage(); m.setTransactionId(clientMessage.getTransactionId()); resultSender = @@ -240,11 +238,13 @@ public void cmdExecute(final @NotNull Message clientMessage, if (buckets.isEmpty()) { throw new FunctionException("Buckets are null"); } - execution = new PartitionedRegionFunctionExecutor((PartitionedRegion) region, buckets, args, - memberMappedArg, resultSender, removedNodesSet, true, true); + execution = + new PartitionedRegionFunctionExecutor<>((PartitionedRegion) region, buckets, args, + memberMappedArg, resultSender, removedNodesSet, true, true); } else { - execution = new PartitionedRegionFunctionExecutor((PartitionedRegion) region, filter, args, - memberMappedArg, resultSender, removedNodesSet, false, true); + execution = + new PartitionedRegionFunctionExecutor<>((PartitionedRegion) region, filter, args, + memberMappedArg, resultSender, removedNodesSet, false, true); } if ((hasResult == 1) && filter != null && filter.size() == 1) { @@ -259,14 +259,8 @@ public void cmdExecute(final @NotNull Message clientMessage, if (function instanceof String) { switch (functionState) { case AbstractExecution.NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE: - execution.execute((String) function).getResult(); - break; case AbstractExecution.HA_HASRESULT_NO_OPTIMIZEFORWRITE: - execution.execute((String) function).getResult(); - break; case AbstractExecution.HA_HASRESULT_OPTIMIZEFORWRITE: - execution.execute((String) function).getResult(); - break; case AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE: execution.execute((String) function).getResult(); break; @@ -278,8 +272,6 @@ public void cmdExecute(final @NotNull Message clientMessage, if (function instanceof String) { switch (functionState) { case AbstractExecution.NO_HA_NO_HASRESULT_NO_OPTIMIZEFORWRITE: - execution.execute((String) function); - break; case AbstractExecution.NO_HA_NO_HASRESULT_OPTIMIZEFORWRITE: execution.execute((String) function); break; @@ -289,18 +281,17 @@ public void cmdExecute(final @NotNull Message clientMessage, } } } catch (IOException ioe) { - logger.warn(String.format("Exception on server while executing function : %s", - function), - ioe); + logger.warn("Exception on server while executing function: {}", + function, ioe); final String message = "Server could not send the reply"; sendException(hasResult, clientMessage, message, serverConnection, ioe); } catch (FunctionException fe) { String message = fe.getMessage(); if (fe.getCause() instanceof FunctionInvocationTargetException) { - if (functionObject.isHA() && logger.isDebugEnabled()) { + if (functionObject.isHA()) { logger.debug("Exception on server while executing function: {}: {}", function, message); - } else if (logger.isDebugEnabled()) { + } else { logger.debug("Exception on server while executing function: {}: {}", function, message, fe); } @@ -308,17 +299,13 @@ public void cmdExecute(final @NotNull 
Message clientMessage, resultSender.setException(fe); } } else { - if (logger.isDebugEnabled()) { - logger.debug(String.format("Exception on server while executing function : %s", - function), - fe); - } + logger.debug("Exception on server while executing function: {}", + function, fe); sendException(hasResult, clientMessage, message, serverConnection, fe); } } catch (Exception e) { - logger.warn(String.format("Exception on server while executing function : %s", - function), - e); + logger.warn("Exception on server while executing function: {}", + function, e); String message = e.getMessage(); sendException(hasResult, clientMessage, message, serverConnection, e); } finally { @@ -352,7 +339,7 @@ protected static void writeFunctionResponseException(Message origMsg, MessageTyp String message, ServerConnection serverConnection, Throwable e) throws IOException { ChunkedMessage functionResponseMsg = serverConnection.getFunctionResponseMessage(); ChunkedMessage chunkedResponseMsg = serverConnection.getChunkedResponseMessage(); - int numParts = 0; + final int numParts; if (functionResponseMsg.headerHasBeenSent()) { if (e instanceof FunctionException && e.getCause() instanceof InternalFunctionInvocationTargetException) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java index 5c1a20f4b797..dde2df5e6948 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tier/sockets/command/GetClientPRMetadataCommand66.java @@ -54,10 +54,9 @@ public void cmdExecute(final @NotNull Message clientMessage, final @NotNull ServerConnection serverConnection, final @NotNull SecurityService securityService, long start) throws IOException, ClassNotFoundException, InterruptedException { - String regionFullPath = null; - CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper(); - regionFullPath = clientMessage.getPart(0).getCachedString(); - String errMessage = ""; + final CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper(); + final String regionFullPath = clientMessage.getPart(0).getCachedString(); + final String errMessage; if (regionFullPath == null) { logger.warn("The input region path for the GetClientPRMetadata request is null"); errMessage = @@ -66,7 +65,7 @@ public void cmdExecute(final @NotNull Message clientMessage, errMessage, serverConnection); serverConnection.setAsTrue(RESPONDED); } else { - Region region = crHelper.getRegion(regionFullPath); + Region region = crHelper.getRegion(regionFullPath); if (region == null) { logger.warn("Region was not found during GetClientPRMetadata request for region path : {}", regionFullPath); @@ -81,9 +80,9 @@ public void cmdExecute(final @NotNull Message clientMessage, responseMsg.setTransactionId(clientMessage.getTransactionId()); responseMsg.setMessageType(MessageType.RESPONSE_CLIENT_PR_METADATA); - PartitionedRegion prRgion = (PartitionedRegion) region; + PartitionedRegion prRegion = (PartitionedRegion) region; Map> bucketToServerLocations = - prRgion.getRegionAdvisor().getAllClientBucketProfiles(); + prRegion.getRegionAdvisor().getAllClientBucketProfiles(); responseMsg.setNumberOfParts(bucketToServerLocations.size()); for (List serverLocations : bucketToServerLocations.values()) { responseMsg.addObjPart(serverLocations); diff 
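
A pattern worth calling out in the ExecuteRegionFunction66, ExecuteRegionFunctionGeode18 and ExecuteRegionFunctionSingleHop diffs above: function-state cases whose bodies were byte-for-byte identical are collapsed into stacked case labels that fall through to a single shared body. A minimal, self-contained sketch of the idiom, using hypothetical state constants and a stubbed execute call rather than Geode's real classes:

public class SwitchFallThroughSketch {
  // hypothetical state constants, for illustration only
  static final byte NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE = 0;
  static final byte HA_HASRESULT_NO_OPTIMIZEFORWRITE = 1;
  static final byte HA_HASRESULT_OPTIMIZEFORWRITE = 2;
  static final byte NO_HA_HASRESULT_OPTIMIZEFORWRITE = 3;

  static void execute(String functionId, byte functionState) {
    switch (functionState) {
      case NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE:
      case HA_HASRESULT_NO_OPTIMIZEFORWRITE:
      case HA_HASRESULT_OPTIMIZEFORWRITE:
      case NO_HA_HASRESULT_OPTIMIZEFORWRITE:
        // one shared body replaces four identical "execute(...).getResult(); break;" blocks
        System.out.println("executing " + functionId);
        break;
      default:
        throw new IllegalArgumentException("unknown state " + functionState);
    }
  }

  public static void main(String[] args) {
    execute("sampleFunction", HA_HASRESULT_OPTIMIZEFORWRITE);
  }
}

The behavior is unchanged; only the duplicated statements disappear, which is why the same rewrite recurs in several command classes in this patch.
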
--git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/DistTxEntryEvent.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/DistTxEntryEvent.java index 8eabeedf406e..e644fb804de4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/DistTxEntryEvent.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/DistTxEntryEvent.java @@ -28,13 +28,12 @@ import org.apache.geode.internal.cache.DistributedRemoveAllOperation; import org.apache.geode.internal.cache.DistributedRemoveAllOperation.RemoveAllEntryData; import org.apache.geode.internal.cache.EntryEventImpl; +import org.apache.geode.internal.cache.versions.VersionSource; import org.apache.geode.internal.cache.versions.VersionTag; import org.apache.geode.internal.offheap.annotations.Retained; -import org.apache.geode.internal.serialization.ByteArrayDataInput; import org.apache.geode.internal.serialization.DeserializationContext; import org.apache.geode.internal.serialization.KnownVersion; import org.apache.geode.internal.serialization.SerializationContext; -import org.apache.geode.internal.serialization.StaticSerialization; public class DistTxEntryEvent extends EntryEventImpl { @@ -135,7 +134,7 @@ private void putAllToData(DataOutput out, if (!hasTags && putAllData[i].versionTag != null) { hasTags = true; } - VersionTag tag = putAllData[i].versionTag; + VersionTag> tag = putAllData[i].versionTag; versionTags.add(tag); putAllData[i].versionTag = null; putAllData[i].toData(out, context); @@ -152,8 +151,6 @@ private void putAllFromData(DataInput in, int putAllSize = DataSerializer.readInteger(in); PutAllEntryData[] putAllEntries = new PutAllEntryData[putAllSize]; if (putAllSize > 0) { - final KnownVersion version = StaticSerialization.getVersionForDataStreamOrNull(in); - final ByteArrayDataInput bytesIn = new ByteArrayDataInput(); for (int i = 0; i < putAllSize; i++) { putAllEntries[i] = new PutAllEntryData(in, context, eventID, i); } @@ -186,7 +183,7 @@ private void removeAllToData(DataOutput out, if (!hasTags && removeAllData[i].versionTag != null) { hasTags = true; } - VersionTag tag = removeAllData[i].versionTag; + VersionTag> tag = removeAllData[i].versionTag; versionTags.add(tag); removeAllData[i].versionTag = null; removeAllData[i].serializeTo(out, context); @@ -202,8 +199,6 @@ private void removeAllFromData(DataInput in, DeserializationContext context) throws IOException, ClassNotFoundException { int removeAllSize = DataSerializer.readInteger(in); final RemoveAllEntryData[] removeAllData = new RemoveAllEntryData[removeAllSize]; - final KnownVersion version = StaticSerialization.getVersionForDataStreamOrNull(in); - final ByteArrayDataInput bytesIn = new ByteArrayDataInput(); for (int i = 0; i < removeAllSize; i++) { removeAllData[i] = new RemoveAllEntryData(in, eventID, i, context); } @@ -246,10 +241,10 @@ public String toString() { buf.append(getKeyInfo().getBucketId()); buf.append(";oldValue="); if (putAllOp != null) { - buf.append(";putAllDataSize :" + putAllOp.putAllDataSize); + buf.append(";putAllDataSize :").append(putAllOp.putAllDataSize); } if (removeAllOp != null) { - buf.append(";removeAllDataSize :" + removeAllOp.removeAllDataSize); + buf.append(";removeAllDataSize :").append(removeAllOp.removeAllDataSize); } buf.append("]"); return buf.toString(); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStub.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStub.java index 
10c97e102417..53960bfa3e72 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStub.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStub.java @@ -60,9 +60,9 @@ public class PartitionedTXRegionStub extends AbstractPeerTXRegionStub { private final PartitionedRegion region; - public PartitionedTXRegionStub(TXStateStub txstate, PartitionedRegion r) { + public PartitionedTXRegionStub(TXStateStub txstate, PartitionedRegion region) { super(txstate); - region = r; + this.region = region; } public Map getBuckets() { @@ -74,7 +74,7 @@ public void destroyExistingEntry(EntryEventImpl event, boolean cacheWrite, Object expectedOldValue) { PartitionedRegion pr = (PartitionedRegion) event.getRegion(); try { - pr.destroyRemotely(state.getTarget(), event.getKeyInfo().getBucketId(), event, + pr.destroyRemotely(state.getTarget(), event, expectedOldValue); } catch (PrimaryBucketException e) { RuntimeException re = getTransactionException(event.getKeyInfo(), e); @@ -129,8 +129,8 @@ RuntimeException getTransactionException(KeyInfo keyInfo, Throwable cause) { // of the underlying PR or its colocated PRs touched by the transaction. private boolean isKeyInNonColocatedBucket(KeyInfo keyInfo) { Map, TXRegionStub> regionStubs = state.getRegionStubs(); - Collection colcatedRegions = ColocationHelper - .getAllColocationRegions(region).values(); + Collection colcatedRegions = + ColocationHelper.getAllColocationRegions(region).values(); // get all colocated region buckets touched in the transaction for (PartitionedRegion colcatedRegion : colcatedRegions) { PartitionedTXRegionStub regionStub = @@ -154,13 +154,13 @@ void waitToRetry() { @Override - public Entry getEntry(KeyInfo keyInfo, boolean allowTombstones) { + public Entry getEntry(KeyInfo keyInfo, boolean allowTombstones) { try { - Entry e = region.getEntryRemotely((InternalDistributedMember) state.getTarget(), - keyInfo.getBucketId(), keyInfo.getKey(), false, allowTombstones); + Entry e = region.getEntryRemotely((InternalDistributedMember) state.getTarget(), + keyInfo.getKey(), false, allowTombstones); trackBucketForTx(keyInfo); return e; - } catch (EntryNotFoundException enfe) { + } catch (EntryNotFoundException e) { return null; } catch (PrimaryBucketException e) { RuntimeException re = getTransactionException(keyInfo, e); @@ -200,7 +200,7 @@ public void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallback boolean forceNewEntry) { PartitionedRegion pr = (PartitionedRegion) event.getRegion(); try { - pr.invalidateRemotely(state.getTarget(), event.getKeyInfo().getBucketId(), event); + pr.invalidateRemotely(state.getTarget(), event); } catch (PrimaryBucketException e) { RuntimeException re = getTransactionException(event.getKeyInfo(), e); re.initCause(e); @@ -293,15 +293,15 @@ public boolean containsValueForKey(KeyInfo keyInfo) { @Override public Object findObject(KeyInfo keyInfo, boolean isCreate, boolean generateCallbacks, - Object value, boolean peferCD, ClientProxyMembershipID requestingClient, + Object value, boolean preferCD, ClientProxyMembershipID requestingClient, EntryEventImpl clientEvent) { - Object retVal = null; + final Object retVal; final Object key = keyInfo.getKey(); final Object callbackArgument = keyInfo.getCallbackArg(); try { retVal = region.getRemotely((InternalDistributedMember) state.getTarget(), keyInfo.getBucketId(), - key, callbackArgument, peferCD, requestingClient, clientEvent, false); + key, callbackArgument, preferCD, requestingClient, 
clientEvent, false); } catch (PrimaryBucketException e) { RuntimeException re = getTransactionException(keyInfo, e); re.initCause(e); @@ -337,7 +337,7 @@ public Object getEntryForIterator(KeyInfo keyInfo, boolean allowTombstones) { public boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expectedOldValue, boolean requireOldValue, long lastModified, boolean overwriteDestroyed) { - boolean retVal = false; + final boolean retVal; final InternalRegion r = event.getRegion(); PartitionedRegion pr = (PartitionedRegion) r; try { @@ -372,14 +372,13 @@ public void postPutAll(DistributedPutAllOperation putallO, VersionedObjectList s PartitionedRegion pr = (PartitionedRegion) r; final long startTime = pr.prStats.getTime(); // build all the msgs by bucketid - HashMap prMsgMap = putallO.createPRMessages(); + HashMap prMsgMap = putallO.createPRMessages(); PutAllPartialResult partialKeys = new PutAllPartialResult(putallO.putAllDataSize); successfulPuts.clear(); // this is rebuilt by this method - for (final Object o : prMsgMap.entrySet()) { - Map.Entry mapEntry = (Map.Entry) o; - Integer bucketId = (Integer) mapEntry.getKey(); - PutAllPRMessage prMsg = (PutAllPRMessage) mapEntry.getValue(); + for (final Map.Entry mapEntry : prMsgMap.entrySet()) { + Integer bucketId = mapEntry.getKey(); + PutAllPRMessage prMsg = mapEntry.getValue(); pr.checkReadiness(); try { VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg, pr); @@ -403,8 +402,8 @@ public void postPutAll(DistributedPutAllOperation putallO, VersionedObjectList s pr.prStats.endPutAll(startTime); if (partialKeys.hasFailure()) { - pr.getCache().getLogger().info(String.format("Region %s putAll: %s", - pr.getFullPath(), partialKeys)); + pr.getCache().getLogger() + .info(String.format("Region %s putAll: %s", pr.getFullPath(), partialKeys)); if (putallO.isBridgeOperation()) { if (partialKeys.getFailure() instanceof CancelException) { throw (CancelException) partialKeys.getFailure(); @@ -461,8 +460,8 @@ public void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList pr.prStats.endRemoveAll(startTime); if (partialKeys.hasFailure()) { - pr.getCache().getLogger().info(String.format("Region %s removeAll: %s", - pr.getFullPath(), partialKeys)); + pr.getCache().getLogger() + .info(String.format("Region %s removeAll: %s", pr.getFullPath(), partialKeys)); if (op.isBridgeOperation()) { if (partialKeys.getFailure() instanceof CancelException) { throw (CancelException) partialKeys.getFailure(); @@ -486,10 +485,9 @@ public void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList * value */ private VersionedObjectList sendMsgByBucket(final Integer bucketId, PutAllPRMessage prMsg, - PartitionedRegion pr) { + PartitionedRegion pr) throws RuntimeException { // retry the put remotely until it finds the right node managing the bucket - InternalDistributedMember currentTarget = - pr.getOrCreateNodeForBucketWrite(bucketId, null); + InternalDistributedMember currentTarget = pr.getOrCreateNodeForBucketWrite(bucketId, null); if (!currentTarget.equals(state.getTarget())) { @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(pr); @@ -503,13 +501,12 @@ private VersionedObjectList sendMsgByBucket(final Integer bucketId, PutAllPRMess } try { return pr.tryToSendOnePutAllMessage(prMsg, currentTarget); - } catch (ForceReattemptException prce) { + } catch (ForceReattemptException e) { pr.checkReadiness(); - throw new TransactionDataNotColocatedException(prce.getMessage()); + throw new 
TransactionDataNotColocatedException(e.getMessage()); } catch (PrimaryBucketException notPrimary) { - RuntimeException re = new TransactionDataRebalancedException( + throw new TransactionDataRebalancedException( "Transactional data moved, due to rebalancing.", notPrimary); - throw re; } catch (DataLocationException dle) { throw new TransactionException(dle); } @@ -537,13 +534,12 @@ private VersionedObjectList sendMsgByBucket(final Integer bucketId, RemoveAllPRM } try { return pr.tryToSendOneRemoveAllMessage(prMsg, currentTarget); - } catch (ForceReattemptException prce) { + } catch (ForceReattemptException e) { pr.checkReadiness(); - throw new TransactionDataNotColocatedException(prce.getMessage()); + throw new TransactionDataNotColocatedException(e.getMessage()); } catch (PrimaryBucketException notPrimary) { - RuntimeException re = new TransactionDataRebalancedException( + throw new TransactionDataRebalancedException( "Transactional data moved, due to rebalancing.", notPrimary); - throw re; } catch (DataLocationException dle) { throw new TransactionException(dle); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteClearMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteClearMessage.java index 750827b77612..2d593f2f9588 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteClearMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteClearMessage.java @@ -58,11 +58,10 @@ public RemoteClearMessage() {} public static RemoteClearMessage create(InternalDistributedMember recipient, DistributedRegion region) { - return new RemoteClearMessage(recipient, region, Operation.CLEAR); + return new RemoteClearMessage(recipient, region); } - private RemoteClearMessage(InternalDistributedMember recipient, DistributedRegion region, - Operation op) { + private RemoteClearMessage(InternalDistributedMember recipient, DistributedRegion region) { super(recipient, region.getFullPath(), new RemoteOperationResponse(region.getSystem(), recipient)); this.region = region; @@ -179,7 +178,7 @@ public void toData(DataOutput out, @Override public String toString() { - return "RemoteClearReplyMessage " + "processorid=" + processorId + return "RemoteClearReplyMessage " + "processorId=" + processorId + " reply to sender " + getSender(); } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteOperationMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteOperationMessage.java index 3cd3106e69c9..3e059951ee21 100755 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteOperationMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteOperationMessage.java @@ -526,7 +526,8 @@ public static class RemoteOperationResponse extends DirectReplyProcessor { */ private boolean responseRequired; - public RemoteOperationResponse(InternalDistributedSystem dm, Collection initMembers, + public RemoteOperationResponse(InternalDistributedSystem dm, + Collection initMembers, boolean register) { super(dm, initMembers); if (register) { diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutAllMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutAllMessage.java index b917fd8815ac..6a1dc6e8e682 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutAllMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemotePutAllMessage.java 
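
The sendMsgByBucket changes just above apply one idiom twice: translate the low-level retry exceptions into transaction-level exceptions, and throw the new exception directly instead of first binding it to a temporary local. A hedged, self-contained sketch using stand-in exception classes with Geode-like names (illustrative only, not the real types):

public class ExceptionTranslationSketch {
  // stand-in exception types, named after the Geode ones for readability
  static class ForceReattemptException extends Exception {
    ForceReattemptException(String m) { super(m); }
  }
  static class PrimaryBucketException extends RuntimeException {}
  static class TransactionDataNotColocatedException extends RuntimeException {
    TransactionDataNotColocatedException(String m) { super(m); }
  }
  static class TransactionDataRebalancedException extends RuntimeException {
    TransactionDataRebalancedException(String m, Throwable cause) { super(m, cause); }
  }

  static String sendOneMessage(boolean bucketMoved) throws ForceReattemptException {
    if (bucketMoved) {
      throw new PrimaryBucketException();
    }
    return "ok";
  }

  static String sendMsgByBucket(boolean bucketMoved) {
    try {
      return sendOneMessage(bucketMoved);
    } catch (ForceReattemptException e) {
      throw new TransactionDataNotColocatedException(e.getMessage());
    } catch (PrimaryBucketException notPrimary) {
      // thrown directly; no intermediate "RuntimeException re = ..." local
      throw new TransactionDataRebalancedException(
          "Transactional data moved, due to rebalancing.", notPrimary);
    }
  }

  public static void main(String[] args) {
    System.out.println(sendMsgByBucket(false));
  }
}
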
@@ -57,6 +57,7 @@ import org.apache.geode.internal.cache.partitioned.PutAllPRMessage; import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID; import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList; +import org.apache.geode.internal.cache.versions.VersionSource; import org.apache.geode.internal.cache.versions.VersionTag; import org.apache.geode.internal.logging.log4j.LogMarker; import org.apache.geode.internal.offheap.annotations.Released; @@ -274,7 +275,7 @@ public void toData(DataOutput out, if (!hasTags && putAllData[i].versionTag != null) { hasTags = true; } - VersionTag tag = putAllData[i].versionTag; + VersionTag> tag = putAllData[i].versionTag; versionTags.add(tag); putAllData[i].versionTag = null; putAllData[i].toData(out, context); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteRemoveAllMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteRemoveAllMessage.java index a6b1f9a30b81..3c04b6bd9b87 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteRemoveAllMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/RemoteRemoveAllMessage.java @@ -58,6 +58,7 @@ import org.apache.geode.internal.cache.partitioned.RemoveAllPRMessage; import org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID; import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList; +import org.apache.geode.internal.cache.versions.VersionSource; import org.apache.geode.internal.cache.versions.VersionTag; import org.apache.geode.internal.logging.log4j.LogMarker; import org.apache.geode.internal.offheap.annotations.Released; @@ -270,7 +271,7 @@ public void toData(DataOutput out, if (!hasTags && removeAllData[i].versionTag != null) { hasTags = true; } - VersionTag tag = removeAllData[i].versionTag; + VersionTag> tag = removeAllData[i].versionTag; versionTags.add(tag); removeAllData[i].versionTag = null; removeAllData[i].serializeTo(out, context); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/TXRegionStub.java b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/TXRegionStub.java index 5d159d558d5c..c115afd9faed 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/tx/TXRegionStub.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/tx/TXRegionStub.java @@ -29,7 +29,7 @@ public interface TXRegionStub { void destroyExistingEntry(EntryEventImpl event, boolean cacheWrite, Object expectedOldValue); - Entry getEntry(KeyInfo keyInfo, boolean allowTombstone); + Entry getEntry(KeyInfo keyInfo, boolean allowTombstone); void invalidateExistingEntry(EntryEventImpl event, boolean invokeCallbacks, boolean forceNewEntry); @@ -48,9 +48,9 @@ boolean putEntry(EntryEventImpl event, boolean ifNew, boolean ifOld, Object expe int entryCount(); - Set getRegionKeysForIteration(); + Set getRegionKeysForIteration(); - void postPutAll(DistributedPutAllOperation putallOp, VersionedObjectList successfulPuts, + void postPutAll(DistributedPutAllOperation putAllOp, VersionedObjectList successfulPuts, InternalRegion region); void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList successfulOps, diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionHolder.java b/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionHolder.java index f81beaef921d..3ec3caf59082 100755 --- 
a/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionHolder.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/versions/RegionVersionHolder.java @@ -446,7 +446,7 @@ public synchronized void initializeFrom(RegionVersionHolder source) { // initialize our version and exceptions to match the others exceptions = other.exceptions; version = other.version; - + isDepartedMember = other.isDepartedMember; // Now if this.version/exceptions overlap with myVersion/myExceptions, use this' diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java index fbdc1d9c2cb3..9b474c8cb49c 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/AbstractGatewaySenderEventProcessor.java @@ -228,13 +228,6 @@ public void incrementBatchId() { */ public void resetBatchId() { batchId = 0; - // dont reset first time when first batch is put for dispatch - // if (this.batchIdToEventsMap.size() == 1) { - // if (this.batchIdToEventsMap.containsKey(0)) { - // return; - // } - // } - // this.batchIdToEventsMap.clear(); resetLastPeekedEvents = true; } @@ -286,7 +279,7 @@ public int secondaryEventQueueSize() { return 0; } - // if parallel, get both primary and secondary queues' size, then substract primary queue's size + // if parallel, get both primary and secondary queues' size, then subtract primary queue's size if (queue instanceof ConcurrentParallelGatewaySenderQueue) { final ConcurrentParallelGatewaySenderQueue concurrentParallelGatewaySenderQueue = (ConcurrentParallelGatewaySenderQueue) queue; @@ -312,8 +305,6 @@ public void pauseDispatching() { isPaused = true; } - // merge44957: WHile merging 44957, need this method hence picked up this method from revision - // 42024. 
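
The skipFailureLogging logic touched just below throttles repeated failure logs per batch id with an exponentially growing quiet interval. A compact sketch of that idea, a hypothetical helper simplified from the comments in the diff and not the Geode implementation:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class FailureLogThrottleSketch {
  private static final int FAILURE_MAP_MAXSIZE = 1000; // cap on tracked batch ids
  private static final long INITIAL_INTERVAL_MS = 1000;

  // value[0] = time of last emitted log, value[1] = current quiet interval
  private final ConcurrentMap<Integer, long[]> failureLogInterval = new ConcurrentHashMap<>();

  public boolean skipFailureLogging(int batchId) {
    if (failureLogInterval.size() >= FAILURE_MAP_MAXSIZE) {
      return false; // map has grown large: stop tracking new ids, just log
    }
    final long now = System.currentTimeMillis();
    final long[] interval =
        failureLogInterval.putIfAbsent(batchId, new long[] {now, INITIAL_INTERVAL_MS});
    if (interval == null) {
      return false; // first failure for this batch id: log it
    }
    synchronized (interval) {
      if (now - interval[0] < interval[1]) {
        return true; // still inside the quiet window: skip this log
      }
      interval[0] = now;
      interval[1] *= 2; // double the window for the next repeat
      return false;
    }
  }

  public static void main(String[] args) {
    FailureLogThrottleSketch t = new FailureLogThrottleSketch();
    System.out.println(t.skipFailureLogging(7)); // false: first failure is logged
    System.out.println(t.skipFailureLogging(7)); // true: throttled
  }
}
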
public void waitForDispatcherToPause() { if (!isPaused) { throw new IllegalStateException("Should be trying to pause!"); @@ -377,7 +368,7 @@ protected boolean stopped() { public boolean skipFailureLogging(Integer batchId) { boolean skipLogging = false; - // if map has become large then give up on new events but we don't expect + // if map has become large then give up on new events, but we don't expect // it to become too large in practise if (failureLogInterval.size() < FAILURE_MAP_MAXSIZE) { // first long in logInterval gives the last time when the log was done, @@ -385,7 +376,7 @@ public boolean skipFailureLogging(Integer batchId) { // increases exponentially // multiple currentTimeMillis calls below may hinder performance // but not much to worry about since failures are expected to - // be an infrequent occurance (and if frequent then we have to skip + // be an infrequent occurrence (and if frequent then we have to skip // logging for quite a while in any case) long[] logInterval = failureLogInterval.get(batchId); if (logInterval == null) { @@ -428,7 +419,7 @@ protected void processQueue() { } // list of the events peeked from queue List events = null; - // list of the PDX events which are peeked from pDX region and needs to go acrossthe site + // list of the PDX events which are peeked from pDX region and needs to go across the site List pdxEventsToBeDispatched = new ArrayList<>(); // list of filteredList + pdxEventsToBeDispatched events List eventsToBeDispatched = new ArrayList<>(); @@ -473,31 +464,7 @@ protected void processQueue() { resetLastPeekedEvents(); resetLastPeekedEvents = false; } - - { - // Below code was added to consider the case of queue region is - // destroyed due to userPRs localdestroy or destroy operation. - // In this case we were waiting for queue region to get created - // and then only peek from the region queue. - // With latest change of multiple PR with single ParalleSender, we - // cant wait for particular regionqueue to get recreated as there - // will be other region queue from which events can be picked - - /* - * // Check if paused. If so, wait for resumption if (this.isPaused) { - * waitForResumption(); } - * - * synchronized (this.getQueue()) { // its quite possible that the queue region is // - * destroyed(userRegion // localdestroy destroys shadow region locally). 
In this case - * // better to // wait for shadows region to get recreated instead of keep loop // - * for peeking events if (this.getQueue().getRegion() == null || - * this.getQueue().getRegion().isDestroyed()) { try { this.getQueue().wait(); - * continue; // this continue is important to recheck the // conditions of stop/ pause - * after the wait of 1 sec } catch (InterruptedException e1) { - * Thread.currentThread().interrupt(); } } } - */ - } - events = queue.peek(batchSize, batchTimeInterval); + events = uncheckedCast(queue.peek(batchSize, batchTimeInterval)); } catch (InterruptedException e) { interrupted = true; sender.getCancelCriterion().checkCancelInProgress(e); @@ -517,7 +484,7 @@ protected void processQueue() { List filteredList = new ArrayList<>(events); - // If the exception has been set and its cause is an IllegalStateExcetption, + // If the exception has been set and its cause is an IllegalStateException, // remove all events whose serialized value is no longer available if (exception != null && exception.getCause() != null && exception.getCause() instanceof IllegalStateException) { @@ -606,7 +573,7 @@ protected void processQueue() { eventsToBeDispatched.clear(); if (!(dispatcher instanceof GatewaySenderEventCallbackDispatcher)) { - // store the batch before dispatching so it can be retrieved by the ack thread. + // store the batch before dispatching, so it can be retrieved by the ack thread. List[] eventsArr = uncheckedCast(new List[2]); eventsArr[0] = events; eventsArr[1] = filteredList; @@ -838,15 +805,14 @@ private List addPDXEvent() throws IOException { rebuildPdxList = false; } - // find out the list of the PDXEvents which needs to be send across remote - // site - // these events will be added to list pdxSenderEventsList. I am expecting + // find out the list of the PDXEvents which needs to be sent across remote + // site these events will be added to list pdxSenderEventsList. I am expecting // that PDX events will be only added to PDX region. no deletion happens on // PDX region if (pdxRegion != null && pdxRegion.size() != pdxEventsMap.size()) { for (Map.Entry typeEntry : pdxRegion.entrySet()) { if (!pdxEventsMap.containsKey(typeEntry.getKey())) { - // event should never be off-heap so it does not need to be released + // event should never be off-heap, so it does not need to be released EntryEventImpl event = EntryEventImpl.create((LocalRegion) pdxRegion, Operation.UPDATE, typeEntry.getKey(), typeEntry.getValue(), null, false, cache.getMyId()); event.disallowOffHeapValues(); @@ -858,7 +824,7 @@ private List addPDXEvent() throws IOException { GatewaySenderEventCallbackArgument geCallbackArg = new GatewaySenderEventCallbackArgument( event.getRawCallbackArgument(), sender.getMyDSId(), allRemoteDSIds); event.setCallbackArgument(geCallbackArg); - // OFFHEAP: event for pdx type meta data so it should never be off-heap + // OFFHEAP: event for pdx type metadata, so it should never be off-heap GatewaySenderEventImpl pdxSenderEvent = new GatewaySenderEventImpl(EnumListenerEvent.AFTER_UPDATE, event, null); @@ -872,15 +838,15 @@ private List addPDXEvent() throws IOException { while (iterator.hasNext()) { GatewaySenderEventImpl pdxEvent = iterator.next(); if (pdxEvent.isAcked) { - // Since this is acked, it means it has reached to remote site.Dont add + // Since this is acked, it means it has reached to remote site. Don't add // to pdxEventsToBeDispatched iterator.remove(); continue; } if (pdxEvent.isDispatched) { - // Dispacther does not mean that event has reched remote site. 
We may - // need to send it agian if there is porblem while receiveing ack - // containing this event.Dont add to pdxEventsToBeDispatched + // Dispatcher does not mean that event has reached remote site. We may + // need to send it again if there is problem while receiving ack + // containing this event. Don't add to pdxEventsToBeDispatched continue; } pdxEventsToBeDispatched.add(pdxEvent); @@ -930,11 +896,11 @@ private void resetLastPeekedEvents() { } } - private void handleSuccessfulBatchDispatch(List filteredList, - List events) { + private void handleSuccessfulBatchDispatch(final List filteredList, + final List events) { if (filteredList != null) { - for (GatewayEventFilter filter : sender.getGatewayEventFilters()) { - for (Object o : filteredList) { + for (final GatewayEventFilter filter : sender.getGatewayEventFilters()) { + for (final Object o : filteredList) { if (o instanceof GatewaySenderEventImpl) { try { filter.afterAcknowledgement((GatewaySenderEventImpl) o); @@ -946,9 +912,9 @@ private void handleSuccessfulBatchDispatch(List filteredList, } } } + filteredList.clear(); } - filteredList.clear(); eventQueueRemove(events.size()); logThresholdExceededAlerts(events); diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java index 494e49916851..966dac9e9fa0 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/GatewaySenderEventImpl.java @@ -67,7 +67,8 @@ * */ public class GatewaySenderEventImpl - implements AsyncEvent, DataSerializableFixedID, Conflatable, Sizeable, Releasable { + implements AsyncEvent, DataSerializableFixedID, Conflatable, Sizeable, + Releasable { private static final long serialVersionUID = -5690172020872255422L; protected static final Object TOKEN_NULL = new Object(); @@ -152,14 +153,14 @@ public class GatewaySenderEventImpl /** * Whether this event is acknowledged after the ack received by AckReaderThread. As of now this is - * getting used for PDX related GatewaySenderEvent. But can be extended for for other + * getting used for PDX related GatewaySenderEvent. But can be extended for other * GatewaySenderEvent. */ protected volatile boolean isAcked; /** * Whether this event is dispatched by dispatcher. As of now this is getting used for PDX related - * GatewaySenderEvent. But can be extended for for other GatewaySenderEvent. + * GatewaySenderEvent. But can be extended for other GatewaySenderEvent. */ protected volatile boolean isDispatched; /** @@ -168,7 +169,7 @@ public class GatewaySenderEventImpl protected long creationTime; /** - * For ParalledGatewaySender we need bucketId of the PartitionRegion on which the update operation + * For ParallelGatewaySender we need bucketId of the PartitionRegion on which the update operation * was applied. */ protected int bucketId; @@ -583,7 +584,7 @@ public Object getDeserializedValue() { throw new IllegalStateException( "Value is no longer available. getDeserializedValue must be called before processEvents returns."); } - // both value and valueObj are null but we did not free it. + // both value and valueObj are null, but we did not free it. return null; } } @@ -841,7 +842,7 @@ public static boolean isSerializingValue() { // / Conflatable interface methods /// /** - * Determines whether or not to conflate this message. 
This method will answer true IFF the + * Determines whether to conflate this message. This method will answer true IFF the * message's operation is AFTER_UPDATE and its region has enabled are conflation. Otherwise, this * method will answer false. Messages whose operation is AFTER_CREATE, AFTER_DESTROY, * AFTER_INVALIDATE or AFTER_REGION_DESTROY are not conflated. @@ -1125,7 +1126,7 @@ public int getSizeInBytes() { // - the region and regionName because they are references // - the operation because it is a reference // - the entry event because it is nulled prior to calling this method - // - the transactionId because it is is a reference + // - the transactionId because it is a reference // The size of instances of the following internal datatypes were estimated // using a NullDataOutputStream and hardcoded into this method: @@ -1199,21 +1200,15 @@ private int sizeOf(Object obj) { return size; } - - // Asif: If the GatewayEvent serializes to a node where the region itself may - // not be present or the - // region is not created yet , and if the gateway event queue is persistent, - // then even if - // we try to set the region in the fromData , we may still get null. Though - // the product is - // not using this method anywhere still not comfortable changing the Interface - // so - // modifying the implementation a bit. - + // If the GatewayEvent serializes to a node where the region itself may not be present or the + // region is not created yet , and if the gateway event queue is persistent, then even if we try + // to set the region in the fromData , we may still get null. Though the product is not using this + // method anywhere still not comfortable changing the Interface, so modifying the implementation a + // bit. + @SuppressWarnings("unchecked") @Override - public Region getRegion() { - // The region will be null mostly for the other node where the gateway event - // is serialized + public Region getRegion() { + // The region will be null mostly for the other node where the gateway event is serialized return region != null ? 
region : CacheFactory.getAnyInstance().getRegion(regionPath); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java index 337171446713..e82b74529a7f 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ConcurrentParallelGatewaySenderQueue.java @@ -28,7 +28,6 @@ import org.apache.geode.internal.cache.RegionQueue; import org.apache.geode.internal.cache.wan.AbstractGatewaySender; import org.apache.geode.internal.cache.wan.GatewaySenderEventImpl; -import org.apache.geode.internal.size.SingleObjectSizer; /** * Queue built on top of {@link @@ -50,9 +49,9 @@ public class ConcurrentParallelGatewaySenderQueue implements RegionQueue { private final ParallelGatewaySenderEventProcessor[] processors; public ConcurrentParallelGatewaySenderQueue(AbstractGatewaySender sender, - ParallelGatewaySenderEventProcessor[] pro) { + ParallelGatewaySenderEventProcessor[] processors) { this.sender = sender; - processors = pro; + this.processors = processors; } @Override @@ -70,7 +69,7 @@ public void close() { } @Override - public Region getRegion() { + public Region getRegion() { return processors[0].getQueue().getRegion(); } @@ -93,7 +92,7 @@ public Object take() throws CacheException, InterruptedException { } @Override - public List take(int batchSize) throws CacheException, InterruptedException { + public List take(int batchSize) throws CacheException, InterruptedException { throw new UnsupportedOperationException("This method(take) is not supported"); } @@ -108,24 +107,22 @@ public Object peek() throws InterruptedException, CacheException { } @Override - public List peek(int batchSize) throws InterruptedException, CacheException { + public List peek(int batchSize) throws InterruptedException, CacheException { throw new UnsupportedOperationException("This method(peek) is not supported"); } @Override - public List peek(int batchSize, int timeToWait) throws InterruptedException, CacheException { + public List peek(int batchSize, int timeToWait) throws InterruptedException, CacheException { throw new UnsupportedOperationException("This method(peek) is not supported"); } @Override public int size() { - // is that fine?? 
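
ConcurrentParallelGatewaySenderQueue, edited here, is a thin facade over one queue per processor: single-queue questions are delegated to the first member and batch operations it cannot answer fail fast. A simplified, hedged sketch of that shape using generic JDK types rather than Geode's RegionQueue:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.List;
import java.util.Queue;

public class CompositeQueueSketch<E> {
  private final List<Queue<E>> delegates;

  public CompositeQueueSketch(List<Queue<E>> delegates) {
    this.delegates = delegates;
  }

  public int size() {
    // like size() above: ask the first member for a representative answer
    return delegates.get(0).size();
  }

  public int localSize() {
    // sum what every member holds locally
    return delegates.stream().mapToInt(Queue::size).sum();
  }

  public List<E> peek(int batchSize) {
    throw new UnsupportedOperationException("This method(peek) is not supported");
  }

  public static void main(String[] args) {
    Queue<String> a = new ArrayDeque<>(Arrays.asList("e1", "e2"));
    Queue<String> b = new ArrayDeque<>();
    CompositeQueueSketch<String> q = new CompositeQueueSketch<>(Arrays.asList(a, b));
    System.out.println(q.size() + " / " + q.localSize()); // 2 / 2
  }
}
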
return processors[0].getQueue().size(); } public String displayContent() { - ParallelGatewaySenderQueue pgsq = (ParallelGatewaySenderQueue) (processors[0].getQueue()); - return pgsq.displayContent(); + return ((ParallelGatewaySenderQueue) (processors[0].getQueue())).displayContent(); } public int localSize() { @@ -148,20 +145,7 @@ public void removeCacheListener() { @Override public void remove(int top) throws CacheException { - throw new UnsupportedOperationException("This method(remove) is not suported"); - } - - /* - * public void resetLastPeeked(){ this.resetLastPeeked = true; } - */ - - public long estimateMemoryFootprint(SingleObjectSizer sizer) { - long size = 0; - for (final ParallelGatewaySenderEventProcessor processor : processors) { - size += ((ParallelGatewaySenderQueue) processor.getQueue()) - .estimateMemoryFootprint(sizer); - } - return size; + throw new UnsupportedOperationException("This method(remove) is not supported"); } public void removeShadowPR(String prRegionName) { @@ -173,8 +157,7 @@ public void removeShadowPR(String prRegionName) { public void addShadowPartitionedRegionForUserPR(PartitionedRegion pr) { // Reset enqueuedAllTempQueueEvents if the sender is running // This is done so that any events received while the shadow PR is added are queued in the - // tmpQueuedEvents - // instead of blocking the distribute call which could cause a deadlock. See GEM-801. + // tmpQueuedEvents instead of blocking the distributed call which could cause a deadlock. if (sender.isRunning()) { sender.setEnqueuedAllTempQueueEvents(false); } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java index 678286c92b3f..b0bbfc0b76b9 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueue.java @@ -14,10 +14,14 @@ */ package org.apache.geode.internal.cache.wan.parallel; +import static java.lang.String.format; +import static java.util.Comparator.comparing; import static org.apache.geode.cache.Region.SEPARATOR; import static org.apache.geode.cache.wan.GatewaySender.DEFAULT_BATCH_SIZE; import static org.apache.geode.cache.wan.GatewaySender.GET_TRANSACTION_EVENTS_FROM_QUEUE_WAIT_TIME_MS; import static org.apache.geode.internal.cache.LocalRegion.InitializationLevel.BEFORE_INITIAL_IMAGE; +import static org.apache.geode.internal.lang.utils.JavaWorkarounds.computeIfAbsent; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import java.util.ArrayList; import java.util.Collections; @@ -69,7 +73,6 @@ import org.apache.geode.internal.cache.Conflatable; import org.apache.geode.internal.cache.DiskRegionStats; import org.apache.geode.internal.cache.DistributedRegion; -import org.apache.geode.internal.cache.EntryEventImpl; import org.apache.geode.internal.cache.ForceReattemptException; import org.apache.geode.internal.cache.InternalCache; import org.apache.geode.internal.cache.InternalRegionArguments; @@ -87,7 +90,6 @@ import org.apache.geode.internal.cache.wan.GatewaySenderEventImpl; import org.apache.geode.internal.cache.wan.GatewaySenderException; import org.apache.geode.internal.cache.wan.GatewaySenderStats; -import org.apache.geode.internal.size.SingleObjectSizer; import org.apache.geode.internal.statistics.StatisticsClock; import 
org.apache.geode.internal.util.concurrent.StoppableCondition; import org.apache.geode.internal.util.concurrent.StoppableReentrantLock; @@ -104,9 +106,8 @@ public class ParallelGatewaySenderQueue implements RegionQueue { private static final String SHADOW_BUCKET_PATH_PREFIX = SEPARATOR + PartitionedRegionHelper.PR_ROOT_REGION_NAME + SEPARATOR; - // >> - private final Map>> regionToDispatchedKeysMap = - new ConcurrentHashMap>>(); + private final ConcurrentMap>> regionToDispatchedKeysMap = + new ConcurrentHashMap<>(); protected final StoppableReentrantLock buckToDispatchLock; private final StoppableCondition regionToDispatchedKeysMapEmpty; @@ -222,20 +223,6 @@ public void run() { prQ.getName()); } } - - private Object deserialize(Object serializedBytes) { - Object deserializedObject = serializedBytes; - if (serializedBytes instanceof byte[]) { - byte[] serializedBytesCast = (byte[]) serializedBytes; - // This is a debugging method so ignore all exceptions like - // ClassNotFoundException - try { - deserializedObject = EntryEventImpl.deserialize(serializedBytesCast); - } catch (Exception ignore) { - } - } - return deserializedObject; - } } protected final int index; @@ -264,10 +251,10 @@ public ParallelGatewaySenderQueue(AbstractGatewaySender sender, Set asyncEvent = this.sender.getId().contains(AsyncEventQueueImpl.ASYNC_EVENT_QUEUE_PREFIX); - List listOfRegions = new ArrayList<>(userRegions); - Collections.sort(listOfRegions, (o1, o2) -> o1.getFullPath().compareTo(o2.getFullPath())); + List> listOfRegions = new ArrayList<>(userRegions); + listOfRegions.sort(comparing(Region::getFullPath)); - for (Region userRegion : listOfRegions) { + for (Region userRegion : listOfRegions) { if (userRegion instanceof PartitionedRegion) { addShadowPartitionedRegionForUserPR((PartitionedRegion) userRegion); if (index == 0 && getRegion(userRegion.getFullPath()) != null) { @@ -278,13 +265,13 @@ public ParallelGatewaySenderQueue(AbstractGatewaySender sender, Set // addShadowPartitionedRegionForUserRR if (asyncEvent) { - throw new AsyncEventQueueConfigurationException(String.format( + throw new AsyncEventQueueConfigurationException(format( "Parallel Async Event Queue %s can not be used with replicated region %s", AsyncEventQueueImpl.getAsyncEventQueueIdFromSenderId(this.sender.getId()), userRegion.getFullPath())); } throw new GatewaySenderConfigurationException( - String.format("Parallel Gateway Sender %s can not be used with replicated region %s", + format("Parallel Gateway Sender %s can not be used with replicated region %s", this.sender.getId(), userRegion.getFullPath())); } } @@ -331,30 +318,27 @@ public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) { } InternalCache cache = sender.getCache(); - final String prQName = getQueueName(sender.getId(), userRegion.getFullPath()); + final String prQName = getQueueName(sender.getId()); prQ = (PartitionedRegion) cache.getRegion(prQName); if (prQ == null) { - InternalRegionFactory fact = cache.createInternalRegionFactory(); - // Fix for 48621 - don't enable concurrency checks - // for queue buckets., event with persistence + InternalRegionFactory fact = cache.createInternalRegionFactory(); + // don't enable concurrency checks for queue buckets, even with persistence fact.setConcurrencyChecksEnabled(false); - PartitionAttributesFactory pfact = new PartitionAttributesFactory(); + PartitionAttributesFactory pfact = new PartitionAttributesFactory<>(); pfact.setTotalNumBuckets(sender.getMaxParallelismForReplicatedRegion()); int localMaxMemory =
userRegion.getDataPolicy().withStorage() ? sender.getMaximumQueueMemory() : 0; pfact.setLocalMaxMemory(localMaxMemory); pfact.setRedundantCopies(3); // TODO:Kishor : THis need to be handled nicely - pfact.setPartitionResolver(new RREventIDResolver()); + pfact.setPartitionResolver(uncheckedCast(new RREventIDResolver())); if (sender.isPersistenceEnabled()) { fact.setDataPolicy(DataPolicy.PERSISTENT_PARTITION); } fact.setDiskStoreName(sender.getDiskStoreName()); - // if persistence is enabled, set the diskSyncronous to whatever user - // has set - // else set it to false - // optimize with above check of enable persistence + // if persistence is enabled, set the diskSynchronous to whatever the user + // has set; else set it to false (could be folded into the persistence check above) if (sender.isPersistenceEnabled()) { fact.setDiskSynchronous(sender.isDiskSynchronous()); } else { @@ -368,7 +352,7 @@ public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) { fact.setEvictionAttributes(ea); fact.setPartitionAttributes(pfact.create()); - final RegionAttributes ra = fact.getCreateAttributes(); + final RegionAttributes ra = fact.getCreateAttributes(); if (logger.isDebugEnabled()) { logger.debug("{}: Attempting to create queue region: {}", this, prQName); @@ -391,11 +375,13 @@ public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) { prQ.enableConflation(sender.isBatchConflationEnabled()); // Before going ahead, make sure all the buckets of shadowPR are - // loaded - // and primary nodes have been decided. + // loaded and primary nodes have been decided. // This is required in case of persistent PR and sender. if (prQ.getLocalMaxMemory() != 0) { - for (final Integer integer : prQ.getRegionAdvisor().getBucketSet()) { + Iterator itr = prQ.getRegionAdvisor().getBucketSet().iterator(); + // noinspection WhileLoopReplaceableByForEach + while (itr.hasNext()) { + itr.next(); } } // In case of Replicated Region it may not be necessary. @@ -408,7 +394,7 @@ public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) { // started from stop operation) // for first processor only if (index == 0) { - handleShadowPRExistsScenario(cache, prQ); + handleShadowPRExistsScenario(prQ); } } /* @@ -428,10 +414,6 @@ public void addShadowPartitionedRegionForUserRR(DistributedRegion userRegion) { } } - private static String convertPathToName(String fullPath) { - return ""; - } - public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR) { addShadowPartitionedRegionForUserPR(userPR, null); } @@ -469,15 +451,14 @@ public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR, if ((childPR == null ?
userPR : childPR).getDataPolicy().withPersistence() && !sender.isPersistenceEnabled()) { throw new GatewaySenderException( - String.format( - "Non persistent gateway sender %s can not be attached to persistent region %s", + format("Non persistent gateway sender %s can not be attached to persistent region %s", sender.getId(), userPR.getFullPath())); } InternalCache cache = sender.getCache(); boolean isAccessor = (userPR.getLocalMaxMemory() == 0); - final String prQName = sender.getId() + QSTRING + convertPathToName(userPR.getFullPath()); + final String prQName = sender.getId() + QSTRING; prQ = (PartitionedRegion) cache.getRegion(prQName, true); if (prQ != null && prQ.isDestroyed()) { @@ -508,10 +489,10 @@ public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR, } else { regionShortcut = RegionShortcut.PARTITION; } - InternalRegionFactory fact = cache.createInternalRegionFactory(regionShortcut); + InternalRegionFactory fact = cache.createInternalRegionFactory(regionShortcut); fact.setConcurrencyChecksEnabled(false); - PartitionAttributesFactory pfact = new PartitionAttributesFactory(); + PartitionAttributesFactory pfact = new PartitionAttributesFactory<>(); pfact.setTotalNumBuckets(userPR.getTotalNumberOfBuckets()); pfact.setRedundantCopies(userPR.getRedundantCopies()); pfact.setColocatedWith(regionName); @@ -526,7 +507,7 @@ public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR, fact.setDiskStoreName(sender.getDiskStoreName()); - // if persistence is enabled, set the diskSyncronous to whatever user has set + // if persistence is enabled, set the diskSynchronous to whatever user has set // else set it to false if (sender.isPersistenceEnabled()) { fact.setDiskSynchronous(sender.isDiskSynchronous()); @@ -541,14 +522,14 @@ public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR, fact.setEvictionAttributes(ea); fact.setPartitionAttributes(pfact.create()); - final RegionAttributes ra = fact.getCreateAttributes(); + final RegionAttributes ra = fact.getCreateAttributes(); if (logger.isDebugEnabled()) { logger.debug("{}: Attempting to create queue region: {}", this, prQName); } ParallelGatewaySenderQueueMetaRegion meta = - metaRegionFactory.newMetataRegion(cache, prQName, ra, sender); + metaRegionFactory.newMetaRegion(cache, prQName, ra, sender); fact.setInternalMetaRegion(meta); fact.setDestroyLockFlag(true); @@ -585,7 +566,7 @@ public void addShadowPartitionedRegionForUserPR(PartitionedRegion userPR, // started from stop operation) // for first parallelGatewaySenderQueue only if (index == 0) { - handleShadowPRExistsScenario(cache, prQ); + handleShadowPRExistsScenario(prQ); } } @@ -652,7 +633,7 @@ private void cleanOverflowStats(Cache cache) { /** * This will be case when the sender is started again after stop operation. 
*/ - private void handleShadowPRExistsScenario(Cache cache, PartitionedRegion prQ) { + private void handleShadowPRExistsScenario(PartitionedRegion prQ) { // Note: The region will not be null if the sender is started again after stop operation if (logger.isDebugEnabled()) { logger.debug("{}: No need to create the region as the region has been retrieved: {}", this, @@ -660,7 +641,7 @@ private void handleShadowPRExistsScenario(Cache cache, PartitionedRegion prQ) { } } - protected void afterRegionAdd(PartitionedRegion userPR) { + protected void afterRegionAdd(@SuppressWarnings("unused") PartitionedRegion userPR) { // nothing } @@ -715,7 +696,7 @@ public boolean put(Object object) throws InterruptedException, CacheException { String regionPath = value.getRegionPath(); if (!isDREvent) { - Region region = sender.getCache().getRegion(regionPath, true); + Region region = sender.getCache().getRegion(regionPath, true); regionPath = ColocationHelper.getLeaderRegion((PartitionedRegion) region).getFullPath(); } if (isDebugEnabled) { @@ -723,7 +704,7 @@ public boolean put(Object object) throws InterruptedException, CacheException { } if (!userRegionNameToShadowPRMap.containsKey(regionPath)) { if (isDebugEnabled) { - logger.debug("The userRegionNameToshadowPRMap is {}", userRegionNameToShadowPRMap); + logger.debug("The userRegionNameToShadowPRMap is {}", userRegionNameToShadowPRMap); } logger.warn( "GatewaySender: Not queuing the event {}, as the region for which this event originated is not yet configured in the GatewaySender", @@ -734,7 +715,7 @@ public boolean put(Object object) throws InterruptedException, CacheException { PartitionedRegion prQ = userRegionNameToShadowPRMap.get(regionPath); int bucketId = value.getBucketId(); - Object key = null; + final Object key; if (!isDREvent) { key = value.getShadowKey(); @@ -776,14 +757,14 @@ public boolean put(Object object) throws InterruptedException, CacheException { bucketFullPath, brq); } if (brq != null) { - boolean intializingLocked = brq.lockWhenRegionIsInitializing(); + boolean initializingLocked = brq.lockWhenRegionIsInitializing(); brq.getInitializationLock().readLock().lock(); try { putIntoBucketRegionQueue(brq, key, value); putDone = true; } finally { brq.getInitializationLock().readLock().unlock(); - if (intializingLocked) { + if (initializingLocked) { brq.unlockWhenRegionIsInitializing(); } } @@ -812,27 +793,20 @@ public boolean put(Object object) throws InterruptedException, CacheException { * This is to prevent data loss, in the scenario when bucket is not available in the * cache but we know that it will be created. 
*/ - BlockingQueue tempQueue = null; - synchronized (bucketToTempQueueMap) { - tempQueue = bucketToTempQueueMap.get(bucketId); - if (tempQueue == null) { - tempQueue = new LinkedBlockingQueue(); - bucketToTempQueueMap.put(bucketId, tempQueue); - } - } - + final BlockingQueue tempQueue = + computeIfAbsent(bucketToTempQueueMap, bucketId, k -> new LinkedBlockingQueue<>()); synchronized (tempQueue) { brq = (AbstractBucketRegionQueue) prQ.getCache() .getInternalRegionByPath(bucketFullPath); if (brq != null) { - boolean intializingLocked = brq.lockWhenRegionIsInitializing(); + boolean initializingLocked = brq.lockWhenRegionIsInitializing(); brq.getInitializationLock().readLock().lock(); try { putIntoBucketRegionQueue(brq, key, value); putDone = true; } finally { brq.getInitializationLock().readLock().unlock(); - if (intializingLocked) { + if (initializingLocked) { brq.unlockWhenRegionIsInitializing(); } } @@ -909,9 +883,6 @@ void putIntoBucketRegionQueue(AbstractBucketRegionQueue brq, Object key, try { if (brq != null) { addedValueToQueue = brq.addToQueue(key, value); - // TODO: During merge, ParallelWANstats test failed. On - // comment below code test passed. cheetha does not have below code. - // need to find out from hcih revision this code came } } catch (BucketNotFoundException e) { if (logger.isDebugEnabled()) { @@ -937,9 +908,9 @@ void putIntoBucketRegionQueue(AbstractBucketRegionQueue brq, Object key, * Otherwise it returns null. */ @Override - public Region getRegion() { + public Region getRegion() { return userRegionNameToShadowPRMap.size() == 1 - ? (Region) userRegionNameToShadowPRMap.values().toArray()[0] : null; + ? (Region) userRegionNameToShadowPRMap.values().toArray()[0] : null; } public PartitionedRegion getRegion(String fullpath) { @@ -961,10 +932,10 @@ public ExecutorService getConflationExecutor() { } /** - * Returns the set of shadowPR backign this queue. + * Returns the set of shadowPR backing this queue. */ public Set getRegions() { - return new HashSet(userRegionNameToShadowPRMap.values()); + return new HashSet<>(userRegionNameToShadowPRMap.values()); } // TODO: Find optimal way to get Random shadow pr as this will be called in each put and peek. 
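The refactor above swaps an explicit synchronized get-or-create for JavaWorkarounds.computeIfAbsent. A self-contained sketch of what such a helper presumably looks like; the get-first fast path is an assumption (it matches the common workaround for JDK-8161372, where ConcurrentHashMap.computeIfAbsent can contend even when the key is already present), not code copied from Geode:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Function;

final class GetOrCreateSketch {
  // Assumed shape of JavaWorkarounds.computeIfAbsent: lock-free get first,
  // falling back to ConcurrentMap.computeIfAbsent only on a miss.
  static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key,
      Function<? super K, ? extends V> mappingFunction) {
    V value = map.get(key);
    return value != null ? value : map.computeIfAbsent(key, mappingFunction);
  }

  public static void main(String[] args) {
    ConcurrentMap<Integer, LinkedBlockingQueue<Object>> bucketToTempQueue =
        new ConcurrentHashMap<>();
    // replaces the old synchronized get/put dance shown in the hunk above
    LinkedBlockingQueue<Object> queue =
        computeIfAbsent(bucketToTempQueue, 42, k -> new LinkedBlockingQueue<>());
    queue.offer("event");
    System.out.println(queue.size()); // 1
  }
}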
@@ -978,7 +949,7 @@ protected PartitionedRegion getRandomShadowPR() { } boolean isDREvent(InternalCache cache, GatewaySenderEventImpl event) { - Region region = cache.getRegion(event.getRegionPath()); + final Region region = cache.getRegion(event.getRegionPath()); return region instanceof DistributedRegion; } @@ -1043,8 +1014,7 @@ protected int getRandomPrimaryBucket(PartitionedRegion prQ) { } @Override - public List take(int batchSize) throws CacheException, InterruptedException { - // merge42180 + public List take(int batchSize) throws CacheException, InterruptedException { throw new UnsupportedOperationException(); } @@ -1071,7 +1041,7 @@ public void remove() throws CacheException { } else { String regionPath = event.getRegionPath(); InternalCache cache = sender.getCache(); - Region region = cache.getRegion(regionPath); + Region region = cache.getRegion(regionPath); if (region != null && !region.isDestroyed()) { // TODO: We have to get colocated parent region for this region if (region instanceof DistributedRegion) { @@ -1102,8 +1072,6 @@ public void remove() throws CacheException { private void destroyEventFromQueue(PartitionedRegion prQ, int bucketId, Object key) { BucketRegionQueue brq = getBucketRegionQueueByBucketId(prQ, bucketId); - // TODO : Make sure we dont need to initalize a bucket - // before destroying a key from it try { if (brq != null) { brq.destroyKey(key); @@ -1146,7 +1114,7 @@ public void resetLastPeeked() { public Object peek() throws InterruptedException, CacheException { Object object = null; - int bucketId = -1; + final int bucketId; PartitionedRegion prQ = getRandomShadowPR(); if (prQ != null && prQ.getDataStore().getAllLocalBucketRegions().size() > 0 && ((bucketId = getRandomPrimaryBucket(prQ)) != -1)) { @@ -1158,7 +1126,8 @@ public Object peek() throws InterruptedException, CacheException { return object;// since this is not set, it would be null } catch (ForceReattemptException e) { if (logger.isDebugEnabled()) { - logger.debug("Remove: Got ForceReattemptException for {} for bucke = {}", this, bucketId); + logger.debug("Remove: Got ForceReattemptException for {} for bucketId = {}", this, + bucketId); } } } @@ -1173,12 +1142,8 @@ protected void addRemovedEvent(PartitionedRegion prQ, int bucketId, Object key) lock.lock(); boolean wasEmpty = regionToDispatchedKeysMap.isEmpty(); try { - Map> bucketIdToDispatchedKeys = - regionToDispatchedKeysMap.get(prQ.getFullPath()); - if (bucketIdToDispatchedKeys == null) { - bucketIdToDispatchedKeys = new ConcurrentHashMap>(); - regionToDispatchedKeysMap.put(prQ.getFullPath(), bucketIdToDispatchedKeys); - } + final ConcurrentMap> bucketIdToDispatchedKeys = computeIfAbsent( + regionToDispatchedKeysMap, prQ.getFullPath(), k -> new ConcurrentHashMap<>()); addRemovedEventToMap(bucketIdToDispatchedKeys, bucketId, key); if (wasEmpty) { regionToDispatchedKeysMapEmpty.signal(); @@ -1208,42 +1173,19 @@ public void sendQueueRemovalMessageForDroppedEvent(PartitionedRegion prQ, int bu private void addRemovedEventToMap(Map> bucketIdToDispatchedKeys, int bucketId, Object key) { - List dispatchedKeys = bucketIdToDispatchedKeys.get(bucketId); - if (dispatchedKeys == null) { - dispatchedKeys = new ArrayList<>(); - bucketIdToDispatchedKeys.put(bucketId, dispatchedKeys); - } - dispatchedKeys.add(key); + computeIfAbsent(bucketIdToDispatchedKeys, bucketId, k -> new ArrayList<>()).add(key); } protected void addRemovedEvents(PartitionedRegion prQ, int bucketId, List shadowKeys) { - buckToDispatchLock.lock(); - boolean wasEmpty = 
regionToDispatchedKeysMap.isEmpty(); - try { - Map> bucketIdToDispatchedKeys = - regionToDispatchedKeysMap.get(prQ.getFullPath()); - if (bucketIdToDispatchedKeys == null) { - bucketIdToDispatchedKeys = new ConcurrentHashMap<>(); - regionToDispatchedKeysMap.put(prQ.getFullPath(), bucketIdToDispatchedKeys); - } - addRemovedEventsToMap(bucketIdToDispatchedKeys, bucketId, shadowKeys); - if (wasEmpty) { - regionToDispatchedKeysMapEmpty.signal(); - } - } finally { - buckToDispatchLock.unlock(); - } + addRemovedEvents(prQ.getFullPath(), bucketId, shadowKeys); } protected void addRemovedEvents(String prQPath, int bucketId, List shadowKeys) { buckToDispatchLock.lock(); boolean wasEmpty = regionToDispatchedKeysMap.isEmpty(); try { - Map> bucketIdToDispatchedKeys = regionToDispatchedKeysMap.get(prQPath); - if (bucketIdToDispatchedKeys == null) { - bucketIdToDispatchedKeys = new ConcurrentHashMap<>(); - regionToDispatchedKeysMap.put(prQPath, bucketIdToDispatchedKeys); - } + final ConcurrentMap> bucketIdToDispatchedKeys = + computeIfAbsent(regionToDispatchedKeysMap, prQPath, k -> new ConcurrentHashMap<>()); addRemovedEventsToMap(bucketIdToDispatchedKeys, bucketId, shadowKeys); if (wasEmpty) { regionToDispatchedKeysMapEmpty.signal(); @@ -1253,24 +1195,18 @@ protected void addRemovedEvents(String prQPath, int bucketId, List shado } } - private void addRemovedEventsToMap(Map> bucketIdToDispatchedKeys, + private void addRemovedEventsToMap(ConcurrentMap> bucketIdToDispatchedKeys, int bucketId, List keys) { - List dispatchedKeys = bucketIdToDispatchedKeys.get(bucketId); - if (dispatchedKeys == null) { - dispatchedKeys = keys == null ? new ArrayList<>() : keys; - } else { - dispatchedKeys.addAll(keys); - } - bucketIdToDispatchedKeys.put(bucketId, dispatchedKeys); + computeIfAbsent(bucketIdToDispatchedKeys, bucketId, k -> new ArrayList<>()).addAll(keys); } @Override - public List peek(int batchSize) throws InterruptedException, CacheException { + public List peek(int batchSize) throws InterruptedException, CacheException { throw new UnsupportedOperationException(); } @Override - public List peek(int batchSize, int timeToWait) throws InterruptedException, CacheException { + public List peek(int batchSize, int timeToWait) throws InterruptedException, CacheException { final boolean isDebugEnabled = logger.isDebugEnabled(); PartitionedRegion prQ = getRandomShadowPR(); @@ -1468,7 +1404,7 @@ static long calculateTimeToSleep(long timeToWait) { private void addPeekedEvents(List batch, int batchSize) { if (resetLastPeeked) { - // Remove all entries from peekedEvents for buckets that are not longer primary + // Remove all entries from peekedEvents for buckets that are no longer primary // This will prevent repeatedly trying to dispatch non-primary events Object[] helpArray = peekedEvents.toArray(); if (helpArray.length > 0) { @@ -1543,16 +1479,11 @@ private void addPreviouslyPeekedEvents(List batch, int b protected void blockProcessorThreadIfRequired() throws InterruptedException { queueEmptyLock.lock(); try { - if (isQueueEmpty) { // merge44610: this if condition came from cheetah 44610 + if (isQueueEmpty) { if (logger.isDebugEnabled()) { logger.debug("Going to wait, till notified."); } - // merge44610: this time waiting came from cheetah 44610. In cedar 1000 - // is assumed as milliseconds. In cheetah TimeUnitParamter Millisecond - // is used. 
In cheetah stoppable has method to consider timeunit - // parameter but cedar does not have such corresponding method queueEmptyCondition.await(1000); - // merge44610: this time waiting came from cheetah 44610 } // update the flag so that next time when we come we will block. isQueueEmpty = localSizeForProcessor() == 0; @@ -1569,7 +1500,7 @@ protected Object peekAhead(PartitionedRegion prQ, int bucketId) throws CacheExce BucketRegionQueue brq = getBucketRegionQueueByBucketId(prQ, bucketId); if (logger.isDebugEnabled()) { - logger.debug("{}: Peekahead for the bucket {}", this, bucketId); + logger.debug("{}: peekAhead for the bucket {}", this, bucketId); } try { object = brq.peek(); @@ -1634,7 +1565,7 @@ public String displayContent() { Set allLocalBuckets = prQ.getDataStore().getAllLocalBucketRegions(); for (BucketRegion br : allLocalBuckets) { if (br.size() > 0) { - sb.append("bucketId=" + br.getId() + ":" + br.keySet() + ";"); + sb.append("bucketId=").append(br.getId()).append(":").append(br.keySet()).append(";"); } } } @@ -1711,7 +1642,7 @@ public int size() { @Override public void addCacheListener(CacheListener listener) { for (PartitionedRegion prQ : userRegionNameToShadowPRMap.values()) { - AttributesMutator mutator = prQ.getAttributesMutator(); + AttributesMutator mutator = prQ.getAttributesMutator(); mutator.addCacheListener(listener); } } @@ -1844,8 +1775,8 @@ public static boolean isParallelQueue(String regionName) { return regionName.contains(QSTRING); } - public static String getQueueName(String senderId, String regionPath) { - return senderId + QSTRING + convertPathToName(regionPath); + public static String getQueueName(String senderId) { + return senderId + QSTRING; } public static String getSenderId(String regionName) { @@ -1923,7 +1854,7 @@ public void run() { } } - final Map>> temp; + final Map>> temp; buckToDispatchLock.lock(); try { boolean wasEmpty = regionToDispatchedKeysMap.isEmpty(); @@ -1936,7 +1867,6 @@ public void run() { // TODO: This should be optimized. 
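// Editor's note on the snapshot below: the batch-removal thread copies the
// pending dispatched-keys map and clears it while holding buckToDispatchLock,
// then distributes removal messages from the snapshot after unlocking, so
// producers are blocked only for the duration of a shallow copy. A condensed
// sketch of that idiom with hypothetical names (assumes java.util.HashMap,
// java.util.Map, java.util.List, and java.util.concurrent.locks.ReentrantLock):
//
//   private final ReentrantLock lock = new ReentrantLock();
//   private final Map<String, Map<Integer, List<Object>>> pending = new HashMap<>();
//
//   Map<String, Map<Integer, List<Object>>> snapshotAndClear() {
//     lock.lock();
//     try {
//       Map<String, Map<Integer, List<Object>>> snapshot = new HashMap<>(pending);
//       pending.clear(); // producers immediately start filling a fresh map
//       return snapshot; // send removal messages from the snapshot, outside the lock
//     } finally {
//       lock.unlock();
//     }
//   }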
temp = new HashMap<>(regionToDispatchedKeysMap); - regionToDispatchedKeysMap.clear(); } finally { buckToDispatchLock.unlock(); @@ -1994,10 +1924,11 @@ public void run() { } } - private Set getAllRecipients(InternalCache cache, Map map) { - Set recipients = new ObjectOpenHashSet(); - for (Object pr : map.keySet()) { - PartitionedRegion partitionedRegion = (PartitionedRegion) cache.getRegion((String) pr); + private Set getAllRecipients(InternalCache cache, + Map map) { + Set recipients = new ObjectOpenHashSet<>(); + for (String pr : map.keySet()) { + PartitionedRegion partitionedRegion = (PartitionedRegion) cache.getRegion(pr); if (partitionedRegion != null && partitionedRegion.getRegionAdvisor() != null) { recipients.addAll(partitionedRegion.getRegionAdvisor().adviseDataStore()); } @@ -2058,9 +1989,9 @@ public void shutdown() { protected static class ParallelGatewaySenderQueueMetaRegion extends PartitionedRegion { - AbstractGatewaySender sender = null; + final AbstractGatewaySender sender; - public ParallelGatewaySenderQueueMetaRegion(String regionName, RegionAttributes attrs, + public ParallelGatewaySenderQueueMetaRegion(String regionName, RegionAttributes attrs, LocalRegion parentRegion, InternalCache cache, AbstractGatewaySender pgSender, StatisticsClock statisticsClock) { super(regionName, attrs, parentRegion, cache, @@ -2111,12 +2042,6 @@ public AbstractGatewaySender getParallelGatewaySender() { } } - public long estimateMemoryFootprint(SingleObjectSizer sizer) { - return sizer.sizeof(this) + sizer.sizeof(regionToDispatchedKeysMap) - + sizer.sizeof(userRegionNameToShadowPRMap) + sizer.sizeof(bucketToTempQueueMap) - + sizer.sizeof(peekedEvents) + sizer.sizeof(conflationExecutor); - } - public void clear(PartitionedRegion pr, int bucketId) { throw new RuntimeException("This method(clear)is not supported by ParallelGatewaySenderQueue"); } @@ -2126,12 +2051,10 @@ public int size(PartitionedRegion pr, int bucketId) throws ForceReattemptExcepti } static class MetaRegionFactory { - ParallelGatewaySenderQueueMetaRegion newMetataRegion(InternalCache cache, final String prQName, - final RegionAttributes ra, AbstractGatewaySender sender) { - ParallelGatewaySenderQueueMetaRegion meta = - new ParallelGatewaySenderQueueMetaRegion(prQName, ra, null, cache, sender, - sender.getStatisticsClock()); - return meta; + ParallelGatewaySenderQueueMetaRegion newMetaRegion(InternalCache cache, final String prQName, + final RegionAttributes ra, AbstractGatewaySender sender) { + return new ParallelGatewaySenderQueueMetaRegion(prQName, ra, null, cache, sender, + sender.getStatisticsClock()); } } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessage.java b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessage.java index 10951e84bbe8..4bb70e5aa527 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessage.java +++ b/geode-core/src/main/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessage.java @@ -14,6 +14,7 @@ */ package org.apache.geode.internal.cache.wan.parallel; +import static java.lang.String.format; import static org.apache.geode.cache.Region.SEPARATOR; import static org.apache.geode.internal.cache.LocalRegion.InitializationLevel.BEFORE_INITIAL_IMAGE; @@ -43,6 +44,7 @@ import org.apache.geode.internal.cache.LocalRegion.InitializationLevel; import org.apache.geode.internal.cache.PartitionedRegion; import 
org.apache.geode.internal.cache.PartitionedRegionHelper; +import org.apache.geode.internal.cache.RegionQueue; import org.apache.geode.internal.cache.wan.AbstractGatewaySender; import org.apache.geode.internal.cache.wan.GatewaySenderEventImpl; import org.apache.geode.internal.serialization.DeserializationContext; @@ -58,13 +60,13 @@ public class ParallelQueueRemovalMessage extends PooledDistributionMessage { private static final Logger logger = LogService.getLogger(); - private Map>> regionToDispatchedKeysMap; + private Map>> regionToDispatchedKeysMap; public ParallelQueueRemovalMessage() {} public ParallelQueueRemovalMessage( - Map>> rgnToDispatchedKeysMap) { - this.regionToDispatchedKeysMap = rgnToDispatchedKeysMap; + final Map>> regionToDispatchedKeysMap) { + this.regionToDispatchedKeysMap = regionToDispatchedKeysMap; } @Override @@ -74,8 +76,7 @@ public int getDSFID() { @Override public String toString() { - String cname = getShortClassName(); - return cname + "regionToDispatchedKeysMap=" + regionToDispatchedKeysMap + return getShortClassName() + "regionToDispatchedKeysMap=" + regionToDispatchedKeysMap + " sender=" + getSender(); } @@ -87,21 +88,19 @@ protected void process(ClusterDistributionManager dm) { final InitializationLevel oldLevel = LocalRegion.setThreadInitLevelRequirement(BEFORE_INITIAL_IMAGE); try { - for (Object name : regionToDispatchedKeysMap.keySet()) { - final String regionName = (String) name; + for (String regionName : regionToDispatchedKeysMap.keySet()) { final PartitionedRegion region = (PartitionedRegion) cache.getRegion(regionName); - if (region == null) { - continue; - } else { - AbstractGatewaySender abstractSender = region.getParallelGatewaySender(); + if (region != null) { + final AbstractGatewaySender abstractSender = region.getParallelGatewaySender(); // Find the map: bucketId to dispatchedKeys // Find the bucket // Destroy the keys - Map bucketIdToDispatchedKeys = (Map) regionToDispatchedKeysMap.get(regionName); - for (Object bId : bucketIdToDispatchedKeys.keySet()) { + final Map> bucketIdToDispatchedKeys = + regionToDispatchedKeysMap.get(regionName); + for (final Integer bId : bucketIdToDispatchedKeys.keySet()) { final String bucketFullPath = SEPARATOR + PartitionedRegionHelper.PR_ROOT_REGION_NAME + SEPARATOR - + region.getBucketName((Integer) bId); + + region.getBucketName(bId); AbstractBucketRegionQueue brq = (AbstractBucketRegionQueue) cache.getInternalRegionByPath(bucketFullPath); if (isDebugEnabled) { @@ -110,7 +109,7 @@ protected void process(ClusterDistributionManager dm) { bucketFullPath, brq); } - List dispatchedKeys = (List) bucketIdToDispatchedKeys.get(bId); + List dispatchedKeys = bucketIdToDispatchedKeys.get(bId); if (dispatchedKeys != null) { for (Object key : dispatchedKeys) { // First, clear the Event from tempQueueEvents at AbstractGatewaySender level, if @@ -131,7 +130,6 @@ protected void process(ClusterDistributionManager dm) { } else { // if bucket is not initialized, the event should either be in bucket or // tempQueue - boolean isDestroyed = false; if (isDebugEnabled) { logger.debug( "ParallelQueueRemovalMessage : The bucket {} is not yet initialized.", @@ -140,16 +138,13 @@ protected void process(ClusterDistributionManager dm) { brq.getInitializationLock().readLock().lock(); try { if (brq.containsKey(key)) { - // fix for #48082 afterAckForSecondary_EventInBucket(abstractSender, brq, key); destroyKeyFromBucketQueue(brq, key, region); - isDestroyed = true; } // Even if BucketRegionQueue does not have the key, it could be in the - // 
tempQueue - // remove it from there..defect #49196 - destroyFromTempQueue(brq.getPartitionedRegion(), (Integer) bId, key); + // tempQueue; remove it from there. + destroyFromTempQueue(brq.getPartitionedRegion(), bId, key); // Finally, add the key to the failed batch removal keys so that it is // definitely removed from the bucket region queue @@ -158,8 +153,8 @@ protected void process(ClusterDistributionManager dm) { brq.getInitializationLock().readLock().unlock(); } } - } else {// brq is null. Destroy the event from tempQueue. Defect #49196 - destroyFromTempQueue(region, (Integer) bId, key); + } else {// brq is null. Destroy the event from tempQueue. + destroyFromTempQueue(region, bId, key); } } } @@ -176,16 +171,15 @@ protected void process(ClusterDistributionManager dm) { private void afterAckForSecondary_EventInBucket(AbstractGatewaySender abstractSender, AbstractBucketRegionQueue brq, Object key) { for (GatewayEventFilter filter : abstractSender.getGatewayEventFilters()) { - GatewayQueueEvent eventForFilter = (GatewayQueueEvent) brq.get(key); + GatewayQueueEvent eventForFilter = (GatewayQueueEvent) brq.get(key); try { if (eventForFilter != null) { filter.afterAcknowledgement(eventForFilter); } } catch (Exception e) { - logger.fatal(String.format( + logger.fatal(format( "Exception occurred while handling call to %s.afterAcknowledgement for event %s:", - filter.toString(), eventForFilter), - e); + filter.toString(), eventForFilter), e); } } } @@ -222,19 +216,17 @@ void destroyKeyFromBucketQueue(AbstractBucketRegionQueue brq, Object key, "Got ForceReattemptException while getting bucket {} to destroyLocally the keys.", brq.getId()); } - } catch (CancelException e) { - return; // cache or DS is closing + } catch (CancelException ignore) { + // cache or DS is closing } catch (CacheException e) { - logger.error(String.format( - "ParallelQueueRemovalMessage::process:Exception in processing the last disptached key for a ParallelGatewaySenderQueue's shadowPR. The problem is with key,%s for shadowPR with name=%s", - key, prQ.getName()), - e); + logger.error(format( + "ParallelQueueRemovalMessage::process:Exception in processing the last dispatched key for a ParallelGatewaySenderQueue's shadowPR.
The problem is with key,%s for shadowPR with name=%s", + key, prQ.getName()), e); } } - private boolean destroyFromTempQueue(PartitionedRegion qPR, int bId, Object key) { - boolean isDestroyed = false; - Set queues = qPR.getParallelGatewaySender().getQueues(); + private void destroyFromTempQueue(PartitionedRegion qPR, int bId, Object key) { + Set queues = qPR.getParallelGatewaySender().getQueues(); if (queues != null) { ConcurrentParallelGatewaySenderQueue prq = (ConcurrentParallelGatewaySenderQueue) queues.toArray()[0]; @@ -243,20 +235,16 @@ private boolean destroyFromTempQueue(PartitionedRegion qPR, int bId, Object key) Iterator itr = tempQueue.iterator(); while (itr.hasNext()) { GatewaySenderEventImpl eventForFilter = itr.next(); - // fix for #48082 afterAckForSecondary_EventInTempQueue(qPR.getParallelGatewaySender(), eventForFilter); if (eventForFilter.getShadowKey().equals(key)) { itr.remove(); - eventForFilter.release(); // GEODE-1282 - isDestroyed = true; + eventForFilter.release(); } } } } - return isDestroyed; } - // fix for #48082 private void afterAckForSecondary_EventInTempQueue( AbstractGatewaySender parallelGatewaySenderImpl, GatewaySenderEventImpl eventForFilter) { for (GatewayEventFilter filter : parallelGatewaySenderImpl.getGatewayEventFilters()) { @@ -265,7 +253,7 @@ private void afterAckForSecondary_EventInTempQueue( filter.afterAcknowledgement(eventForFilter); } } catch (Exception e) { - logger.fatal(String.format( + logger.fatal(format( "Exception occurred while handling call to %s.afterAcknowledgement for event %s:", filter.toString(), eventForFilter), e); diff --git a/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstance.java b/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstance.java index 26ddfde9425c..8bb8a3a90714 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstance.java +++ b/geode-core/src/main/java/org/apache/geode/internal/offheap/OffHeapRegionEntryHelperInstance.java @@ -183,6 +183,14 @@ public void releaseEntry(@Unretained OffHeapRegionEntry regionEntry, long oldAddress = TokenAddress.objectToAddress(expectedValue); final long newAddress = TokenAddress.objectToAddress(Token.REMOVED_PHASE2); if (regionEntry.setAddress(oldAddress, newAddress)) { + if (regionEntry instanceof DiskEntry) { + DiskId diskId = ((DiskEntry) regionEntry).getDiskId(); + if (diskId != null && diskId.isPendingAsync()) { + synchronized (diskId) { + diskId.setPendingAsync(false); + } + } + } releaseAddress(oldAddress); } } diff --git a/geode-core/src/main/java/org/apache/geode/internal/size/FieldStacker.java b/geode-core/src/main/java/org/apache/geode/internal/size/FieldStacker.java new file mode 100644 index 000000000000..30e93dd6b1cf --- /dev/null +++ b/geode-core/src/main/java/org/apache/geode/internal/size/FieldStacker.java @@ -0,0 +1,62 @@ +/* + * Copyright 2022 VMware, Inc. 
+ * https://network.tanzu.vmware.com/legal_documents/vmware_eula + */ + +package org.apache.geode.internal.size; + +import static java.lang.reflect.Modifier.isStatic; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; + +import org.apache.geode.internal.size.ObjectTraverser.VisitStack; + +class FieldStacker { + private final List instanceFieldAccessors = new ArrayList<>(); + private final List staticFieldAccessors = new ArrayList<>(); + + FieldStacker(Class clazz) { + Class c = clazz; + do { + for (Field field : c.getDeclaredFields()) { + if (!field.getType().isPrimitive()) { + registerAccessorFor(field); + } + } + c = c.getSuperclass(); + } while (c != null); + } + + /** + * Adds the values of object's non-primitive fields to stack. If stack accepts static fields + * from the object's class, the values of the class's non-primitive static fields are also added. + * + * @param object the object whose field values to add to the stack + * @param stack the stack to which to add the field values + */ + void stackFields(Object object, VisitStack stack) { + for (UnsafeInstanceFieldAccessor accessor : instanceFieldAccessors) { + stack.add(object, accessor.get(object)); + } + if (stack.shouldIncludeStatics(object.getClass())) { + for (UnsafeStaticFieldAccessor accessor : staticFieldAccessors) { + stack.add(object, accessor.get()); + } + } + } + + private void registerAccessorFor(Field field) { + try { + if (isStatic(field.getModifiers())) { + staticFieldAccessors.add(new UnsafeStaticFieldAccessor(field)); + } else { + instanceFieldAccessors.add(new UnsafeInstanceFieldAccessor(field)); + } + } catch (UnsupportedOperationException ignored) { + // Java 17+ does not give offsets for fields of lambdas, records, and other hidden classes. + // Quietly ignore these fields. + } + } +} diff --git a/geode-core/src/main/java/org/apache/geode/internal/size/ObjectTraverser.java b/geode-core/src/main/java/org/apache/geode/internal/size/ObjectTraverser.java index 03128eef5ad9..485e7c5c7bf4 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/size/ObjectTraverser.java +++ b/geode-core/src/main/java/org/apache/geode/internal/size/ObjectTraverser.java @@ -1,3 +1,4 @@ +// Copyright (c) VMware, Inc. 2022. All rights reserved. /* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding @@ -15,27 +16,16 @@ package org.apache.geode.internal.size; import java.lang.reflect.Array; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; import java.util.LinkedList; import java.util.Map; import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet; -import org.apache.geode.annotations.Immutable; import org.apache.geode.internal.util.concurrent.CopyOnWriteWeakHashMap; - public class ObjectTraverser { - private final Map, Field[]> FIELD_CACHE = - new CopyOnWriteWeakHashMap<>(); - private final Map, Field[]> STATIC_FIELD_CACHE = + private final Map, FieldStacker> fieldStackers = new CopyOnWriteWeakHashMap<>(); - @Immutable - private static final Field[] NON_PRIMITIVE_ARRAY = new Field[0]; - @Immutable - private static final Field[] PRIMITIVE_ARRAY = new Field[0]; /** * Visit all objects reachable from a given root object, using a breadth first search. 
Using this @@ -46,111 +36,37 @@ public class ObjectTraverser { * @param includeStatics if true, then first time we see a new object type, all of its static * fields will be visited. */ - public void breadthFirstSearch(Object root, Visitor visitor, boolean includeStatics) - throws IllegalArgumentException, IllegalAccessException { + public void breadthFirstSearch(Object root, Visitor visitor, boolean includeStatics) { VisitStack stack = new VisitStack(visitor, includeStatics); stack.add(null, root); + while (!stack.isEmpty()) { Object next = stack.next(); doSearch(next, stack); } - } - private void doSearch(Object root, VisitStack stack) - throws IllegalArgumentException, IllegalAccessException { - Class clazz = root.getClass(); - boolean includeStatics = stack.shouldIncludeStatics(clazz); - Field[] nonPrimitiveFields = getNonPrimitiveFields(clazz, includeStatics); - - if (nonPrimitiveFields == NON_PRIMITIVE_ARRAY) { - int length = Array.getLength(root); - for (int i = 0; i < length; i++) { - Object value = Array.get(root, i); - stack.add(root, value); + private void doSearch(Object object, VisitStack stack) { + Class clazz = object.getClass(); + if (clazz.isArray()) { + if (!clazz.getComponentType().isPrimitive()) { + addArrayElements(object, stack); } return; } - - if (includeStatics) { - for (Field field : getStaticFields(clazz)) { - Object value = field.get(root); - stack.add(root, value); - } - } - - for (Field field : nonPrimitiveFields) { - Object value = field.get(root); - stack.add(root, value); - } - } - - private Field[] getNonPrimitiveFields(Class clazz, boolean includeStatics) { - Field[] result = FIELD_CACHE.get(clazz); - if (result == null) { - cacheFields(clazz, includeStatics); - result = FIELD_CACHE.get(clazz); - } - return result; - } - - private Field[] getStaticFields(Class clazz) { - Field[] result = STATIC_FIELD_CACHE.get(clazz); - if (result == null) { - cacheFields(clazz, true); - result = STATIC_FIELD_CACHE.get(clazz); - } - return result; + FieldStacker fieldStacker = fieldStackers.computeIfAbsent(clazz, FieldStacker::new); + fieldStacker.stackFields(object, stack); } - private void cacheFields(final Class clazz, boolean includeStatics) { - if (clazz != null && clazz.isArray()) { - Class componentType = clazz.getComponentType(); - if (componentType.isPrimitive()) { - FIELD_CACHE.put(clazz, PRIMITIVE_ARRAY); - STATIC_FIELD_CACHE.put(clazz, PRIMITIVE_ARRAY); - } else { - FIELD_CACHE.put(clazz, NON_PRIMITIVE_ARRAY); - STATIC_FIELD_CACHE.put(clazz, NON_PRIMITIVE_ARRAY); - } - return; - } - - ArrayList staticFields = new ArrayList<>(); - ArrayList nonPrimitiveFields = new ArrayList<>(); - - Class currentClass = clazz; - while (currentClass != null) { - Field[] fields = currentClass.getDeclaredFields(); - for (Field field : fields) { - Class fieldType = field.getType(); - if (!fieldType.isPrimitive()) { - if (Modifier.isStatic(field.getModifiers())) { - if (includeStatics) { - field.setAccessible(true); - staticFields.add(field); - } - } else { - field.setAccessible(true); - nonPrimitiveFields.add(field); - } - } - } - - currentClass = currentClass.getSuperclass(); - } - - FIELD_CACHE.put(clazz, nonPrimitiveFields.toArray(new Field[0])); - if (includeStatics) { - STATIC_FIELD_CACHE.put(clazz, staticFields.toArray(new Field[0])); + private static void addArrayElements(Object array, VisitStack stack) { + int length = Array.getLength(array); + for (int i = 0; i < length; i++) { + Object value = Array.get(array, i); + stack.add(array, value); } } - Map, Field[]> 
getStaticFieldCache() { - return STATIC_FIELD_CACHE; - } - public interface Visitor { /** * Visit an object @@ -162,9 +78,7 @@ public interface Visitor { boolean visit(Object parent, Object object); } - - - private static class VisitStack { + static class VisitStack { private final ReferenceOpenHashSet seen = new ReferenceOpenHashSet<>(); private final LinkedList stack = new LinkedList<>(); private final Visitor visitor; @@ -175,7 +89,7 @@ private static class VisitStack { this.includeStatics = includeStatics; } - public void add(Object parent, Object object) { + void add(Object parent, Object object) { if (object == null) { return; } @@ -189,15 +103,15 @@ public void add(Object parent, Object object) { } } - public Object next() { + Object next() { return stack.removeFirst(); } - public boolean isEmpty() { + boolean isEmpty() { return stack.isEmpty(); } - public boolean shouldIncludeStatics(Class clazz) { + boolean shouldIncludeStatics(Class clazz) { if (!includeStatics) { return false; } diff --git a/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionObjectSizer.java b/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionObjectSizer.java index 81028315d30a..a8289423fb30 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionObjectSizer.java +++ b/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionObjectSizer.java @@ -58,7 +58,9 @@ public class ReflectionObjectSizer implements ObjectSizer, Serializable { && !(object instanceof InternalDistributedSystem) && !(object instanceof ClassLoader) && !(object instanceof Logger) - && !(object instanceof StatisticsManager); + && !(object instanceof StatisticsManager) + && !(object instanceof Thread) + && !(object instanceof ThreadGroup); }; @Override diff --git a/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionSingleObjectSizer.java b/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionSingleObjectSizer.java index ea541b2635cb..e3b0361dbc5f 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionSingleObjectSizer.java +++ b/geode-core/src/main/java/org/apache/geode/internal/size/ReflectionSingleObjectSizer.java @@ -1,3 +1,4 @@ +// Copyright (c) VMware, Inc. 2022. All rights reserved. /* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding @@ -120,7 +121,7 @@ static long unsafeSizeof(Class clazz, Unsafe myUnsafe) { for (Field field : fields) { if (!Modifier.isStatic(field.getModifiers())) { try { - long offset = myUnsafe.fieldOffset(field); + long offset = myUnsafe.objectFieldOffset(field); if (offset >= lastFieldOffset) { lastFieldOffset = offset; lastField = field; diff --git a/geode-core/src/main/java/org/apache/geode/internal/size/UnsafeInstanceFieldAccessor.java b/geode-core/src/main/java/org/apache/geode/internal/size/UnsafeInstanceFieldAccessor.java new file mode 100644 index 000000000000..429155836e73 --- /dev/null +++ b/geode-core/src/main/java/org/apache/geode/internal/size/UnsafeInstanceFieldAccessor.java @@ -0,0 +1,42 @@ +/* + * Copyright 2022 VMware, Inc. 
+ * https://network.tanzu.vmware.com/legal_documents/vmware_eula + */ + +package org.apache.geode.internal.size; + +import java.lang.reflect.Field; + +import org.apache.geode.annotations.Immutable; +import org.apache.geode.unsafe.internal.sun.misc.Unsafe; + +/** + * Accesses the value of a non-primitive instance field using {@link Unsafe}, bypassing the + * {@link Field}'s access and validity checks. + */ +class UnsafeInstanceFieldAccessor { + @Immutable + static final Unsafe UNSAFE = new Unsafe(); + + private final long offset; + + /** + * Creates an accessor for non-primitive instance field f. + * + * @param f the field + * @throws UnsupportedOperationException if the field's declaring class is hidden + */ + UnsafeInstanceFieldAccessor(Field f) { + offset = UNSAFE.objectFieldOffset(f); + } + + /** + * Returns the value of the field for object o. + * + * @param o the object in which to access the field value + * @return the value of the field for object o + */ + Object get(Object o) { + return UNSAFE.getObject(o, offset); + } +} diff --git a/geode-core/src/main/java/org/apache/geode/internal/size/UnsafeStaticFieldAccessor.java b/geode-core/src/main/java/org/apache/geode/internal/size/UnsafeStaticFieldAccessor.java new file mode 100644 index 000000000000..64b72494f9b5 --- /dev/null +++ b/geode-core/src/main/java/org/apache/geode/internal/size/UnsafeStaticFieldAccessor.java @@ -0,0 +1,42 @@ +/* + * Copyright 2022 VMware, Inc. + * https://network.tanzu.vmware.com/legal_documents/vmware_eula + */ + +package org.apache.geode.internal.size; + +import java.lang.reflect.Field; + +import org.apache.geode.annotations.Immutable; +import org.apache.geode.unsafe.internal.sun.misc.Unsafe; + +/** + * Accesses the value of a non-primitive static field using {@link Unsafe}, bypassing the + * {@link Field}'s access and validity checks. + */ +class UnsafeStaticFieldAccessor { + @Immutable + static final Unsafe UNSAFE = new Unsafe(); + private final Object base; + private final long offset; + + /** + * Creates an accessor for non-primitive static field f. + * + * @param f the field + * @throws UnsupportedOperationException if the field's declaring class is hidden + */ + UnsafeStaticFieldAccessor(Field f) { + base = UNSAFE.staticFieldBase(f); + offset = UNSAFE.staticFieldOffset(f); + } + + /** + * Returns the value of the static field. 
+ * + * @return the value of the static field + */ + Object get() { + return UNSAFE.getObject(base, offset); + } +} diff --git a/geode-core/src/main/java/org/apache/geode/internal/stats50/VMStats50.java b/geode-core/src/main/java/org/apache/geode/internal/stats50/VMStats50.java index a2d25dadeb0b..587a66a8475a 100644 --- a/geode-core/src/main/java/org/apache/geode/internal/stats50/VMStats50.java +++ b/geode-core/src/main/java/org/apache/geode/internal/stats50/VMStats50.java @@ -40,7 +40,6 @@ import org.apache.geode.StatisticsTypeFactory; import org.apache.geode.SystemFailure; import org.apache.geode.annotations.Immutable; -import org.apache.geode.annotations.internal.MakeNotStatic; import org.apache.geode.internal.classloader.ClassPathLoader; import org.apache.geode.internal.statistics.StatisticsTypeFactoryImpl; import org.apache.geode.internal.statistics.VMStatsContract; @@ -63,19 +62,19 @@ public class VMStats50 implements VMStatsContract { private static final MemoryMXBean memBean; @Immutable private static final OperatingSystemMXBean osBean; + @Immutable + static final com.sun.management.OperatingSystemMXBean cpuBean; /** * This is actually an instance of UnixOperatingSystemMXBean but this class is not available on * Windows so needed to make this a runtime check. */ @Immutable - private static final Object unixBean; + static final Object unixBean; @Immutable private static final Method getMaxFileDescriptorCount; @Immutable private static final Method getOpenFileDescriptorCount; @Immutable - private static final Method getProcessCpuTime; - @Immutable private static final ThreadMXBean threadBean; private static final int pendingFinalizationCountId; @@ -103,7 +102,6 @@ public class VMStats50 implements VMStatsContract { private static final StatisticsType gcType; private static final int gc_collectionsId; private static final int gc_collectionTimeId; - @MakeNotStatic private final Map gcMap = new HashMap<>(); @@ -121,7 +119,6 @@ public class VMStats50 implements VMStatsContract { private static final int mp_collectionUsageThresholdId; private static final int mp_usageExceededId; private static final int mp_collectionUsageExceededId; - @MakeNotStatic private final Map mpMap = new HashMap<>(); @@ -151,10 +148,14 @@ public class VMStats50 implements VMStatsContract { clBean = ManagementFactory.getClassLoadingMXBean(); memBean = ManagementFactory.getMemoryMXBean(); osBean = ManagementFactory.getOperatingSystemMXBean(); + if (osBean instanceof com.sun.management.OperatingSystemMXBean) { + cpuBean = (com.sun.management.OperatingSystemMXBean) osBean; + } else { + cpuBean = null; + } { Method m1 = null; Method m2 = null; - Method m3 = null; Object bean = null; try { Class c = @@ -166,11 +167,6 @@ public class VMStats50 implements VMStatsContract { } else { // leave them null } - // Always set ProcessCpuTime - m3 = osBean.getClass().getMethod("getProcessCpuTime"); - if (m3 != null) { - m3.setAccessible(true); - } } catch (VirtualMachineError err) { SystemFailure.initiateFailure(err); // If this ever returns, rethrow the error. 
We're poisoned @@ -188,12 +184,10 @@ bean = null; m1 = null; m2 = null; - m3 = null; } finally { unixBean = bean; getMaxFileDescriptorCount = m1; getOpenFileDescriptorCount = m2; - getProcessCpuTime = m3; } } threadBean = ManagementFactory.getThreadMXBean(); @@ -596,24 +590,8 @@ public void refresh() { vmStats.setLong(totalMemoryId, rt.totalMemory()); vmStats.setLong(maxMemoryId, rt.maxMemory()); - // Compute processCpuTime separately, if not accessible ignore - try { - if (getProcessCpuTime != null) { - Object v = getProcessCpuTime.invoke(osBean); - vmStats.setLong(processCpuTimeId, (Long) v); - } - } catch (VirtualMachineError err) { - SystemFailure.initiateFailure(err); - // If this ever returns, rethrow the error. We're poisoned - // now, so don't let this thread continue. - throw err; - } catch (Throwable ex) { - // Whenever you catch Error or Throwable, you must also - // catch VirtualMachineError (see above). However, there is - // _still_ a possibility that you are dealing with a cascading - // error condition, so you also need to check to see if the JVM - // is still usable: - SystemFailure.checkFailure(); + if (cpuBean != null) { + vmStats.setLong(processCpuTimeId, cpuBean.getProcessCpuTime()); } if (unixBean != null) { diff --git a/geode-core/src/main/java/org/apache/geode/management/MemberMXBean.java b/geode-core/src/main/java/org/apache/geode/management/MemberMXBean.java index a94bcfa0b800..4f70f0423191 100644 --- a/geode-core/src/main/java/org/apache/geode/management/MemberMXBean.java +++ b/geode-core/src/main/java/org/apache/geode/management/MemberMXBean.java @@ -184,14 +184,42 @@ public interface MemberMXBean { /** * Returns JVM metrics. * - * @return JVM metrics + * @return A collection of the following JVM metrics beans:
+ * gcCount - (Long) the number of times garbage collection has occurred.
+ * gcTimeMillis - (Long) the amount of time (in milliseconds) spent on garbage + * collection.
+ * initMemory - (Long) the initial number of megabytes of memory requested from the + * operating system.
+ * committedMemory - (Long) the current number of megabytes of memory allocated.
+ * usedMemory - (Long) the current number of megabytes of memory being used.
+ * maxMemory - (Long) the maximum number of megabytes of memory available from the + * operating system.
+ * totalThreads - (Integer) the number of threads in use. */ JVMMetrics showJVMMetrics(); /** * Returns operating system metrics. * - * @return operating system metrics + * @return A collection of the following operating system metrics beans:
+ * maxFileDescriptorCount - (Long) the maximum number of open file descriptors allowed by + * the operating system.
+ * openFileDescriptorCount - (Long) the current number of open file descriptors.
+ * processCpuTime - (Long) the amount of time (in nanoseconds) used by the member's + * process.
+ * committedVirtualMemorySize - (Long) the current number of megabytes of memory + * allocated.
+ * totalPhysicalMemorySize - (Long) the total number of megabytes of physical memory + * available to the operating system.
+ * freePhysicalMemorySize - (Long) the number of megabytes of free memory available to the + * operating system.
+ * totalSwapSpaceSize - (Long) the number of megabytes of swap space allocated.
+ * freeSwapSpaceSize - (Long) the number of megabytes of free swap space.
+ * name - (String) the name of the operating system.
+ * version - (String) the version of the operating system.
+ * arch - (String) the hardware architecture.
+ * availableProcessors - (Integer) the number of available processors.
+ * systemLoadAverage - (Double) the system load average. */ OSMetrics showOSMetrics(); diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/beans/QueryDataFunction.java b/geode-core/src/main/java/org/apache/geode/management/internal/beans/QueryDataFunction.java index c726d95bbe71..69c564847f1f 100644 --- a/geode-core/src/main/java/org/apache/geode/management/internal/beans/QueryDataFunction.java +++ b/geode-core/src/main/java/org/apache/geode/management/internal/beans/QueryDataFunction.java @@ -52,8 +52,7 @@ * This function is executed on one or multiple members based on the member input to * DataQueryEngine.queryData() */ -@SuppressWarnings({"unchecked"}) -public class QueryDataFunction implements Function, InternalEntity { +public class QueryDataFunction implements Function, InternalEntity { private static final long serialVersionUID = 1L; @@ -87,8 +86,8 @@ public boolean hasResult() { } @Override - public void execute(final FunctionContext context) { - Object[] functionArgs = (Object[]) context.getArguments(); + public void execute(final FunctionContext context) { + Object[] functionArgs = context.getArguments(); boolean showMember = (Boolean) functionArgs[DISPLAY_MEMBERWISE]; String queryString = (String) functionArgs[QUERY]; String regionName = (String) functionArgs[REGION]; @@ -112,18 +111,19 @@ public String getId() { } // return the compressed result data - private byte[] selectWithType(final FunctionContext context, String queryString, + private byte[] selectWithType(final FunctionContext context, String queryString, final boolean showMember, final String regionName, final int limit, final int queryResultSetLimit, final int queryCollectionsDepth) throws Exception { InternalCache cache = (InternalCache) context.getCache(); - Function localQueryFunc = new LocalQueryFunction("LocalQueryFunction", regionName, showMember) - .setOptimizeForWrite(true); + Function localQueryFunc = new LocalQueryFunction("LocalQueryFunction", regionName, + showMember) + .setOptimizeForWrite(true); queryString = applyLimitClause(queryString, limit, queryResultSetLimit); try { QueryResultFormatter result = new QueryResultFormatter(queryCollectionsDepth); - Region region = cache.getRegion(regionName); + Region region = cache.getRegion(regionName); if (region == null) { throw new Exception(String.format("Cannot find region %s in member %s", regionName, @@ -139,10 +139,7 @@ private byte[] selectWithType(final FunctionContext context, String queryString, Query query = queryService.newQuery(queryString); results = query.execute(); - } else { - ResultCollector rcollector; - PartitionedRegion parRegion = PartitionedRegionHelper.getPartitionedRegion(regionName, cache); if (parRegion != null && showMember) { @@ -154,29 +151,27 @@ private byte[] selectWithType(final FunctionContext context, String queryString, for (BucketRegion bRegion : localPrimaryBucketRegions) { localPrimaryBucketSet.add(bRegion.getId()); } - LocalDataSet lds = - new LocalDataSet(parRegion, localPrimaryBucketSet); + LocalDataSet lds = new LocalDataSet<>(parRegion, localPrimaryBucketSet); DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery(queryString); final ExecutionContext executionContext = new QueryExecutionContext(null, cache, query); results = lds.executeQuery(query, executionContext, null, localPrimaryBucketSet); } } else { - rcollector = FunctionService.onRegion(cache.getRegion(regionName)) - .setArguments(queryString).execute(localQueryFunc); - results = rcollector.getResult(); + final 
ResultCollector resultCollector = + execute(queryString, regionName, cache, localQueryFunc); + results = resultCollector.getResult(); } } - if (results != null && results instanceof SelectResults) { - - SelectResults selectResults = (SelectResults) results; + if (results instanceof SelectResults) { + SelectResults selectResults = (SelectResults) results; for (Object object : selectResults) { result.add(RESULT_KEY, object); noDataFound = false; } - } else if (results != null && results instanceof ArrayList) { - ArrayList listResults = (ArrayList) results; - ArrayList actualResult = (ArrayList) listResults.get(0); + } else if (results instanceof ArrayList) { + ArrayList listResults = (ArrayList) results; + ArrayList actualResult = (ArrayList) listResults.get(0); for (Object object : actualResult) { result.add(RESULT_KEY, object); noDataFound = false; @@ -198,8 +193,15 @@ private byte[] selectWithType(final FunctionContext context, String queryString, } } + @SuppressWarnings("unchecked") + private ResultCollector execute(final String queryString, final String regionName, + final InternalCache cache, final Function localQueryFunc) { + return FunctionService.onRegion(cache.getRegion(regionName)) + .setArguments(queryString).execute(localQueryFunc); + } + /** - * Matches the input query with query with limit pattern. If limit is found in input query this + * Matches the input query with limit pattern. If limit is found in input query this * function ignores. Else it will append a default limit .. 1000 If input limit is 0 then also it * will append default limit of 1000 * @@ -211,7 +213,7 @@ private byte[] selectWithType(final FunctionContext context, String queryString, protected static String applyLimitClause(final String query, int limit, final int queryResultSetLimit) { String[] lines = query.split(System.lineSeparator()); - List queryStrings = new ArrayList(); + List queryStrings = new ArrayList<>(); for (String line : lines) { // remove the comments if (!line.startsWith("--") && line.length() > 0) { @@ -245,7 +247,7 @@ protected static String applyLimitClause(final String query, int limit, /** * Function to gather data locally. 
This function is required to execute query with region context */ - private class LocalQueryFunction implements InternalFunction { + private static class LocalQueryFunction implements InternalFunction { private static final long serialVersionUID = 1L; @@ -282,19 +284,19 @@ public LocalQueryFunction setOptimizeForWrite(final boolean optimizeForWrite) { } @Override - public void execute(final FunctionContext context) { + public void execute(final FunctionContext context) { InternalCache cache = (InternalCache) context.getCache(); QueryService queryService = cache.getQueryService(); - String qstr = (String) context.getArguments(); - Region r = cache.getRegion(regionName); + String queryString = context.getArguments(); + Region r = cache.getRegion(regionName); try { - Query query = queryService.newQuery(qstr); - SelectResults sr; + Query query = queryService.newQuery(queryString); + SelectResults sr; if (r.getAttributes().getPartitionAttributes() != null && showMembers) { - sr = (SelectResults) query.execute((RegionFunctionContext) context); + sr = (SelectResults) query.execute((RegionFunctionContext) context); context.getResultSender().lastResult(sr.asList()); } else { - sr = (SelectResults) query.execute(); + sr = (SelectResults) query.execute(); context.getResultSender().lastResult(sr.asList()); } diff --git a/geode-core/src/main/java/org/apache/geode/management/internal/messages/CompactRequest.java b/geode-core/src/main/java/org/apache/geode/management/internal/messages/CompactRequest.java index c10dcad35420..87cea4aea942 100644 --- a/geode-core/src/main/java/org/apache/geode/management/internal/messages/CompactRequest.java +++ b/geode-core/src/main/java/org/apache/geode/management/internal/messages/CompactRequest.java @@ -34,6 +34,7 @@ import org.apache.geode.distributed.internal.DistributionManager; import org.apache.geode.distributed.internal.DistributionMessage; import org.apache.geode.distributed.internal.ReplyException; +import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.internal.admin.remote.AdminMultipleReplyProcessor; import org.apache.geode.internal.admin.remote.AdminRequest; import org.apache.geode.internal.admin.remote.AdminResponse; @@ -60,7 +61,7 @@ public class CompactRequest extends AdminRequest { private static String notExecutedMembers; public static Map send(DistributionManager dm, - String diskStoreName, Set recipients) { + String diskStoreName, Set recipients) { Map results = Collections.emptyMap(); if (recipients != null && !recipients.isEmpty()) { diff --git a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt index 6a13d82c4239..5a4d7c5c5609 100644 --- a/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt +++ b/geode-core/src/main/resources/org/apache/geode/internal/sanctioned-geode-core-serializables.txt @@ -442,7 +442,7 @@ org/apache/geode/management/internal/ManagementFunction,true,1,mbeanServer:javax/management/MBeanServer org/apache/geode/management/internal/NotificationKey,true,2207984824068608930,currentTime:long,objectName:javax/management/ObjectName org/apache/geode/management/internal/beans/FileUploader$RemoteFile,false,filename:java/lang/String,outputStream:com/healthmarketscience/rmiio/RemoteOutputStream org/apache/geode/management/internal/beans/QueryDataFunction,true,1
-org/apache/geode/management/internal/beans/QueryDataFunction$LocalQueryFunction,true,1,id:java/lang/String,optimizeForWrite:boolean,regionName:java/lang/String,showMembers:boolean,this$0:org/apache/geode/management/internal/beans/QueryDataFunction +org/apache/geode/management/internal/beans/QueryDataFunction$LocalQueryFunction,true,1,id:java/lang/String,optimizeForWrite:boolean,regionName:java/lang/String,showMembers:boolean org/apache/geode/management/internal/beans/stats/StatType,false org/apache/geode/management/internal/cli/functions/CacheRealizationFunction,true,6209080805559452304 org/apache/geode/management/internal/cli/functions/RebalanceFunction,true,1 diff --git a/geode-core/src/test/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOpRetryTest.java b/geode-core/src/test/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOpRetryTest.java index c47d7edeca78..8567545aed86 100644 --- a/geode-core/src/test/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOpRetryTest.java +++ b/geode-core/src/test/java/org/apache/geode/cache/client/internal/ExecuteRegionFunctionSingleHopOpRetryTest.java @@ -203,7 +203,7 @@ private void executeFunctionSingleHopAndValidate( executor1 -> new ExecuteRegionFunctionSingleHopOp.ExecuteRegionFunctionSingleHopOpImpl( testSupport.getRegion().getFullPath(), function, executor1, resultCollector, - FUNCTION_HAS_RESULT, new HashSet<>(), + new HashSet<>(), ExecuteFunctionTestSupport.ALL_BUCKETS_SETTING, DEFAULT_CLIENT_FUNCTION_TIMEOUT), () -> new ExecuteRegionFunctionOp.ExecuteRegionFunctionOpImpl( diff --git a/geode-core/src/test/java/org/apache/geode/cache/query/internal/AttributeDescriptorTest.java b/geode-core/src/test/java/org/apache/geode/cache/query/internal/AttributeDescriptorTest.java index a55ba672ed6b..985bbac670a9 100644 --- a/geode-core/src/test/java/org/apache/geode/cache/query/internal/AttributeDescriptorTest.java +++ b/geode-core/src/test/java/org/apache/geode/cache/query/internal/AttributeDescriptorTest.java @@ -131,14 +131,14 @@ public void getReadMethodShouldReturnRequestedMethodForAttributesWithAccessors( @Test public void getReadMethodShouldReturnNullAndUpdateCachedClassToMethodsMapWhenMethodCanNotBeFound() { - DefaultQuery.getPdxClasstoMethodsmap().clear(); + DefaultQuery.getPdxClassToMethodsMap().clear(); AttributeDescriptor attributeDescriptor = new AttributeDescriptor(typeRegistry, "nonExistingAttribute"); assertThat(attributeDescriptor.getReadMethod(TestBean.class)).isNull(); - assertThat(DefaultQuery.getPdxClasstoMethodsmap().isEmpty()).isFalse(); - assertThat(DefaultQuery.getPdxClasstoMethodsmap() + assertThat(DefaultQuery.getPdxClassToMethodsMap().isEmpty()).isFalse(); + assertThat(DefaultQuery.getPdxClassToMethodsMap() .containsKey(TestBean.class.getCanonicalName())).isTrue(); - assertThat(DefaultQuery.getPdxClasstoMethodsmap().get(TestBean.class.getCanonicalName()) + assertThat(DefaultQuery.getPdxClassToMethodsMap().get(TestBean.class.getCanonicalName()) .contains("nonExistingAttribute")).isTrue(); } diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/ClusterDistributionManagerTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/ClusterDistributionManagerTest.java index 913183a64d36..ed2ef2fdfbf1 100644 --- a/geode-core/src/test/java/org/apache/geode/distributed/internal/ClusterDistributionManagerTest.java +++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/ClusterDistributionManagerTest.java @@ -14,18 
+14,38 @@ */ package org.apache.geode.distributed.internal; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.isA; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Set; + +import org.junit.Rule; import org.junit.Test; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.mockito.quality.Strictness; import org.apache.geode.ForcedDisconnectException; +import org.apache.geode.alerting.internal.api.AlertingService; +import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.distributed.internal.membership.api.MemberDisconnectedException; +import org.apache.geode.distributed.internal.membership.api.MembershipLocator; +import org.apache.geode.distributed.internal.membership.api.MembershipView; +import org.apache.geode.internal.admin.remote.RemoteTransportConfig; +import org.apache.geode.internal.inet.LocalHostUtil; public class ClusterDistributionManagerTest { + @Rule + public MockitoRule mockitoRule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS); + @Test public void membershipFailureProcessingCreatesForcedDisconnectException() { ClusterDistributionManager manager = mock(ClusterDistributionManager.class); @@ -37,4 +57,31 @@ public void membershipFailureProcessingCreatesForcedDisconnectException() { // the root cause should be a ForcedDisconnectException verify(manager, times(1)).setRootCause(isA(ForcedDisconnectException.class)); } + + @Test + public void getEquivalentsForLocalHostReturnsOneAddress() throws UnknownHostException { + AlertingService alertingService = mock(AlertingService.class); + Distribution distribution = mock(Distribution.class); + DistributionConfig distributionConfig = mock(DistributionConfig.class); + InetAddress localHost = LocalHostUtil.getLocalHost(); + InternalDistributedSystem system = mock(InternalDistributedSystem.class); + MembershipLocator membershipLocator = + uncheckedCast(mock(MembershipLocator.class)); + RemoteTransportConfig transportConfig = mock(RemoteTransportConfig.class); + + when(distribution.getView()).thenReturn(uncheckedCast(mock(MembershipView.class))); + when(system.getConfig()).thenReturn(distributionConfig); + + DistributionManager distributionManager = new ClusterDistributionManager( + system, + transportConfig, + alertingService, + membershipLocator, + (sys, statId) -> mock(DistributionStats.class), + (dm, transport, sys, membershipListener, messageListener, locator) -> distribution); + + Set members = distributionManager.getEquivalents(localHost); + + assertThat(members).contains(localHost); + } } diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/DistributionAdvisorTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/DistributionAdvisorTest.java index acc4366536a1..289dc72b15ad 100644 --- a/geode-core/src/test/java/org/apache/geode/distributed/internal/DistributionAdvisorTest.java +++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/DistributionAdvisorTest.java @@ -53,7 +53,7 @@ public void setup() { dataPolicy = mock(DataPolicy.class); when(distributionAdvisor.getRegionForDeltaGII()).thenReturn(distributedRegion); - 
when(distributionAdvisor.getDelay(distributedRegion)).thenReturn(delay); + when(distributionAdvisor.getSyncDelayForCrashedMemberMilliseconds()).thenReturn(delay); when(distributedRegion.getDataPolicy()).thenReturn(dataPolicy); when(distributedRegion.getConcurrencyChecksEnabled()).thenReturn(true); when(distributedRegion.isInitializedWithWait()).thenReturn(true); diff --git a/geode-core/src/test/java/org/apache/geode/distributed/internal/LonerDistributionManagerTest.java b/geode-core/src/test/java/org/apache/geode/distributed/internal/LonerDistributionManagerTest.java new file mode 100644 index 000000000000..9e0406255a58 --- /dev/null +++ b/geode-core/src/test/java/org/apache/geode/distributed/internal/LonerDistributionManagerTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.geode.distributed.internal; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Set; + +import org.junit.Rule; +import org.junit.Test; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; +import org.mockito.quality.Strictness; + +import org.apache.geode.internal.inet.LocalHostUtil; +import org.apache.geode.internal.logging.InternalLogWriter; + +public class LonerDistributionManagerTest { + + @Rule + public MockitoRule mockitoRule = MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS); + + @Test + public void getEquivalentsForLocalHostReturnsOneAddress() throws UnknownHostException { + DistributionConfig distributionConfig = mock(DistributionConfig.class); + InetAddress localHost = LocalHostUtil.getLocalHost(); + InternalDistributedSystem system = mock(InternalDistributedSystem.class); + InternalLogWriter logWriter = mock(InternalLogWriter.class); + + when(system.getConfig()).thenReturn(distributionConfig); + + DistributionManager distributionManager = new LonerDistributionManager(system, logWriter); + + Set members = distributionManager.getEquivalents(localHost); + + assertThat(members).contains(localHost); + } +} diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/AbstractRegionJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/AbstractRegionJUnitTest.java index 69f66aa1c2f4..56eb96b915e5 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/AbstractRegionJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/AbstractRegionJUnitTest.java @@ -17,7 +17,6 @@ import static org.apache.geode.internal.statistics.StatisticsClockFactory.disabledClock; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; -import 
static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import java.util.Set; @@ -54,8 +53,10 @@ public class AbstractRegionJUnitTest { */ @Test public void extensionPointIsSimpleExtensionPointByDefault() { - AbstractRegion region = spy(AbstractRegion.class); + AbstractRegion region = createTestableAbstractRegion(); + ExtensionPoint> extensionPoint = region.getExtensionPoint(); + assertThat(extensionPoint).isNotNull().isInstanceOf(SimpleExtensionPoint.class); } @@ -102,7 +103,7 @@ public void getAllGatewaySenderIdsIncludesBothGatewaySenderIdsAndAsyncQueueIds() } private AbstractRegion createTestableAbstractRegion() { - RegionAttributes regionAttributes = mock(RegionAttributes.class); + RegionAttributes regionAttributes = mock(RegionAttributes.class); when(regionAttributes.getDataPolicy()).thenReturn(DataPolicy.DEFAULT); EvictionAttributes evictionAttributes = mock(EvictionAttributes.class); when(evictionAttributes.getAction()).thenReturn(EvictionAction.NONE); @@ -119,9 +120,8 @@ private AbstractRegion createTestableAbstractRegion() { LocalRegion.ServerRegionProxyConstructor.class); Function regionPerfStatsFactory = (localRegion) -> mock(RegionPerfStats.class); - AbstractRegion region = new LocalRegion("regionName", regionAttributes, null, Fakes.cache(), + return new LocalRegion("regionName", regionAttributes, null, Fakes.cache(), new InternalRegionArguments(), null, regionMapConstructor, proxyConstructor, null, null, regionPerfStatsFactory, disabledClock()); - return region; } } diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/InitialImageOperationTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/InitialImageOperationTest.java index 8eb694a78780..9ed6cb8ed997 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/InitialImageOperationTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/InitialImageOperationTest.java @@ -17,6 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -26,7 +27,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Iterator; import java.util.List; import org.junit.Before; @@ -37,13 +37,12 @@ import org.apache.geode.cache.Scope; import org.apache.geode.distributed.internal.ClusterDistributionManager; import org.apache.geode.distributed.internal.membership.InternalDistributedMember; -import org.apache.geode.internal.cache.persistence.DiskStoreID; -import org.apache.geode.internal.cache.versions.DiskRegionVersionVector; import org.apache.geode.internal.cache.versions.DiskVersionTag; import org.apache.geode.internal.cache.versions.RegionVersionVector; import org.apache.geode.internal.cache.versions.VMRegionVersionVector; +import org.apache.geode.internal.cache.versions.VMVersionTag; import org.apache.geode.internal.cache.versions.VersionSource; -import org.apache.geode.internal.cache.versions.VersionStamp; +import org.apache.geode.internal.cache.versions.VersionTag; public class InitialImageOperationTest { @@ -135,35 +134,13 @@ public void processChunkDoesNotThrowIfDiskVersionTagMemberIDIsNull() } @Test - public void shouldRemoveDepartedMembersFromRVVForNonPersistentRegion() { + public void shouldRemoveDepartedMembersFromRVVForNonPersistentRegion() + throws IOException, 
ClassNotFoundException { InternalDistributedMember server1 = new InternalDistributedMember("host1", 101); InternalDistributedMember server2 = new InternalDistributedMember("host2", 102); InternalDistributedMember server3 = new InternalDistributedMember("host3", 103); InternalDistributedMember server4 = new InternalDistributedMember("host4", 104); - when(distributedRegion.getDataPolicy()).thenReturn(DataPolicy.REPLICATE); - when(distributedRegion.getVersionMember()).thenReturn(server1); - - RegionEntry re1 = mock(RegionEntry.class); - RegionEntry re2 = mock(RegionEntry.class); - RegionEntry re3 = mock(RegionEntry.class); - ArrayList entries = new ArrayList<>(); - entries.add(re1); - entries.add(re2); - entries.add(re3); - Iterator iterator = entries.iterator(); - when(distributedRegion.getBestIterator(false)).thenReturn(iterator); - VersionStamp stamp1 = mock(VersionStamp.class); - VersionStamp stamp2 = mock(VersionStamp.class); - VersionStamp stamp3 = mock(VersionStamp.class); - when(re1.getVersionStamp()).thenReturn(stamp1); - when(re2.getVersionStamp()).thenReturn(stamp2); - when(re3.getVersionStamp()).thenReturn(stamp3); - when(stamp1.getMemberID()).thenReturn(server1); - when(stamp2.getMemberID()).thenReturn(server2); - when(stamp3.getMemberID()).thenReturn(server3); - - RegionMap regionMap = mock(RegionMap.class); - InitialImageOperation operation = spy(new InitialImageOperation(distributedRegion, regionMap)); + ImageState imageState = mock(ImageState.class); RegionVersionVector recoveredRVV = new VMRegionVersionVector(server1); recoveredRVV.recordVersion(server1, 1); @@ -173,81 +150,59 @@ public void shouldRemoveDepartedMembersFromRVVForNonPersistentRegion() { recoveredRVV.recordGCVersion(server2, 1); recoveredRVV.recordGCVersion(server3, 1); recoveredRVV.recordGCVersion(server4, 1); + + // there will be re3 from server3 recoveredRVV.memberDeparted(null, server3, true); + + // there won't be any entry from server4, so it will be removed from MemberToVersion map after + // GII recoveredRVV.memberDeparted(null, server4, true); + assertThat(recoveredRVV.isDepartedMember(server3)).isTrue(); assertThat(recoveredRVV.isDepartedMember(server4)).isTrue(); assertThat(recoveredRVV.getMemberToVersion().size()).isEqualTo(4); assertThat(recoveredRVV.getMemberToGCVersion().size()).isEqualTo(3); + InitialImageOperation.Entry re3 = mock(InitialImageOperation.Entry.class); + VersionTag tag3 = VMVersionTag.create(server3); + when(re3.getVersionTag()).thenReturn(tag3); + ArrayList entries = new ArrayList<>(); + entries.add(re3); - RegionVersionVector receivedRVV = new VMRegionVersionVector(server2); - receivedRVV.recordVersion(server1, 1); - receivedRVV.recordVersion(server2, 1); - receivedRVV.recordVersion(server2, 2); - receivedRVV.recordVersion(server3, 1); - receivedRVV.recordVersion(server4, 1); - receivedRVV.recordGCVersion(server2, 1); - receivedRVV.recordGCVersion(server3, 1); - receivedRVV.recordGCVersion(server4, 1); - receivedRVV.memberDeparted(null, server3, true); - receivedRVV.memberDeparted(null, server4, true); - assertThat(receivedRVV.isDepartedMember(server3)).isTrue(); - assertThat(receivedRVV.isDepartedMember(server4)).isTrue(); - assertThat(receivedRVV.getMemberToVersion().size()).isEqualTo(4); - assertThat(receivedRVV.getMemberToGCVersion().size()).isEqualTo(3); - - RegionVersionVector remoteRVV = receivedRVV.getCloneForTransmission(); - - operation.processReceivedRVV(remoteRVV, recoveredRVV, receivedRVV); - assertThat(receivedRVV.getMemberToVersion().size()).isEqualTo(3); - 
assertThat(receivedRVV.getMemberToGCVersion().size()).isEqualTo(2); + CachePerfStats stats = mock(CachePerfStats.class); + doNothing().when(stats).incGetInitialImageKeysReceived(); + when(distributedRegion.getCachePerfStats()).thenReturn(stats); + when(distributedRegion.getImageState()).thenReturn(imageState); + when(distributedRegion.getVersionMember()).thenReturn(server1); + when(distributedRegion.getDiskRegion()).thenReturn(null); + when(distributedRegion.isDestroyed()).thenReturn(false); + when(distributedRegion.getVersionVector()).thenReturn(recoveredRVV); + when(imageState.getClearRegionFlag()).thenReturn(false); + + InternalDistributedMember giiProvider = mock(InternalDistributedMember.class); + RegionMap regionMap = mock(RegionMap.class); + InitialImageOperation operation = spy(new InitialImageOperation(distributedRegion, regionMap)); + + when(distributedRegion.getDataPolicy()).thenReturn(DataPolicy.REPLICATE); + assertThat(operation.processChunk(entries, giiProvider)).isTrue(); + assertThat(recoveredRVV.getDepartedMembersSet().size()).isEqualTo(1); assertThat(recoveredRVV.getMemberToVersion().size()).isEqualTo(3); assertThat(recoveredRVV.getMemberToGCVersion().size()).isEqualTo(2); - assertThat(remoteRVV.getMemberToVersion().size()).isEqualTo(3); - assertThat(remoteRVV.getMemberToGCVersion().size()).isEqualTo(2); assertThat(recoveredRVV.getMemberToVersion().containsKey(server3)).isTrue(); assertThat(recoveredRVV.getMemberToVersion().containsKey(server4)).isFalse(); assertThat(recoveredRVV.getMemberToGCVersion().containsKey(server3)).isTrue(); assertThat(recoveredRVV.getMemberToGCVersion().containsKey(server4)).isFalse(); - assertThat(receivedRVV.getMemberToVersion().containsKey(server3)).isTrue(); - assertThat(receivedRVV.getMemberToVersion().containsKey(server4)).isFalse(); - assertThat(receivedRVV.getMemberToGCVersion().containsKey(server3)).isTrue(); - assertThat(receivedRVV.getMemberToGCVersion().containsKey(server4)).isFalse(); } @Test - public void shouldNotRemoveDepartedMembersFromRVVForPersistentRegion() { - InternalDistributedMember idm = new InternalDistributedMember("host1", 101); - DiskStoreID server1 = new DiskStoreID(0, 0); - DiskStoreID server2 = new DiskStoreID(0, 1); - DiskStoreID server3 = new DiskStoreID(0, 2); - DiskStoreID server4 = new DiskStoreID(0, 3); - when(distributedRegion.getDataPolicy()).thenReturn(DataPolicy.PERSISTENT_REPLICATE); - when(distributedRegion.getVersionMember()).thenReturn(server1); - - RegionEntry re1 = mock(RegionEntry.class); - RegionEntry re2 = mock(RegionEntry.class); - RegionEntry re3 = mock(RegionEntry.class); - ArrayList entries = new ArrayList<>(); - entries.add(re1); - entries.add(re2); - entries.add(re3); - Iterator iterator = entries.iterator(); - when(distributedRegion.getBestIterator(false)).thenReturn(iterator); - VersionStamp stamp1 = mock(VersionStamp.class); - VersionStamp stamp2 = mock(VersionStamp.class); - VersionStamp stamp3 = mock(VersionStamp.class); - when(re1.getVersionStamp()).thenReturn(stamp1); - when(re2.getVersionStamp()).thenReturn(stamp2); - when(re3.getVersionStamp()).thenReturn(stamp3); - when(stamp1.getMemberID()).thenReturn(server1); - when(stamp2.getMemberID()).thenReturn(server2); - when(stamp3.getMemberID()).thenReturn(server3); - - RegionMap regionMap = mock(RegionMap.class); - InitialImageOperation operation = spy(new InitialImageOperation(distributedRegion, regionMap)); + public void shouldNotRemoveDepartedMembersFromRVVForPersistentRegion() + throws IOException, ClassNotFoundException { + 
InternalDistributedMember server1 = new InternalDistributedMember("host1", 101); + InternalDistributedMember server2 = new InternalDistributedMember("host2", 102); + InternalDistributedMember server3 = new InternalDistributedMember("host3", 103); + InternalDistributedMember server4 = new InternalDistributedMember("host4", 104); + ImageState imageState = mock(ImageState.class); - RegionVersionVector recoveredRVV = new DiskRegionVersionVector(server1); + RegionVersionVector recoveredRVV = new VMRegionVersionVector(server1); recoveredRVV.recordVersion(server1, 1); recoveredRVV.recordVersion(server2, 1); recoveredRVV.recordVersion(server3, 1); @@ -255,37 +210,33 @@ public void shouldNotRemoveDepartedMembersFromRVVForPersistentRegion() { recoveredRVV.recordGCVersion(server2, 1); recoveredRVV.recordGCVersion(server3, 1); recoveredRVV.recordGCVersion(server4, 1); - recoveredRVV.memberDeparted(null, idm, true); - assertThat(recoveredRVV.getMemberToVersion().size()).isEqualTo(4); - assertThat(recoveredRVV.getMemberToGCVersion().size()).isEqualTo(3); - - RegionVersionVector receivedRVV = new DiskRegionVersionVector(server2); - receivedRVV.recordVersion(server1, 1); - receivedRVV.recordVersion(server2, 1); - receivedRVV.recordVersion(server2, 2); - receivedRVV.recordVersion(server3, 1); - receivedRVV.recordVersion(server4, 1); - receivedRVV.recordGCVersion(server2, 1); - receivedRVV.recordGCVersion(server3, 1); - receivedRVV.recordGCVersion(server4, 1); - receivedRVV.memberDeparted(null, idm, true); - assertThat(receivedRVV.getMemberToVersion().size()).isEqualTo(4); - assertThat(receivedRVV.getMemberToGCVersion().size()).isEqualTo(3); - - RegionVersionVector remoteRVV = receivedRVV.getCloneForTransmission(); - receivedRVV = spy(receivedRVV); recoveredRVV = spy(recoveredRVV); - remoteRVV = spy(remoteRVV); - operation.processReceivedRVV(remoteRVV, recoveredRVV, receivedRVV); - assertThat(receivedRVV.getMemberToVersion().size()).isEqualTo(4); - assertThat(receivedRVV.getMemberToGCVersion().size()).isEqualTo(3); assertThat(recoveredRVV.getMemberToVersion().size()).isEqualTo(4); assertThat(recoveredRVV.getMemberToGCVersion().size()).isEqualTo(3); - assertThat(remoteRVV.getMemberToVersion().size()).isEqualTo(4); - assertThat(remoteRVV.getMemberToGCVersion().size()).isEqualTo(3); - verify(receivedRVV, never()).removeOldMembers(any()); + InitialImageOperation.Entry re3 = mock(InitialImageOperation.Entry.class); + VersionTag tag3 = VMVersionTag.create(server3); + when(re3.getVersionTag()).thenReturn(tag3); + ArrayList entries = new ArrayList<>(); + entries.add(re3); + + CachePerfStats stats = mock(CachePerfStats.class); + doNothing().when(stats).incGetInitialImageKeysReceived(); + DiskRegion diskRegion = mock(DiskRegion.class); + when(distributedRegion.getCachePerfStats()).thenReturn(stats); + when(distributedRegion.getImageState()).thenReturn(imageState); + when(distributedRegion.getVersionMember()).thenReturn(server1); + when(distributedRegion.getDiskRegion()).thenReturn(diskRegion); + when(distributedRegion.isDestroyed()).thenReturn(false); + when(distributedRegion.getVersionVector()).thenReturn(recoveredRVV); + when(imageState.getClearRegionFlag()).thenReturn(false); + + InternalDistributedMember giiProvider = mock(InternalDistributedMember.class); + RegionMap regionMap = mock(RegionMap.class); + InitialImageOperation operation = spy(new InitialImageOperation(distributedRegion, regionMap)); + + when(distributedRegion.getDataPolicy()).thenReturn(DataPolicy.PERSISTENT_REPLICATE); + 
assertThat(operation.processChunk(entries, giiProvider)).isTrue(); verify(recoveredRVV, never()).removeOldMembers(any()); - verify(remoteRVV, never()).removeOldMembers(any()); } } diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/LocalDataSetTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/LocalDataSetTest.java index 572fd91a01d4..ba5494a84ba8 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/LocalDataSetTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/LocalDataSetTest.java @@ -35,7 +35,7 @@ public void verifyThatIsEmptyIsTrueWhenEntryCountReturnsZero() { PartitionedRegion pr = mock(PartitionedRegion.class); when(pr.isEmpty()).thenReturn(false); when(pr.entryCount(any())).thenReturn(0); - LocalDataSet lds = new LocalDataSet(pr, Collections.emptySet()); + LocalDataSet lds = new LocalDataSet<>(pr, Collections.emptySet()); assertTrue(lds.isEmpty()); } @@ -44,7 +44,7 @@ public void verifyThatIsEmptyIsFalseWhenEntryCountReturnsNonZero() { PartitionedRegion pr = mock(PartitionedRegion.class); when(pr.isEmpty()).thenReturn(true); when(pr.entryCount(any())).thenReturn(1); - LocalDataSet lds = new LocalDataSet(pr, Collections.emptySet()); + LocalDataSet lds = new LocalDataSet<>(pr, Collections.emptySet()); assertFalse(lds.isEmpty()); } @@ -52,8 +52,8 @@ public void verifyThatIsEmptyIsFalseWhenEntryCountReturnsNonZero() { public void verifyThatGetCallbackArgIsCorrectlyPassedToGetHashKey() { PartitionedRegion pr = mock(PartitionedRegion.class); when(pr.getTotalNumberOfBuckets()).thenReturn(33); - LocalDataSet lds = new LocalDataSet(pr, Collections.emptySet()); - LocalDataSet spy = spy(lds); + LocalDataSet lds = new LocalDataSet<>(pr, Collections.emptySet()); + LocalDataSet spy = spy(lds); Object key = "key"; Object callbackArg = "callbackArg"; diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java index b50e74b32c98..2aff7bb23c30 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/PartitionedRegionTest.java @@ -365,8 +365,9 @@ public void updateBucketMapsForInterestRegistrationWithAllKeysFetchesPrimaryBuck doReturn(primaryMember) .when(spyPartitionedRegion).getNodeForBucketWrite(anyInt(), isNull()); - HashMap> nodeToBuckets = new HashMap<>(); - HashMap bucketKeys = (HashMap) asMapOfSet(0, (HashSet) asSet(0, 1)); + Map>> nodeToBuckets = + new HashMap<>(); + Map> bucketKeys = asMapOfSet(0, 0, 1); // ACT spyPartitionedRegion.updateNodeToBucketMap(nodeToBuckets, bucketKeys); diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessageJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessageJUnitTest.java index 5faa749daf66..d919bfbce4d4 100755 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessageJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/partitioned/FetchEntriesMessageJUnitTest.java @@ -75,7 +75,7 @@ public void testProcessChunk() throws Exception { PartitionedRegion pr = mock(PartitionedRegion.class); InternalDistributedSystem system = cache.getInternalDistributedSystem(); - FetchEntriesResponse response = new FetchEntriesResponse(system, pr, null, 0); + FetchEntriesResponse response = new FetchEntriesResponse(system, null, 0); 
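(Aside on the InitialImageOperationTest rewrite above: the two tests now drive `processChunk(entries, giiProvider)` directly and assert that, for a non-persistent region, a departed member is pruned from the region version vector only when no received entry still carries its version tag, while persistent regions keep departed members. A toy sketch of that pruning rule, using illustrative names rather than Geode's RegionVersionVector API:

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative toy model only -- not Geode's implementation.
final class RvvPruningSketch {
  final Map<String, Long> memberToVersion = new HashMap<>();
  final Set<String> departedMembers = new HashSet<>();

  void pruneAfterChunk(Set<String> tagOwnersSeenInChunk, boolean persistent) {
    if (persistent) {
      return; // persistent regions keep departed members for disk recovery
    }
    departedMembers.removeIf(member -> {
      if (tagOwnersSeenInChunk.contains(member)) {
        return false; // still referenced by an entry (like server3): keep it
      }
      memberToVersion.remove(member); // like server4: no entries, prune it
      return true;
    });
  }
}
```

Under this model, server3 survives because entry re3 carries its tag, and server4 is dropped, matching the assertions in the non-persistent test.)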
HeapDataOutputStream chunkStream = createDummyChunk(); FetchEntriesReplyMessage reply = new FetchEntriesReplyMessage(null, 0, 0, chunkStream, 0, 0, 0, false, false); diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientRegistrationEventQueueManagerTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientRegistrationEventQueueManagerTest.java index 717bf8d1e51d..de624150a9b5 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientRegistrationEventQueueManagerTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/ClientRegistrationEventQueueManagerTest.java @@ -19,7 +19,6 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.catchThrowable; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -34,7 +33,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; import org.junit.Before; @@ -341,11 +339,7 @@ public void addEventInOriginalFilterIDsButQueueWasRemovedDueToSuccessfulRegistra when(internalRegion.getFilterProfile()) .thenReturn(filterProfile); - ReentrantReadWriteLock readWriteLock = spy(new ReentrantReadWriteLock()); - ReadLock readLock = spy(readWriteLock.readLock()); - - when(readWriteLock.readLock()) - .thenReturn(readLock); + FakeReentrantReadWriteLock readWriteLock = new FakeReentrantReadWriteLock(); ClientRegistrationEventQueueManager clientRegistrationEventQueueManager = new ClientRegistrationEventQueueManager(); @@ -354,13 +348,8 @@ public void addEventInOriginalFilterIDsButQueueWasRemovedDueToSuccessfulRegistra clientRegistrationEventQueueManager.create(clientProxyMembershipId, new ConcurrentLinkedQueue<>(), readWriteLock); - doAnswer((Answer) invocation -> { - clientRegistrationEventQueueManager.drain(clientRegistrationEventQueue, cacheClientNotifier); - invocation.callRealMethod(); - return null; - }) - .when(readLock) - .lock(); + readWriteLock.readLock.beforeLock = () -> clientRegistrationEventQueueManager + .drain(clientRegistrationEventQueue, cacheClientNotifier); clientRegistrationEventQueueManager.add(internalCacheEvent, clientUpdateMessage, clientUpdateMessage, asSet(clientProxyMembershipId), cacheClientNotifier); @@ -404,4 +393,31 @@ private Operation operation() { return operation; } + + private static class FakeReentrantReadWriteLock extends ReentrantReadWriteLock { + private final FakeReadLock readLock; + + FakeReentrantReadWriteLock() { + readLock = new FakeReadLock(this); + } + + @Override + public ReentrantReadWriteLock.ReadLock readLock() { + return readLock; + } + } + + private static class FakeReadLock extends ReentrantReadWriteLock.ReadLock { + private Runnable beforeLock; + + public FakeReadLock(ReentrantReadWriteLock outerLock) { + super(outerLock); + } + + @Override + public void lock() { + beforeLock.run(); + super.lock(); + } + } } diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66Test.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66Test.java index 
a0956cdae255..cea25f463c17 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66Test.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunction66Test.java @@ -15,6 +15,7 @@ package org.apache.geode.internal.cache.tier.sockets.command; import static junit.framework.TestCase.assertEquals; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; @@ -34,7 +35,6 @@ import org.junit.Test; import org.junit.contrib.java.lang.system.RestoreSystemProperties; import org.junit.experimental.categories.Category; -import org.mockito.Mock; import org.apache.geode.cache.execute.Function; import org.apache.geode.cache.execute.ResultCollector; @@ -49,28 +49,29 @@ public class ExecuteRegionFunction66Test { private static final String FUNCTION_ID = "function_id"; - @Mock - private Function functionObject; + private final String functionName = "functionName"; - private ExecuteRegionFunction66 executeRegionFunction66; + private final Function functionObject = mock(Function.class); + + private final AbstractExecution execution = mock(AbstractExecution.class); + + private final ExecuteRegionFunction66 executeRegionFunction66 = + (ExecuteRegionFunction66) ExecuteRegionFunction66.getCommand(); @Rule public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties(); @Before public void setUp() throws Exception { - executeRegionFunction66 = (ExecuteRegionFunction66) ExecuteRegionFunction66.getCommand(); - - functionObject = mock(Function.class); when(functionObject.getId()).thenReturn(FUNCTION_ID); doCallRealMethod().when(functionObject).getRequiredPermissions(any()); + + when(execution.execute(functionObject)).thenReturn(uncheckedCast(mock(ResultCollector.class))); + when(execution.execute(functionName)).thenReturn(uncheckedCast(mock(ResultCollector.class))); } @Test public void executingFunctionInPreGeode18ByStringWithNoHAShouldNotSetWaitOnException() { - AbstractExecution execution = mock(AbstractExecution.class); - String functionName = "functionName"; - when(execution.execute(functionName)).thenReturn(mock(ResultCollector.class)); executeRegionFunction66.executeFunctionWithResult(functionName, AbstractExecution.NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE, functionObject, execution); verify(execution, times(0)).setWaitOnExceptionFlag(true); @@ -78,9 +79,6 @@ public void executingFunctionInPreGeode18ByStringWithNoHAShouldNotSetWaitOnExcep @Test public void executingFunctionInPreGeode18ByStringWithNoHAWithOptimizeForWriteShouldNotSetWaitOnException() { - AbstractExecution execution = mock(AbstractExecution.class); - String functionName = "functionName"; - when(execution.execute(functionName)).thenReturn(mock(ResultCollector.class)); executeRegionFunction66.executeFunctionWithResult(functionName, AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE, functionObject, execution); verify(execution, times(0)).setWaitOnExceptionFlag(true); @@ -88,8 +86,6 @@ public void executingFunctionInPreGeode18ByStringWithNoHAWithOptimizeForWriteSho @Test public void executingFunctionObjectInPreGeode18ShouldNotSetWaitOnException() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); executeRegionFunction66.executeFunctionWithResult(functionObject, 
AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE, functionObject, execution); verify(execution, times(0)).setWaitOnExceptionFlag(true); @@ -97,25 +93,18 @@ public void executingFunctionObjectInPreGeode18ShouldNotSetWaitOnException() { @Test public void generateNullArgumentMessageIfRegionIsNull() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); assertEquals("The input region for the execute function request is null", executeRegionFunction66.generateNullArgumentMessage(null, null)); } @Test public void generateNullArgumentMessageIfFunctionIsNullAndRegionIsNotNull() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); assertEquals("The input function for the execute function request is null", executeRegionFunction66.generateNullArgumentMessage("someRegion", null)); } @Test public void populateFiltersWillReturnFiltersReadFromClientMessage() throws Exception { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - Message clientMessage = mock(Message.class); Part part1 = mock(Part.class); Object object1 = new Object(); @@ -131,7 +120,7 @@ public void populateFiltersWillReturnFiltersReadFromClientMessage() throws Excep when(clientMessage.getPart(8)).thenReturn(part2); when(clientMessage.getPart(9)).thenReturn(part3); int filterSize = 3; - Set filter = executeRegionFunction66.populateFilters(clientMessage, filterSize); + Set filter = executeRegionFunction66.populateFilters(clientMessage, filterSize); assertSame(filterSize, filter.size()); assertTrue(filter.contains(object1)); assertTrue(filter.contains(object2)); @@ -139,35 +128,30 @@ public void populateFiltersWillReturnFiltersReadFromClientMessage() throws Excep } @Test - public void populateRemovedNodexWillReturnNodesReadFromClient() throws Exception { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - + public void populateRemovedNodesWillReturnNodesReadFromClient() throws Exception { Message clientMessage = mock(Message.class); Part part1 = mock(Part.class); - Object object1 = new Object(); - when(part1.getStringOrObject()).thenReturn(object1); + String node1 = "node1"; + when(part1.getStringOrObject()).thenReturn(node1); Part part2 = mock(Part.class); - Object object2 = new Object(); - when(part2.getStringOrObject()).thenReturn(object2); + String node2 = "node2"; + when(part2.getStringOrObject()).thenReturn(node2); Part part3 = mock(Part.class); - Object object3 = new Object(); - when(part3.getStringOrObject()).thenReturn(object3); + String node3 = "node3"; + when(part3.getStringOrObject()).thenReturn(node3); when(clientMessage.getPart(7)).thenReturn(part1); when(clientMessage.getPart(8)).thenReturn(part2); when(clientMessage.getPart(9)).thenReturn(part3); - Set nodes = executeRegionFunction66.populateRemovedNodes(clientMessage, 3, 6); - assertTrue(nodes.contains(object1)); - assertTrue(nodes.contains(object2)); - assertTrue(nodes.contains(object3)); + Set nodes = executeRegionFunction66.populateRemovedNodes(clientMessage, 3, 6); + assertTrue(nodes.contains(node1)); + assertTrue(nodes.contains(node2)); + assertTrue(nodes.contains(node3)); } + @SuppressWarnings("deprecation") @Test public void getAuthorizedExecuteFunctionReturnsNullIfAuthorizationIsNull() { - 
AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - String functionName = "functionName"; String regionPath = "regionPath"; ExecuteFunctionOperationContext context = executeRegionFunction66.getAuthorizedExecuteFunctionOperationContext(null, null, true, null, @@ -175,11 +159,9 @@ public void getAuthorizedExecuteFunctionReturnsNullIfAuthorizationIsNull() { assertNull(context); } + @SuppressWarnings("deprecation") @Test public void getAuthorizedExecuteFunctionReturnsExecutionContextIfAuthorizeRequestIsNotNull() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - String functionName = "functionName"; String regionPath = "regionPath"; AuthorizeRequest request = mock(AuthorizeRequest.class); when(request.executeFunctionAuthorize(any(), any(), any(), any(), anyBoolean())) diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18Test.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18Test.java index 4c40a5d7b4d7..08dcc04098f6 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18Test.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tier/sockets/command/ExecuteRegionFunctionGeode18Test.java @@ -15,6 +15,7 @@ package org.apache.geode.internal.cache.tier.sockets.command; import static junit.framework.TestCase.assertEquals; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; @@ -34,7 +35,6 @@ import org.junit.Test; import org.junit.contrib.java.lang.system.RestoreSystemProperties; import org.junit.experimental.categories.Category; -import org.mockito.Mock; import org.apache.geode.cache.execute.Function; import org.apache.geode.cache.execute.ResultCollector; @@ -49,29 +49,29 @@ public class ExecuteRegionFunctionGeode18Test { private static final String FUNCTION_ID = "function_id"; - @Mock - private Function functionObject; + private final String functionName = "functionName"; - private ExecuteRegionFunctionGeode18 executeRegionFunctionGeode18; + private final Function functionObject = mock(Function.class); + + private final AbstractExecution execution = mock(AbstractExecution.class); + + private final ExecuteRegionFunctionGeode18 executeRegionFunctionGeode18 = + (ExecuteRegionFunctionGeode18) ExecuteRegionFunctionGeode18.getCommand(); @Rule public RestoreSystemProperties restoreSystemProperties = new RestoreSystemProperties(); @Before public void setUp() throws Exception { - executeRegionFunctionGeode18 = - (ExecuteRegionFunctionGeode18) ExecuteRegionFunctionGeode18.getCommand(); - - functionObject = mock(Function.class); when(functionObject.getId()).thenReturn(FUNCTION_ID); doCallRealMethod().when(functionObject).getRequiredPermissions(any()); + + when(execution.execute(functionObject)).thenReturn(uncheckedCast(mock(ResultCollector.class))); + when(execution.execute(functionName)).thenReturn(uncheckedCast(mock(ResultCollector.class))); } @Test public void executingFunctionByStringWithNoHAShouldSetWaitOnException() { - AbstractExecution execution = mock(AbstractExecution.class); - String functionName = "functionName"; - 
when(execution.execute(functionName)).thenReturn(mock(ResultCollector.class)); executeRegionFunctionGeode18.executeFunctionWithResult(functionName, AbstractExecution.NO_HA_HASRESULT_NO_OPTIMIZEFORWRITE, functionObject, execution); verify(execution, times(1)).setWaitOnExceptionFlag(true); @@ -79,9 +79,6 @@ public void executingFunctionByStringWithNoHAShouldSetWaitOnException() { @Test public void executingFunctionByStringWithNoHAWithOptimizeForWriteShouldSetWaitOnException() { - AbstractExecution execution = mock(AbstractExecution.class); - String functionName = "functionName"; - when(execution.execute(functionName)).thenReturn(mock(ResultCollector.class)); executeRegionFunctionGeode18.executeFunctionWithResult(functionName, AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE, functionObject, execution); verify(execution, times(1)).setWaitOnExceptionFlag(true); @@ -89,8 +86,6 @@ public void executingFunctionByStringWithNoHAWithOptimizeForWriteShouldSetWaitOn @Test public void executeFunctionObjectShouldSetWaitOnException() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); executeRegionFunctionGeode18.executeFunctionWithResult(functionObject, AbstractExecution.NO_HA_HASRESULT_OPTIMIZEFORWRITE, functionObject, execution); verify(execution, times(1)).setWaitOnExceptionFlag(true); @@ -98,25 +93,18 @@ public void executeFunctionObjectShouldSetWaitOnException() { @Test public void generateNullArgumentMessageIfRegionIsNull() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); assertEquals("The input region for the execute function request is null", executeRegionFunctionGeode18.generateNullArgumentMessage(null, null)); } @Test public void generateNullArgumentMessageIfFunctionIsNullAndRegionIsNotNull() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); assertEquals("The input function for the execute function request is null", executeRegionFunctionGeode18.generateNullArgumentMessage("someRegion", null)); } @Test public void populateFiltersWillReturnFiltersReadFromClientMessage() throws Exception { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - Message clientMessage = mock(Message.class); Part part1 = mock(Part.class); Object object1 = new Object(); @@ -132,7 +120,7 @@ public void populateFiltersWillReturnFiltersReadFromClientMessage() throws Excep when(clientMessage.getPart(8)).thenReturn(part2); when(clientMessage.getPart(9)).thenReturn(part3); int filterSize = 3; - Set filter = executeRegionFunctionGeode18.populateFilters(clientMessage, filterSize); + Set filter = executeRegionFunctionGeode18.populateFilters(clientMessage, filterSize); assertSame(filterSize, filter.size()); assertTrue(filter.contains(object1)); assertTrue(filter.contains(object2)); @@ -140,35 +128,29 @@ public void populateFiltersWillReturnFiltersReadFromClientMessage() throws Excep } @Test - public void populateRemovedNodexWillReturnNodesReadFromClient() throws Exception { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - + public void populateRemovedNodesWillReturnNodesReadFromClient() throws Exception { Message clientMessage = mock(Message.class); 
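(Aside: these command tests stub generic collaborators such as `ResultCollector` through `org.apache.geode.util.internal.UncheckedUtils.uncheckedCast`. A minimal sketch of that kind of helper, assuming it does nothing more than confine the unavoidable unchecked cast to one place, rather than matching Geode's exact implementation:

```java
// Sketch of an uncheckedCast-style helper; the real UncheckedUtils may differ.
final class UncheckedCastSketch {
  @SuppressWarnings("unchecked")
  static <T> T uncheckedCast(final Object object) {
    return (T) object;
  }
}
```

With such a helper, `when(execution.execute(functionName)).thenReturn(uncheckedCast(mock(ResultCollector.class)))` compiles without raw-type warnings at the call site, which is the shape these setUp methods use.)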
Part part1 = mock(Part.class); - Object object1 = new Object(); - when(part1.getStringOrObject()).thenReturn(object1); + String node1 = "node1"; + when(part1.getStringOrObject()).thenReturn(node1); Part part2 = mock(Part.class); - Object object2 = new Object(); - when(part2.getStringOrObject()).thenReturn(object2); + String node2 = "node2"; + when(part2.getStringOrObject()).thenReturn(node2); Part part3 = mock(Part.class); - Object object3 = new Object(); - when(part3.getStringOrObject()).thenReturn(object3); + String node3 = "node3"; + when(part3.getStringOrObject()).thenReturn(node3); when(clientMessage.getPart(7)).thenReturn(part1); when(clientMessage.getPart(8)).thenReturn(part2); when(clientMessage.getPart(9)).thenReturn(part3); - Set nodes = executeRegionFunctionGeode18.populateRemovedNodes(clientMessage, 3, 6); - assertTrue(nodes.contains(object1)); - assertTrue(nodes.contains(object2)); - assertTrue(nodes.contains(object3)); + Set nodes = executeRegionFunctionGeode18.populateRemovedNodes(clientMessage, 3, 6); + assertTrue(nodes.contains(node1)); + assertTrue(nodes.contains(node2)); + assertTrue(nodes.contains(node3)); } @Test public void getAuthorizedExecuteFunctionReturnsNullIfAuthorizationIsNull() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - String functionName = "functionName"; String regionPath = "regionPath"; ExecuteFunctionOperationContext context = executeRegionFunctionGeode18.getAuthorizedExecuteFunctionOperationContext(null, null, true, @@ -179,9 +161,6 @@ public void getAuthorizedExecuteFunctionReturnsNullIfAuthorizationIsNull() { @Test public void getAuthorizedExecuteFunctionReturnsExecutionContextIfAuthorizeRequestIsNotNull() { - AbstractExecution execution = mock(AbstractExecution.class); - when(execution.execute(functionObject)).thenReturn(mock(ResultCollector.class)); - String functionName = "functionName"; String regionPath = "regionPath"; AuthorizeRequest request = mock(AuthorizeRequest.class); when(request.executeFunctionAuthorize(any(), any(), any(), any(), anyBoolean())) diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStubTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStubTest.java index 57882c0dea14..e9df323288d7 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStubTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/tx/PartitionedTXRegionStubTest.java @@ -83,7 +83,7 @@ public void destroyExistingEntryTracksBucketForTx() { public void destroyExistingEntryThrowsTransactionExceptionFromRemoteHost() throws Exception { PartitionedTXRegionStub stub = spy(new PartitionedTXRegionStub(txStateStub, partitionedRegion)); when(event.getRegion()).thenReturn(partitionedRegion); - doThrow(expectedException).when(partitionedRegion).destroyRemotely(remoteTransactionHost, 1, + doThrow(expectedException).when(partitionedRegion).destroyRemotely(remoteTransactionHost, event, expectedObject); Throwable caughtException = catchThrowable(() -> stub.destroyExistingEntry(event, true, @@ -102,7 +102,7 @@ public void destroyExistingEntryThrowsTransactionDataRebalancedExceptionIfIsBuck doReturn(true).when(stub).isBucketNotFoundException(forceReattemptException); doNothing().when(stub).waitToRetry(); doThrow(forceReattemptException).when(partitionedRegion).destroyRemotely(remoteTransactionHost, - 1, event, expectedObject); + event, 
expectedObject); Throwable caughtException = catchThrowable(() -> stub.destroyExistingEntry(event, false, expectedObject)); @@ -120,7 +120,7 @@ public void destroyExistingEntryThrowsTransactionDataNodeHasDepartedExceptionIfI doReturn(false).when(stub).isBucketNotFoundException(forceReattemptException); doNothing().when(stub).waitToRetry(); doThrow(forceReattemptException).when(partitionedRegion).destroyRemotely(remoteTransactionHost, - 1, event, expectedObject); + event, expectedObject); Throwable caughtException = catchThrowable(() -> stub.destroyExistingEntry(event, true, expectedObject)); @@ -137,7 +137,7 @@ public void getEntryForIteratorReturnsEntryGotFromTransactionHost() throws Excep when(event.getRegion()).thenReturn(partitionedRegion); when(partitionedRegion.getBucketPrimary(1)) .thenReturn((InternalDistributedMember) remoteTransactionHost); - when(partitionedRegion.getEntryRemotely((InternalDistributedMember) remoteTransactionHost, 1, + when(partitionedRegion.getEntryRemotely((InternalDistributedMember) remoteTransactionHost, key, false, true)).thenReturn(entry); assertThat(stub.getEntryForIterator(keyInfo, true)).isEqualTo(entry); @@ -164,7 +164,7 @@ public void getEntryReturnsEntryGotFromRemote() throws Exception { PartitionedTXRegionStub stub = spy(new PartitionedTXRegionStub(txStateStub, partitionedRegion)); EntrySnapshot entry = mock(EntrySnapshot.class); when(event.getRegion()).thenReturn(partitionedRegion); - when(partitionedRegion.getEntryRemotely((InternalDistributedMember) remoteTransactionHost, 1, + when(partitionedRegion.getEntryRemotely((InternalDistributedMember) remoteTransactionHost, key, false, true)).thenReturn((entry)); assertThat(stub.getEntry(keyInfo, true)).isEqualTo(entry); @@ -202,7 +202,7 @@ public void getEntryThrowsTransactionExceptionFromRemoteHost() throws Exception PartitionedTXRegionStub stub = spy(new PartitionedTXRegionStub(txStateStub, partitionedRegion)); when(event.getRegion()).thenReturn(partitionedRegion); doThrow(expectedException).when(partitionedRegion) - .getEntryRemotely((InternalDistributedMember) remoteTransactionHost, 1, key, false, true); + .getEntryRemotely((InternalDistributedMember) remoteTransactionHost, key, false, true); Throwable caughtException = catchThrowable(() -> stub.getEntry(keyInfo, true)); @@ -219,7 +219,7 @@ public void getEntryThrowsTransactionDataRebalancedExceptionIfIsBucketNotFoundEx doReturn(true).when(stub).isBucketNotFoundException(forceReattemptException); doNothing().when(stub).waitToRetry(); doThrow(forceReattemptException).when(partitionedRegion) - .getEntryRemotely((InternalDistributedMember) remoteTransactionHost, 1, key, false, true); + .getEntryRemotely((InternalDistributedMember) remoteTransactionHost, key, false, true); Throwable caughtException = catchThrowable(() -> stub.getEntry(keyInfo, true)); @@ -237,7 +237,7 @@ public void getEntryThrowsTransactionDataNodeHasDepartedExceptionIfIsNotBucketNo doReturn(false).when(stub).isBucketNotFoundException(forceReattemptException); doNothing().when(stub).waitToRetry(); doThrow(forceReattemptException).when(partitionedRegion) - .getEntryRemotely((InternalDistributedMember) remoteTransactionHost, 1, key, false, false); + .getEntryRemotely((InternalDistributedMember) remoteTransactionHost, key, false, false); Throwable caughtException = catchThrowable(() -> stub.getEntry(keyInfo, false)); @@ -260,7 +260,7 @@ public void invalidateExistingEntryThrowsTransactionExceptionFromRemoteHost() th PartitionedTXRegionStub stub = spy(new 
PartitionedTXRegionStub(txStateStub, partitionedRegion)); when(event.getRegion()).thenReturn(partitionedRegion); when(keyInfo.getBucketId()).thenReturn(1); - doThrow(expectedException).when(partitionedRegion).invalidateRemotely(remoteTransactionHost, 1, + doThrow(expectedException).when(partitionedRegion).invalidateRemotely(remoteTransactionHost, event); Throwable caughtException = @@ -280,7 +280,7 @@ public void invalidateExistingEntryThrowsTransactionDataRebalancedExceptionIfIsB doReturn(true).when(stub).isBucketNotFoundException(forceReattemptException); doNothing().when(stub).waitToRetry(); doThrow(forceReattemptException).when(partitionedRegion) - .invalidateRemotely(remoteTransactionHost, 1, event); + .invalidateRemotely(remoteTransactionHost, event); Throwable caughtException = catchThrowable(() -> stub.invalidateExistingEntry(event, false, false)); @@ -299,7 +299,7 @@ public void invalidateExistingEntryThrowsTransactionDataNodeHasDepartedException doReturn(false).when(stub).isBucketNotFoundException(forceReattemptException); doNothing().when(stub).waitToRetry(); doThrow(forceReattemptException).when(partitionedRegion) - .invalidateRemotely(remoteTransactionHost, 1, event); + .invalidateRemotely(remoteTransactionHost, event); Throwable caughtException = catchThrowable(() -> stub.invalidateExistingEntry(event, false, false)); diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/versions/RegionVersionHolderJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/versions/RegionVersionHolderJUnitTest.java index 48bc8ba9ab47..5e1da1ce5fbe 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/versions/RegionVersionHolderJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/versions/RegionVersionHolderJUnitTest.java @@ -101,6 +101,16 @@ private void fillSpecialExceptionForRVH(boolean useBitSet) { assertThat(exceptionList.size()).isEqualTo(0); } + @Test + public void initializeFromShouldCopyIfDepartedMember() { + RegionVersionHolder vh1 = new RegionVersionHolder(member); + vh1.isDepartedMember = true; + RegionVersionHolder vh2 = new RegionVersionHolder(member); + assertThat(vh2.isDepartedMember).isFalse(); + vh2.initializeFrom(vh1); + assertThat(vh2.isDepartedMember).isTrue(); + } + @Test public void test48066_1() { RegionVersionHolder vh1 = new RegionVersionHolder(member); diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java index 92d1601d8bb6..43a993ad333d 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelGatewaySenderQueueJUnitTest.java @@ -299,7 +299,7 @@ public void testLocalSize() { PartitionedRegionDataStore dataStore = mock(PartitionedRegionDataStore.class); when(mockMetaRegion.getDataStore()).thenReturn(dataStore); when(dataStore.getSizeOfLocalPrimaryBuckets()).thenReturn(3); - when(metaRegionFactory.newMetataRegion(any(), any(), any(), any())).thenReturn(mockMetaRegion); + when(metaRegionFactory.newMetaRegion(any(), any(), any(), any())).thenReturn(mockMetaRegion); InternalRegionFactory regionFactory = mock(InternalRegionFactory.class); when(regionFactory.create(any())).thenReturn(mockMetaRegion); when(cache.createInternalRegionFactory(any())).thenReturn(regionFactory); 
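Editor's note: the PartitionedTXRegionStub test updates above all exercise the same seam: the remote operation on the PartitionedRegion mock no longer takes a bucket-id argument, the mock is stubbed to fail, and the test asserts how the stub translates that failure. Below is a minimal sketch of that pattern, reusing only names and calls that appear in this diff (txStateStub, event, expectedObject, remoteTransactionHost, and forceReattemptException are the test class's existing fixtures); the exception types are inferred from the test names above, not confirmed elsewhere in this patch.

```java
@Test
public void sketchOfExceptionTranslationPattern() throws Exception {
  PartitionedTXRegionStub stub = spy(new PartitionedTXRegionStub(txStateStub, partitionedRegion));
  when(event.getRegion()).thenReturn(partitionedRegion);

  // The bucket-id argument (formerly a literal 1) is gone from the remote-operation signature.
  doThrow(forceReattemptException).when(partitionedRegion)
      .destroyRemotely(remoteTransactionHost, event, expectedObject);
  // Steer the translation branch: bucket-not-found vs. any other ForceReattemptException.
  doReturn(true).when(stub).isBucketNotFoundException(forceReattemptException);
  doNothing().when(stub).waitToRetry();

  Throwable caught =
      catchThrowable(() -> stub.destroyExistingEntry(event, false, expectedObject));

  // Per the test names above: bucket-not-found surfaces as TransactionDataRebalancedException;
  // returning false from isBucketNotFoundException would instead yield
  // TransactionDataNodeHasDepartedException.
  assertThat(caught).isInstanceOf(TransactionDataRebalancedException.class);
}
```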
diff --git a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java index 84a604810a49..5ac85ba213a0 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/cache/wan/parallel/ParallelQueueRemovalMessageJUnitTest.java @@ -249,8 +249,8 @@ private void createAndProcessParallelQueueRemovalMessage() { message.process((ClusterDistributionManager) cache.getDistributionManager()); } - private HashMap<String, Map<Integer, List<Object>>> createRegionToDispatchedKeysMap() { - HashMap<String, Map<Integer, List<Object>>> regionToDispatchedKeys = new HashMap<>(); + private Map<String, Map<Integer, List<Object>>> createRegionToDispatchedKeysMap() { + Map<String, Map<Integer, List<Object>>> regionToDispatchedKeys = new HashMap<>(); Map<Integer, List<Object>> bucketIdToDispatchedKeys = new HashMap<>(); List<Object> dispatchedKeys = new ArrayList<>(); dispatchedKeys.add(KEY); diff --git a/geode-core/src/test/java/org/apache/geode/internal/size/ObjectTraverserJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/size/ObjectTraverserJUnitTest.java deleted file mode 100644 index d3bc40101836..000000000000 --- a/geode-core/src/test/java/org/apache/geode/internal/size/ObjectTraverserJUnitTest.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License - * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - * or implied. See the License for the specific language governing permissions and limitations under - * the License.
- */ -package org.apache.geode.internal.size; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.IdentityHashMap; -import java.util.Map; -import java.util.Set; - -import org.junit.Ignore; -import org.junit.Test; - -import org.apache.geode.util.internal.GeodeGlossary; - -public class ObjectTraverserJUnitTest { - - @Test - public void testBasic() throws Exception { - Set testData = new HashSet<>(); - Object one = new Object(); - testData.add(one); - Object[] two = new Object[2]; - testData.add(two); - ArrayList three = new ArrayList<>(); - two[0] = three; - three.add(testData); - - TestVisitor visitor = new TestVisitor(); - new ObjectTraverser().breadthFirstSearch(testData, visitor, false); - - assertNotNull(visitor.visited.remove(testData)); - assertNotNull(visitor.visited.remove(one)); - assertNotNull(visitor.visited.remove(two)); - assertNotNull(visitor.visited.remove(three)); - } - - @SuppressWarnings("InstantiationOfUtilityClass") - @Test - public void testStatics() throws Exception { - final Object staticObject = new Object(); - TestObject1.test2 = staticObject; - TestObject1 test1 = new TestObject1(); - - TestVisitor visitor = new TestVisitor(); - ObjectTraverser nonStaticTraverser = new ObjectTraverser(); - nonStaticTraverser.breadthFirstSearch(test1, visitor, false); - assertThat(visitor.visited.get(staticObject)).isNull(); - assertThat(nonStaticTraverser.getStaticFieldCache().get(test1.getClass())).isNull(); - - visitor = new TestVisitor(); - ObjectTraverser staticTraverser = new ObjectTraverser(); - staticTraverser.breadthFirstSearch(test1, visitor, true); - assertThat(visitor.visited.get(staticObject)).isNotNull(); - assertThat(staticTraverser.getStaticFieldCache().get(test1.getClass())).isNotNull(); - } - - @Test - public void testStop() throws Exception { - Set set1 = new HashSet<>(); - final Set set2 = new HashSet<>(); - Object object3 = new Object(); - set1.add(set2); - set2.add(object3); - - TestVisitor visitor = new TestVisitor() { - @Override - public boolean visit(Object parent, Object object) { - super.visit(parent, object); - return object != set2; - } - }; - - new ObjectTraverser().breadthFirstSearch(set1, visitor, true); - - assertNotNull(visitor.visited.get(set1)); - assertNotNull(visitor.visited.get(set2)); - assertNull(visitor.visited.get(object3)); - } - - /** This test is commented out because it needs to be verified manually */ - @Ignore("commented out because it needs to be verified manually") - @Test - public void testHistogram() throws Exception { - Set set1 = new HashSet<>(); - final Set set2 = new HashSet<>(); - Object object3 = new Object(); - set1.add(set2); - set2.add(object3); - - System.setProperty(GeodeGlossary.GEMFIRE_PREFIX + "ObjectSizer.SIZE_OF_CLASS", - "org.apache.geode.internal.size.SizeOfUtil0"); - System.out.println(ObjectGraphSizer.histogram(set1, true)); - } - - private static class TestVisitor implements ObjectTraverser.Visitor { - private static final Object VALUE = new Object(); - - public Map visited = new IdentityHashMap<>(); - - @Override - public boolean visit(Object parent, Object object) { - assertNull(visited.put(object, VALUE)); - return true; - } - } - - private static class TestObject1 { - protected static Object test2; - } - -} diff --git a/geode-core/src/test/java/org/apache/geode/internal/size/ObjectTraverserTest.java 
b/geode-core/src/test/java/org/apache/geode/internal/size/ObjectTraverserTest.java new file mode 100644 index 000000000000..9881bd305139 --- /dev/null +++ b/geode-core/src/test/java/org/apache/geode/internal/size/ObjectTraverserTest.java @@ -0,0 +1,303 @@ +// Copyright (c) VMware, Inc. 2022. All rights reserved. +/* + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + * + */ + +package org.apache.geode.internal.size; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import org.apache.geode.internal.size.ObjectTraverser.Visitor; +import org.apache.geode.util.internal.GeodeGlossary; + +class ObjectTraverserTest { + @Test + void doesNotVisitPrimitiveInstanceFields() { + Object root = new ObjectWithOnlyPrimitiveFields(); + + List visits = new ArrayList<>(); + Visitor visitor = (parent, object) -> visits.add(object); + + boolean includeStatics = false; + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + assertThat(visits) + .containsExactly(root); + } + + @Test + void doesNotVisitPrimitiveStaticFields() { + Object root = new ObjectWithOnlyPrimitiveFields(); + + List visits = new ArrayList<>(); + Visitor visitor = (p, o) -> visits.add(o); + + boolean includeStatics = true; + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + assertThat(visits) + .containsExactly(root); + } + + @Test + void visitsEachReferenceInstanceField() { + Object instanceField1 = new Object(); + Object instanceField2 = new Object(); + ObjectWithReferenceFields root = new ObjectWithReferenceFields(instanceField1, instanceField2); + Object staticField1 = new Object(); + Object staticField2 = new Object(); + ObjectWithReferenceFields.staticField1 = staticField1; + ObjectWithReferenceFields.staticField2 = staticField2; + + List visits = new ArrayList<>(); + Visitor visitor = (parent, object) -> visits.add(object); + + boolean includeStatics = false; + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + assertThat(visits) + .containsExactlyInAnyOrder(root, instanceField1, instanceField2) + .doesNotContain(staticField1, staticField2); // because includeStatics = false; + } + + @Test + void visitsEachReferenceStaticField() { + Object instanceField1 = new Object(); + Object instanceField2 = new Object(); + ObjectWithReferenceFields root = new ObjectWithReferenceFields(instanceField1, instanceField2); + Object staticField1 = new Object(); + Object staticField2 = new Object(); + ObjectWithReferenceFields.staticField1 = staticField1; + ObjectWithReferenceFields.staticField2 = staticField2; + + List visits 
= new ArrayList<>(); + Visitor visitor = (parent, object) -> visits.add(object); + + boolean includeStatics = true; + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + assertThat(visits) + .containsExactlyInAnyOrder( + root, instanceField1, instanceField2, + staticField1, staticField2); // because includeStatics = true; + } + + @Test + void visitsEachPrimitiveArrayInstanceField() { + boolean[] instanceBooleanArray = {true, false, false, true}; + int[] instanceIntArray = {1, 2, 3, 4}; + boolean[] staticBooleanArray = {false, false, true, true}; + int[] staticIntArray = {-1, -2, -3, -4}; + + Object root = new ObjectWithReferenceFields(instanceIntArray, instanceBooleanArray); + ObjectWithReferenceFields.staticField1 = staticBooleanArray; + ObjectWithReferenceFields.staticField2 = staticIntArray; + + List visits = new ArrayList<>(); + Visitor visitor = (parent, object) -> visits.add(object); + + boolean includeStatics = false; + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + assertThat(visits) + .containsExactlyInAnyOrder(root, instanceBooleanArray, instanceIntArray) + .doesNotContain(staticBooleanArray, staticIntArray); // because includeStatics = false; + } + + @Test + void visitsEachPrimitiveArrayStaticField() { + boolean[] instanceBooleanArray = {true, false, false, true}; + int[] instanceIntArray = {1, 2, 3, 4}; + boolean[] staticBooleanArray = {false, false, true, true}; + int[] staticIntArray = {-1, -2, -3, -4}; + + Object root = new ObjectWithReferenceFields(instanceIntArray, instanceBooleanArray); + ObjectWithReferenceFields.staticField1 = staticBooleanArray; + ObjectWithReferenceFields.staticField2 = staticIntArray; + + List visits = new ArrayList<>(); + Visitor visitor = (parent, object) -> visits.add(object); + + boolean includeStatics = true; + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + assertThat(visits).containsExactlyInAnyOrder( + root, instanceBooleanArray, instanceIntArray, + staticBooleanArray, staticIntArray); + } + + @Test + void visitsEachReferenceArrayInstanceFieldAndTheirElements() { + Object[] instanceField1 = {new Object()}; + Object[] instanceField2 = {new Object(), new Object()}; + Object[] staticField1 = {new Object(), new Object(), new Object()}; + Object[] staticField2 = {new Object(), new Object(), new Object(), new Object()}; + + Object root = new ObjectWithReferenceFields(instanceField2, instanceField1); + ObjectWithReferenceFields.staticField1 = staticField1; + ObjectWithReferenceFields.staticField2 = staticField2; + + List visits = new ArrayList<>(); + Visitor visitor = (parent, object) -> visits.add(object); + + boolean includeStatics = false; // Do not visit static fields or their elements.
+ + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + Set expectedVisits = new HashSet<>(); + expectedVisits.add(root); + expectedVisits.add(instanceField1); + expectedVisits.add(instanceField2); + Collections.addAll(expectedVisits, instanceField1); + Collections.addAll(expectedVisits, instanceField2); + + Set staticFieldsAndTheirElements = new HashSet<>(); + staticFieldsAndTheirElements.add(staticField1); + staticFieldsAndTheirElements.add(staticField2); + Collections.addAll(staticFieldsAndTheirElements, staticField1); + Collections.addAll(staticFieldsAndTheirElements, staticField2); + + assertThat(visits) + .containsExactlyInAnyOrderElementsOf(expectedVisits) + .doesNotContainAnyElementsOf(staticFieldsAndTheirElements); + } + + @Test + void visitsEachReferenceArrayStaticFieldAndTheirElements() { + Object[] instanceField1 = {new Object()}; + Object[] instanceField2 = {new Object(), new Object()}; + Object[] staticField1 = {new Object(), new Object(), new Object()}; + Object[] staticField2 = {new Object(), new Object(), new Object(), new Object()}; + + Object root = new ObjectWithReferenceFields(instanceField2, instanceField1); + ObjectWithReferenceFields.staticField1 = staticField1; + ObjectWithReferenceFields.staticField2 = staticField2; + + List visits = new ArrayList<>(); + Visitor visitor = (parent, object) -> visits.add(object); + + boolean includeStatics = true; // Visit static fields and their elements + new ObjectTraverser().breadthFirstSearch(root, visitor, includeStatics); + + Set expectedVisits = new HashSet<>(); + expectedVisits.add(root); + + // instance fields and their elements + expectedVisits.add(instanceField1); + expectedVisits.add(instanceField2); + Collections.addAll(expectedVisits, instanceField1); + Collections.addAll(expectedVisits, instanceField2); + + // static fields and their elements + expectedVisits.add(staticField1); + expectedVisits.add(staticField2); + Collections.addAll(expectedVisits, staticField1); + Collections.addAll(expectedVisits, staticField2); + + assertThat(visits) + .containsExactlyInAnyOrderElementsOf(expectedVisits); + } + + @Test + public void visitsEachObjectInCyclicGraphOnlyOnce() { + Set set = new HashSet<>(); + Object plainObject = new Object(); + Object[] arrayOfObjects = new Object[2]; + ArrayList arrayList = new ArrayList<>(); + + set.add(plainObject); + set.add(arrayOfObjects); + arrayOfObjects[0] = arrayList; + arrayList.add(set); // Creates a cycle: set -> arrayOfObjects -> arrayList -> set + + List visited = new ArrayList<>(); + Visitor visitor = (parent, object) -> visited.add(object); + + new ObjectTraverser().breadthFirstSearch(set, visitor, false); + + assertThat(visited) + .containsOnlyOnce(set, plainObject, arrayOfObjects, arrayList); + } + + @Test + public void doesNotVisitFieldsOfObjectIfVisitReturnsFalse() { + Set set1 = new HashSet<>(); + final Set set2 = new HashSet<>(); + Object object3 = new Object(); + set1.add(set2); + set2.add(object3); + + List visited = new ArrayList<>(); + + Visitor visitor = (parent, object) -> { + visited.add(object); + return object != set2; + }; + + new ObjectTraverser().breadthFirstSearch(set1, visitor, true); + + assertThat(visited) + .containsOnlyOnce(set1, set2) + .doesNotContain(object3); + } + + @Disabled(value = "Disabled because it needs to be verified manually") + @Test + public void testHistogram() throws Exception { + Set set1 = new HashSet<>(); + final Set set2 = new HashSet<>(); + Object object3 = new Object(); + set1.add(set2); + set2.add(object3); + + 
System.setProperty(GeodeGlossary.GEMFIRE_PREFIX + "ObjectSizer.SIZE_OF_CLASS", + "org.apache.geode.internal.size.SizeOfUtil0"); + System.out.println(ObjectGraphSizer.histogram(set1, true)); + } + + private static class ObjectWithOnlyPrimitiveFields { + private static final boolean B = false; + private static final double D = 1.2d; + private static final float F = 3.4f; + private static final int I = 5; + private static final long L = 6L; + private final boolean b = !B; + private final double d = -D; + private final float f = -F; + private final int i = -I; + private final long l = -L; + } + + private static class ObjectWithReferenceFields { + private static Object staticField1; + private static Object staticField2; + private final Object instanceField1; + private final Object instanceField2; + + ObjectWithReferenceFields(Object instanceField1, Object instanceField2) { + this.instanceField1 = instanceField1; + this.instanceField2 = instanceField2; + } + } +} diff --git a/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionObjectSizerJUnitTest.java b/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionObjectSizerJUnitTest.java index e55b1f7cca9e..83c83a6e43ba 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionObjectSizerJUnitTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionObjectSizerJUnitTest.java @@ -14,8 +14,7 @@ */ package org.apache.geode.internal.size; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import org.junit.Test; @@ -27,33 +26,42 @@ public class ReflectionObjectSizerJUnitTest { @Test public void skipsSizingDistributedSystem() { - Object referenceObject = mock(InternalDistributedSystem.class); - checkSizeDoesNotChange(referenceObject); + checkSizingSkippedFor(mock(InternalDistributedSystem.class)); } @Test public void skipsSizingClassLoader() { - checkSizeDoesNotChange(Thread.currentThread().getContextClassLoader()); + checkSizingSkippedFor(Thread.currentThread().getContextClassLoader()); } @Test public void skipsSizingLogger() { - checkSizeDoesNotChange(LogService.getLogger()); + checkSizingSkippedFor(LogService.getLogger()); } - private void checkSizeDoesNotChange(final Object referenceObject) { + @Test + public void skipSizingThread() { + checkSizingSkippedFor(new Thread(() -> { + })); + } + + @Test + public void skipSizingThreadGroup() { + checkSizingSkippedFor(new ThreadGroup("test")); + } + + private void checkSizingSkippedFor(final Object referenceObject) { final ReflectionObjectSizer sizer = ReflectionObjectSizer.getInstance(); final TestObject nullReference = new TestObject(null); int sizeWithoutReference = sizer.sizeof(nullReference); - final TestObject distributedSystemReference = new TestObject(referenceObject); + final TestObject objectReference = new TestObject(referenceObject); final TestObject stringReference = new TestObject("hello"); - assertEquals(sizeWithoutReference, sizer.sizeof(distributedSystemReference)); - assertNotEquals(sizeWithoutReference, sizer.sizeof(stringReference)); + assertThat(sizer.sizeof(objectReference)).isEqualTo(sizeWithoutReference); + assertThat(sizer.sizeof(stringReference)).isNotEqualTo(sizeWithoutReference); } private static class TestObject { - public TestObject(final Object reference) { this.reference = reference; } diff --git 
a/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionSingleObjectSizerTest.java b/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionSingleObjectSizerTest.java index 2a16ce33bc6c..8356a5524b3b 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionSingleObjectSizerTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/size/ReflectionSingleObjectSizerTest.java @@ -1,3 +1,4 @@ +// Copyright (c) VMware, Inc. 2022. All rights reserved. /* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding @@ -32,7 +33,7 @@ public class ReflectionSingleObjectSizerTest { @Test public void sizeofReturnsSafeSizeofGivenUnsafeFieldOffsetUnsupported() { Unsafe unsafe = mock(Unsafe.class); - when(unsafe.fieldOffset(any())).thenThrow(UnsupportedOperationException.class); + when(unsafe.objectFieldOffset(any())).thenThrow(UnsupportedOperationException.class); long result = sizeof(TestClass.class, false, unsafe); @@ -55,7 +56,7 @@ public void unsafeSizeofReturnsMinusOneGivenNullUnsafe() { public void unsafeSizeofReturnsFieldOffsetGivenMockedUnsafeFieldOffset() { Unsafe unsafe = mock(Unsafe.class); final long FIELD_OFFSET = 37; - when(unsafe.fieldOffset(any())).thenReturn(FIELD_OFFSET); + when(unsafe.objectFieldOffset(any())).thenReturn(FIELD_OFFSET); long result = unsafeSizeof(TestClass.class, unsafe); @@ -65,7 +66,7 @@ public void unsafeSizeofReturnsFieldOffsetGivenMockedUnsafeFieldOffset() { @Test public void unsafeSizeofReturnsMinusOneGivenUnsafeFieldOffsetUnsupported() { Unsafe unsafe = mock(Unsafe.class); - when(unsafe.fieldOffset(any())).thenThrow(UnsupportedOperationException.class); + when(unsafe.objectFieldOffset(any())).thenThrow(UnsupportedOperationException.class); long result = unsafeSizeof(TestClass.class, unsafe); diff --git a/geode-core/src/test/java/org/apache/geode/internal/stats50/VMStats50Test.java b/geode-core/src/test/java/org/apache/geode/internal/stats50/VMStats50Test.java new file mode 100644 index 000000000000..efb0aaa454ee --- /dev/null +++ b/geode-core/src/test/java/org/apache/geode/internal/stats50/VMStats50Test.java @@ -0,0 +1,31 @@ +/* + * Copyright 2022 VMware, Inc. 
+ * https://network.tanzu.vmware.com/legal_documents/vmware_eula + */ +package org.apache.geode.internal.stats50; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.condition.OS.WINDOWS; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledOnOs; +import org.junit.jupiter.api.condition.EnabledOnOs; + +public class VMStats50Test { + @Test + public void verifyCpuBeanExists() { + assertThat(VMStats50.cpuBean).isNotNull(); + } + + @Test + @EnabledOnOs(WINDOWS) + public void unixBeanNullOnWindows() { + assertThat(VMStats50.unixBean).isNull(); + } + + @Test + @DisabledOnOs(WINDOWS) + public void unixBeanExistsOnUnix() { + assertThat(VMStats50.unixBean).isNotNull(); + } +} diff --git a/geode-core/src/test/java/org/apache/geode/internal/util/ProductVersionUtilTest.java b/geode-core/src/test/java/org/apache/geode/internal/util/ProductVersionUtilTest.java index 28ec38e651fb..ceac3428cf1d 100644 --- a/geode-core/src/test/java/org/apache/geode/internal/util/ProductVersionUtilTest.java +++ b/geode-core/src/test/java/org/apache/geode/internal/util/ProductVersionUtilTest.java @@ -39,13 +39,14 @@ public void getComponentVersionsReturnsGeodeVersionOnly() { @Test public void appendFullVersionAppendsGeodeVersion() throws IOException { - assertThat(ProductVersionUtil.appendFullVersion(new StringBuilder())).contains("Apache Geode"); + assertThat(ProductVersionUtil.appendFullVersion(new StringBuilder())) + .contains("VMware Tanzu GemFire"); } @Test public void getFullVersionContainsGeodeVersion() { assertThat(ProductVersionUtil.getFullVersion()) - .contains("Apache Geode") + .contains("VMware Tanzu GemFire") .contains("Source-Revision") .contains("Build-Id"); } diff --git a/geode-core/src/test/resources/expected-pom.xml b/geode-core/src/test/resources/expected-pom.xml index 74187a68058d..bca5b14447ee 100644 --- a/geode-core/src/test/resources/expected-pom.xml +++ b/geode-core/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-core ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -72,17 +72,17 @@ compile - org.apache.geode + com.vmware.gemfire geode-common compile - org.apache.geode + com.vmware.gemfire geode-serialization compile - org.apache.geode + com.vmware.gemfire geode-management compile @@ -169,22 +169,22 @@ runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-membership runtime - org.apache.geode + com.vmware.gemfire geode-unsafe runtime - org.apache.geode + com.vmware.gemfire geode-tcp-server runtime @@ -200,12 +200,12 @@ - org.apache.geode + com.vmware.gemfire geode-deployment-legacy runtime - org.apache.geode + com.vmware.gemfire geode-http-service runtime true diff --git a/geode-core/src/upgradeTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java b/geode-core/src/upgradeTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java index 0cf346fc3338..c9483a382c17 100644 --- a/geode-core/src/upgradeTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java +++ b/geode-core/src/upgradeTest/java/org/apache/geode/ClusterCommunicationsDUnitTest.java @@ -40,6 +40,7 @@ import static org.apache.geode.test.dunit.VM.getVM; import static org.apache.geode.test.util.ResourceUtils.createTempFileFromResource; import static org.apache.geode.test.version.VmConfigurations.hasGeodeVersion; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assumptions.assumeThat; @@ -84,6 +85,7 @@ import org.apache.geode.distributed.internal.MessageWithReply; import org.apache.geode.distributed.internal.ReplyException; import org.apache.geode.distributed.internal.ReplyMessage; +import org.apache.geode.distributed.internal.membership.InternalDistributedMember; import org.apache.geode.distributed.internal.membership.gms.membership.GMSJoinLeave; import org.apache.geode.internal.InternalDataSerializer; import org.apache.geode.internal.cache.DirectReplyMessage; @@ -272,7 +274,7 @@ public void receiveBigResponse() { getVM(1).invoke("receive a large direct-reply message", () -> { SerialAckedMessageWithBigReply messageWithBigReply = new SerialAckedMessageWithBigReply(); await().until(() -> { - messageWithBigReply.send(Collections.singleton(vm2ID)); + messageWithBigReply.send(uncheckedCast(Collections.singleton(vm2ID))); return true; }); }); @@ -467,14 +469,14 @@ public SerialAckedMessageWithBigReply() { } } - public void send(Set recipients) + public void send(Set recipients) throws InterruptedException, ReplyException { // this message is only used by battery tests so we can log info level debug // messages replyProcessor = new DirectReplyProcessor(originDm, recipients); processorId = replyProcessor.getProcessorId(); setRecipients(recipients); - Set failures = originDm.putOutgoing(this); + Set failures = originDm.putOutgoing(this); if (failures != null && !failures.isEmpty()) { for (Object failure : failures) { System.err.println("Unable to send serial acked message to " + failure); diff --git a/geode-core/src/upgradeTest/java/org/apache/geode/internal/net/SocketCreatorUpgradeTest.java b/geode-core/src/upgradeTest/java/org/apache/geode/internal/net/SocketCreatorUpgradeTest.java index 21d00624b5a4..815fc66e04de 100644 --- a/geode-core/src/upgradeTest/java/org/apache/geode/internal/net/SocketCreatorUpgradeTest.java +++ 
b/geode-core/src/upgradeTest/java/org/apache/geode/internal/net/SocketCreatorUpgradeTest.java @@ -221,7 +221,7 @@ public void startingOldGeode1_12_1_UpTo1_13_0WithProtocolsTLSv1_2Hangs() throws @Test public void upgradingToNewGeodeOnOldJavaWithProtocolsTLSv1_2() throws IOException { - assumeThat(testVersion).as("TODO") + assumeThat(testVersion).as("Geode versions less than or equal to 1.12.0 can upgrade") .isLessThanOrEqualTo(TestVersion.valueOf("1.12.0")); generateSecurityProperties(PROTOCOL_TLSv1_2, securityPropertiesFile, keyStoreFile, trustStoreFile); @@ -238,6 +238,8 @@ public void upgradingToNewGeodeOnOldJavaWithProtocolsTLSv1_2() throws IOExceptio @Test public void upgradingToNewGeodeOnOldJavaWithProtocolsTLSv1_2Hangs() throws IOException { + assumeThat(testVersion).as("GemFire 9.15.0 and upwards can upgrade with no issues") + .isLessThan(TestVersion.valueOf("1.15.0")); assumeThat(testVersion).as("Geode 1.12.0 and older can upgrade.") .isGreaterThan(TestVersion.valueOf("1.12.0")); assumeThat(testVersion).as("Geode between [1.12.1, 1.13.0) can't connect p2p with just TLSv1.2") @@ -352,6 +354,8 @@ public void upgradingToNewJavaOnOldGeodeWithProtocolsAny() throws IOException { @Test public void upgradingToNewJavaOnOldGeodeWithProtocolsAnyHangs() throws IOException { + assumeThat(testVersion).as("GemFire 9.15.0 and upwards can upgrade with no issues") + .isLessThan(TestVersion.valueOf("1.15.0")); assumeThat(testVersion).as("Geode older than 1.13.0 can directly upgrade Java version.") .isGreaterThanOrEqualTo(TestVersion.valueOf("1.13.0")); @@ -383,6 +387,8 @@ public void upgradingToNewJavaOnOldGeodeWithProtocolsTLSv1_2() throws IOExceptio @Test public void upgradingToNewJavaOnOldGeodeWithProtocolsTLSv1_2Hangs() throws IOException { + assumeThat(testVersion).as("GemFire 9.15.0 and upwards can upgrade with no issues") + .isLessThan(TestVersion.valueOf("1.15.0")); assumeThat(testVersion).as("Geode 1.12.0 and older can upgrade.") .isGreaterThan(TestVersion.valueOf("1.12.0")); assumeThat(testVersion) @@ -422,6 +428,8 @@ public void upgradingToNewJavaOnOldGeodeWithProtocolsTLSv1_2_SSLv2Hello() throws @Test public void upgradingToNewJavaOnOldGeodeWithProtocolsTLSv1_2_SSLv2HelloHangs() throws IOException { + assumeThat(testVersion).as("GemFire 9.15.0 and upwards can upgrade with no issues") + .isLessThan(TestVersion.valueOf("1.15.0")); assumeThat(testVersion).as("Geode older than 1.13.0 can directly upgrade Java version.") .isGreaterThanOrEqualTo(TestVersion.valueOf("1.13.0")); @@ -451,6 +459,8 @@ public void upgradingToNewGeodeAndNewJavaWithProtocolsAny() throws IOException { @Test public void upgradingToNewGeodeAndNewJavaWithProtocolsTLSv1_2Hangs() throws IOException { + assumeThat(testVersion).as("GemFire 9.15.0 and upwards can upgrade with no issues") + .isLessThan(TestVersion.valueOf("1.15.0")); assumeThat(testVersion).as("Geode 1.12.0 and older can upgrade.") .isGreaterThan(TestVersion.valueOf("1.12.0")); assumeThat(testVersion).as("Geode between [1.12.1, 1.13.0) can't connect p2p with just TLSv1.2") diff --git a/geode-cq/src/test/resources/expected-pom.xml b/geode-cq/src/test/resources/expected-pom.xml index c94b826278a8..6bf5d2ae573a 100644 --- a/geode-cq/src/test/resources/expected-pom.xml +++ b/geode-cq/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License.
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-cq ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,22 +47,22 @@ - org.apache.geode + com.vmware.gemfire geode-core runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-membership runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime diff --git a/geode-deployment/geode-deployment-legacy/src/test/resources/expected-pom.xml b/geode-deployment/geode-deployment-legacy/src/test/resources/expected-pom.xml index 70dd4cd8828d..b7e4759bfc05 100644 --- a/geode-deployment/geode-deployment-legacy/src/test/resources/expected-pom.xml +++ b/geode-deployment/geode-deployment-legacy/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-deployment-legacy ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom diff --git a/geode-docs/getting_started/intro_to_clients.html.md.erb b/geode-docs/getting_started/intro_to_clients.html.md.erb index d822cba7d096..e356de66490a 100644 --- a/geode-docs/getting_started/intro_to_clients.html.md.erb +++ b/geode-docs/getting_started/intro_to_clients.html.md.erb @@ -68,7 +68,7 @@ Examples are shown here for Maven and Gradle. Replace $VERSION with the version ``` <dependency> - <groupId>org.apache.geode</groupId> + <groupId>com.vmware.gemfire</groupId> <artifactId>geode-core</artifactId> <version>$VERSION</version> </dependency> ``` @@ -79,7 +79,7 @@ Examples are shown here for Maven and Gradle. Replace $VERSION with the version ``` dependencies { - implementation "org.apache.geode:geode-core:$VERSION" + implementation "com.vmware.gemfire:geode-core:$VERSION" } ``` diff --git a/geode-docs/managing/troubleshooting/log_messages_and_solutions.html.md.erb b/geode-docs/managing/troubleshooting/log_messages_and_solutions.html.md.erb index b4837c46639d..614e1716b068 100644 --- a/geode-docs/managing/troubleshooting/log_messages_and_solutions.html.md.erb +++ b/geode-docs/managing/troubleshooting/log_messages_and_solutions.html.md.erb @@ -531,8 +531,8 @@ However, if seeing these messages coincides with symptoms like client side timeo to an insufficient read-timeout in the client side pool configuration, or an insufficient accept queue on the server side. Another setting that warrants investigation is the BridgeServer.HANDSHAKE_POOL_SIZE. If you have not altered this setting in your system properties, -you are likely using the default value of 4, which has been seen to be insufficient for many -environments. Recommend increasing this <%=vars.product_name%> system property to at least 20. +you are likely using the default value of 50. If that is still insufficient, we recommend increasing +this <%=vars.product_name%> system property to a larger value.
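Editor's note: since the doc text above describes BridgeServer.HANDSHAKE_POOL_SIZE as a plain JVM system property, a minimal sketch of raising it follows. The property name is taken from the docs text above; whether a particular release expects a `gemfire.`-prefixed form is an assumption to verify against your version, and the property must be set before the cache server's acceptor is created or it has no effect.

```java
// Hypothetical sketch: raise the handshake pool size before starting a cache server.
// Equivalent JVM flag (assumed form): -DBridgeServer.HANDSHAKE_POOL_SIZE=100
public class HandshakePoolExample {
  public static void main(String[] args) {
    // Set the property first; it is read when the server's acceptor starts up.
    System.setProperty("BridgeServer.HANDSHAKE_POOL_SIZE", "100");
    // ... then create the Cache and start the CacheServer as usual ...
  }
}
```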
## PCC service metrics component failing to connect to locator/server diff --git a/geode-docs/tools_modules/http_session_mgmt/tomcat_setting_up_the_module.html.md.erb b/geode-docs/tools_modules/http_session_mgmt/tomcat_setting_up_the_module.html.md.erb index 1698795c3edb..c08e4b0c969f 100644 --- a/geode-docs/tools_modules/http_session_mgmt/tomcat_setting_up_the_module.html.md.erb +++ b/geode-docs/tools_modules/http_session_mgmt/tomcat_setting_up_the_module.html.md.erb @@ -36,12 +36,7 @@ To run <%=vars.product_name%> in a peer-to-peer configuration, add the following Depending on the version of Tomcat you are using, add one of the following lines to `$CATALINA_HOME$/conf/context.xml` within the `` tag: -For Tomcat 7.0: - -``` pre - -``` -For Tomcat 8.0 and 8.5: +For Tomcat 8.5: ``` pre @@ -65,13 +60,7 @@ To run <%=vars.product_name%> in a client/server configuration, the application Depending on the version of Tomcat you are using, add one of the following lines to `$CATALINA_HOME$/conf/context.xml` within the `` tag: -For Tomcat 7.0: - -``` pre - -``` - -For Tomcat 8.0 and 8.5: +For Tomcat 8.5: ``` pre diff --git a/geode-docs/tools_modules/http_session_mgmt/weblogic_setting_up_the_module.html.md.erb b/geode-docs/tools_modules/http_session_mgmt/weblogic_setting_up_the_module.html.md.erb index 084db231743b..4cb652961082 100644 --- a/geode-docs/tools_modules/http_session_mgmt/weblogic_setting_up_the_module.html.md.erb +++ b/geode-docs/tools_modules/http_session_mgmt/weblogic_setting_up_the_module.html.md.erb @@ -68,6 +68,7 @@ To modify your war or ear file manually, make the following updates: - fastutil jar - geode-common jar - geode-core jar + - geode-deployment-legacy jar - geode-management jar - geode-logging jar - geode-serialization jar diff --git a/geode-dunit/src/test/resources/expected-pom.xml b/geode-dunit/src/test/resources/expected-pom.xml index c394379a567b..f61d4a9e5078 100644 --- a/geode-dunit/src/test/resources/expected-pom.xml +++ b/geode-dunit/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-dunit ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,7 +47,7 @@ - org.apache.geode + com.vmware.gemfire geode-junit compile @@ -58,42 +58,42 @@ - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime - org.apache.geode + com.vmware.gemfire geode-membership runtime - org.apache.geode + com.vmware.gemfire geode-tcp-server runtime - org.apache.geode + com.vmware.gemfire geode-core runtime - org.apache.geode + com.vmware.gemfire geode-gfsh runtime - org.apache.geode + com.vmware.gemfire geode-cq runtime - org.apache.geode + com.vmware.gemfire geode-log4j runtime diff --git a/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/commands/StartMemberUtilsTest.java b/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/commands/StartMemberUtilsTest.java index ec9f801143cd..66295fb05c9f 100644 --- a/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/commands/StartMemberUtilsTest.java +++ b/geode-gfsh/src/integrationTest/java/org/apache/geode/management/internal/cli/commands/StartMemberUtilsTest.java @@ -19,7 +19,7 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.jupiter.api.condition.JRE.JAVA_13; import static org.junit.jupiter.api.condition.JRE.JAVA_14; -import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.File; @@ -55,13 +55,12 @@ public class StartMemberUtilsTest { @Test public void workingDirCantBeCreatedThrowsException() { - File userSpecifiedDir = spy(new File("cantCreateDir")); + File userSpecifiedDir = mock(File.class, "cantCreateDir"); when(userSpecifiedDir.exists()).thenReturn(false); when(userSpecifiedDir.mkdirs()).thenReturn(false); - assertThatThrownBy( - () -> StartMemberUtils.resolveWorkingDirectory(userSpecifiedDir)) - .isInstanceOf(IllegalStateException.class) - .hasMessageContaining("Could not create directory"); + assertThatThrownBy(() -> StartMemberUtils.resolveWorkingDirectory(userSpecifiedDir)) + .isInstanceOf(IllegalStateException.class) + .hasMessageContaining("Could not create directory"); } @Test diff --git a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/MemberJvmOptions.java b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/MemberJvmOptions.java index dbfc1ee40043..39da7834ddba 100644 --- a/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/MemberJvmOptions.java +++ b/geode-gfsh/src/main/java/org/apache/geode/management/internal/cli/commands/MemberJvmOptions.java @@ -28,7 +28,6 @@ import org.apache.geode.distributed.internal.deadlock.UnsafeThreadLocal; import org.apache.geode.internal.offheap.AddressableMemoryManager; -import org.apache.geode.internal.stats50.VMStats50; import org.apache.geode.unsafe.internal.com.sun.jmx.remote.security.MBeanServerAccessController; import org.apache.geode.unsafe.internal.sun.nio.ch.DirectBuffer; @@ -52,16 +51,10 @@ public class MemberJvmOptions { * open needed by {@link AddressableMemoryManager} */ private static final String JAVA_NIO_OPEN = "--add-opens=java.base/java.nio=ALL-UNNAMED"; - /** - * open needed by {@link VMStats50} - */ - private static final String COM_SUN_MANAGEMENT_INTERNAL_OPEN = - 
"--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED"; static final List JAVA_11_OPTIONS = Arrays.asList( COM_SUN_JMX_REMOTE_SECURITY_EXPORT, SUN_NIO_CH_EXPORT, - COM_SUN_MANAGEMENT_INTERNAL_OPEN, JAVA_LANG_OPEN, JAVA_NIO_OPEN); diff --git a/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommandsJUnitTest.java b/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommandsJUnitTest.java index faea5008598b..8a61db5ecc52 100644 --- a/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommandsJUnitTest.java +++ b/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/DiskStoreCommandsJUnitTest.java @@ -14,6 +14,7 @@ */ package org.apache.geode.management.internal.cli.commands; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.ArgumentMatchers.any; @@ -63,7 +64,7 @@ */ public class DiskStoreCommandsJUnitTest { private AbstractExecution mockFunctionExecutor; - private ResultCollector mockResultCollector; + private ResultCollector mockResultCollector; private DistributedMember mockDistributedMember; private ListDiskStoresCommand listDiskStoresCommand; private DescribeDiskStoreCommand describeDiskStoreCommand; @@ -105,7 +106,7 @@ public void testGetDiskStoreDescription() { final DiskStoreDetails expectedDiskStoredDetails = createDiskStoreDetails(memberId, diskStoreName); when(mockFunctionExecutor.execute(any(DescribeDiskStoreFunction.class))) - .thenReturn(mockResultCollector); + .thenReturn(uncheckedCast(mockResultCollector)); when(mockResultCollector.getResult()) .thenReturn(Collections.singletonList(expectedDiskStoredDetails)); @@ -144,7 +145,7 @@ public void testGetDiskStoreDescriptionWithInvalidFunctionResultReturnType() { final String memberId = "mockMember"; final String diskStoreName = "mockDiskStore"; when(mockFunctionExecutor.execute(any(DescribeDiskStoreFunction.class))) - .thenReturn(mockResultCollector); + .thenReturn(uncheckedCast(mockResultCollector)); when(mockResultCollector.getResult()).thenReturn(Collections.singletonList(new Object())); assertThatThrownBy( @@ -170,7 +171,7 @@ public void testGetDiskStoreList() { results.add(CollectionUtils.asSet(diskStoreDetails1, diskStoreDetails2)); results.add(CollectionUtils.asSet(diskStoreDetails4, diskStoreDetails3)); when(mockFunctionExecutor.execute(any(ListDiskStoresFunction.class))) - .thenReturn(mockResultCollector); + .thenReturn(uncheckedCast(mockResultCollector)); when(mockResultCollector.getResult()).thenReturn(results); final List actualDiskStores = @@ -199,7 +200,7 @@ public void testGetDiskStoreListReturnsFunctionInvocationTargetExceptionInResult results.add(CollectionUtils.asSet(diskStoreDetails)); results.add(new FunctionInvocationTargetException("expected")); when(mockFunctionExecutor.execute(any(ListDiskStoresFunction.class))) - .thenReturn(mockResultCollector); + .thenReturn(uncheckedCast(mockResultCollector)); when(mockResultCollector.getResult()).thenReturn(results); final List actualDiskStores = diff --git a/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/ListIndexCommandJUnitTest.java b/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/ListIndexCommandJUnitTest.java index 3652b25279fa..7ddd5a360965 100644 --- 
a/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/ListIndexCommandJUnitTest.java +++ b/geode-gfsh/src/test/java/org/apache/geode/management/internal/cli/commands/ListIndexCommandJUnitTest.java @@ -15,6 +15,7 @@ package org.apache.geode.management.internal.cli.commands; import static org.apache.geode.cache.Region.SEPARATOR; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.ArgumentMatchers.any; @@ -60,16 +61,16 @@ */ public class ListIndexCommandJUnitTest { private ListIndexCommand listIndexCommand; - private ResultCollector mockResultCollector; + private ResultCollector mockResultCollector; private AbstractExecution mockFunctionExecutor; @Before public void setup() { listIndexCommand = spy(ListIndexCommand.class); - mockResultCollector = mock(ResultCollector.class, "ResultCollector"); + mockResultCollector = uncheckedCast(mock(ResultCollector.class, "ResultCollector")); mockFunctionExecutor = mock(AbstractExecution.class, "Function Executor"); when(mockFunctionExecutor.execute(any(ListIndexFunction.class))) - .thenReturn(mockResultCollector); + .thenReturn(uncheckedCast(mockResultCollector)); doReturn(Collections.emptySet()).when(listIndexCommand).getAllMembers(); doReturn(mockFunctionExecutor).when(listIndexCommand).getMembersFunctionExecutor(any()); } diff --git a/geode-gfsh/src/test/resources/expected-pom.xml b/geode-gfsh/src/test/resources/expected-pom.xml index df8b3fadf15c..e4e7df29d571 100644 --- a/geode-gfsh/src/test/resources/expected-pom.xml +++ b/geode-gfsh/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-gfsh ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,12 +47,12 @@ - org.apache.geode + com.vmware.gemfire geode-core compile - org.apache.geode + com.vmware.gemfire geode-common compile @@ -92,22 +92,22 @@ - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-membership runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime - org.apache.geode + com.vmware.gemfire geode-unsafe runtime diff --git a/geode-http-service/src/main/java/org/apache/geode/internal/cache/http/service/InternalHttpService.java b/geode-http-service/src/main/java/org/apache/geode/internal/cache/http/service/InternalHttpService.java index 8c97c1f2d921..7e8ffb753e11 100644 --- a/geode-http-service/src/main/java/org/apache/geode/internal/cache/http/service/InternalHttpService.java +++ b/geode-http-service/src/main/java/org/apache/geode/internal/cache/http/service/InternalHttpService.java @@ -104,6 +104,7 @@ public void createJettyServer(String bindAddress, int port, SSLConfig sslConfig) HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSecureScheme(HTTPS); httpConfig.setSecurePort(port); + httpConfig.setSendServerVersion(false); if (sslConfig.isEnabled()) { SslContextFactory.Server sslContextFactory = new SslContextFactory.Server(); diff --git a/geode-http-service/src/test/resources/expected-pom.xml b/geode-http-service/src/test/resources/expected-pom.xml index 2768c8969e0a..39330a07ebd5 100644 --- a/geode-http-service/src/test/resources/expected-pom.xml +++ b/geode-http-service/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-http-service ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,7 +47,7 @@ - org.apache.geode + com.vmware.gemfire geode-logging runtime diff --git a/geode-jmh/src/test/resources/expected-pom.xml b/geode-jmh/src/test/resources/expected-pom.xml index c0ee0ed60fad..58fd24ac5271 100644 --- a/geode-jmh/src/test/resources/expected-pom.xml +++ b/geode-jmh/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-jmh ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom diff --git a/geode-junit/src/test/resources/expected-pom.xml b/geode-junit/src/test/resources/expected-pom.xml index ff485989635b..d45f47c3f8f4 100644 --- a/geode-junit/src/test/resources/expected-pom.xml +++ b/geode-junit/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-junit ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom diff --git a/geode-log4j/src/test/resources/expected-pom.xml b/geode-log4j/src/test/resources/expected-pom.xml index 874caa6c5ea3..684203a9b90e 100644 --- a/geode-log4j/src/test/resources/expected-pom.xml +++ b/geode-log4j/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-log4j ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,12 +47,12 @@ - org.apache.geode + com.vmware.gemfire geode-core runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime diff --git a/geode-logging/src/test/resources/expected-pom.xml b/geode-logging/src/test/resources/expected-pom.xml index cb31f131f708..c60ffd41c7bf 100644 --- a/geode-logging/src/test/resources/expected-pom.xml +++ b/geode-logging/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-logging ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,7 +47,7 @@ - org.apache.geode + com.vmware.gemfire geode-common compile diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/LuceneIndex.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/LuceneIndex.java index 8645d977b348..db9c6a3fd867 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/LuceneIndex.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/LuceneIndex.java @@ -79,7 +79,7 @@ public interface LuceneIndex { * * @return the {@link LuceneSerializer} associated with this index */ - LuceneSerializer getLuceneSerializer(); + LuceneSerializer getLuceneSerializer(); /** * Returns a boolean value to indicate if reindexing is in progress. diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java index 7db8b96f204b..af78f5164c16 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/IndexRepositoryFactory.java @@ -48,14 +48,15 @@ public class IndexRepositoryFactory { public IndexRepositoryFactory() {} - public IndexRepository computeIndexRepository(final Integer bucketId, LuceneSerializer serializer, + public IndexRepository computeIndexRepository(final Integer bucketId, + LuceneSerializer serializer, InternalLuceneIndex index, PartitionedRegion userRegion, final IndexRepository oldRepository, PartitionedRepositoryManager partitionedRepositoryManager) throws IOException { LuceneIndexForPartitionedRegion indexForPR = (LuceneIndexForPartitionedRegion) index; final PartitionedRegion fileRegion = indexForPR.getFileAndChunkRegion(); // We need to ensure that all members have created the fileAndChunk region before continuing - Region prRoot = PartitionedRegionHelper.getPRRoot(fileRegion.getCache()); + Region prRoot = PartitionedRegionHelper.getPRRoot(fileRegion.getCache()); PartitionRegionConfig prConfig = (PartitionRegionConfig) prRoot.get(fileRegion.getRegionIdentifier()); LuceneFileRegionColocationListener luceneFileRegionColocationCompleteListener = @@ -74,14 +75,14 @@ public IndexRepository computeIndexRepository(final Integer bucketId, LuceneSeri * conditions. * This is a util function just to not let computeIndexRepository be a huge chunk of code. 
*/ - protected IndexRepository finishComputingRepository(Integer bucketId, LuceneSerializer serializer, + protected IndexRepository finishComputingRepository(Integer bucketId, + LuceneSerializer serializer, PartitionedRegion userRegion, IndexRepository oldRepository, InternalLuceneIndex index) throws IOException { LuceneIndexForPartitionedRegion indexForPR = (LuceneIndexForPartitionedRegion) index; final PartitionedRegion fileRegion = indexForPR.getFileAndChunkRegion(); BucketRegion fileAndChunkBucket = getMatchingBucket(fileRegion, bucketId); BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId); - boolean success = false; if (fileAndChunkBucket == null) { if (oldRepository != null) { oldRepository.cleanup(); @@ -111,14 +112,14 @@ protected IndexRepository finishComputingRepository(Integer bucketId, LuceneSeri } final IndexRepository repo; - InternalCache cache = (InternalCache) userRegion.getRegionService(); - boolean initialPdxReadSerializedFlag = cache.getPdxReadSerializedOverride(); + final InternalCache cache = (InternalCache) userRegion.getRegionService(); + final boolean initialPdxReadSerializedFlag = cache.getPdxReadSerializedOverride(); cache.setPdxReadSerializedOverride(true); + boolean success = false; try { IndexWriter writer = buildIndexWriter(bucketId, fileAndChunkBucket, indexForPR); repo = new IndexRepositoryImpl(fileAndChunkBucket, writer, serializer, indexForPR.getIndexStats(), dataBucket, lockService, lockName, indexForPR); - success = false; // fileRegion ops (get/put) need bucketId as a callbackArg for PartitionResolver if (null != fileRegion.get(APACHE_GEODE_INDEX_COMPLETE, bucketId)) { @@ -200,7 +201,7 @@ private boolean reindexUserDataRegion(Integer bucketId, PartitionedRegion userRe return true; } - private Object getValue(Region.Entry entry) { + private Object getValue(Region.Entry entry) { final EntrySnapshot es = (EntrySnapshot) entry; Object value; try { @@ -211,8 +212,9 @@ private Object getValue(Region.Entry entry) { return value; } + @SuppressWarnings("unchecked") protected Map getBucketTargetingMap(BucketRegion region, int bucketId) { - return new BucketTargetingMap<>(region, bucketId); + return new BucketTargetingMap(region, bucketId); } protected String getLockName(final BucketRegion fileAndChunkBucket) { diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneFileRegionColocationListener.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneFileRegionColocationListener.java index 3d42b1ef6d42..13fcf27f07ed 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneFileRegionColocationListener.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneFileRegionColocationListener.java @@ -41,7 +41,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return (obj != null && obj instanceof LuceneFileRegionColocationListener + return (obj instanceof LuceneFileRegionColocationListener && ((LuceneFileRegionColocationListener) obj).bucketID != null && ((LuceneFileRegionColocationListener) obj).bucketID.equals(bucketID)); } diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java index a2b93d6c5393..1792fc4c102a 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java +++ 
b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegion.java @@ -16,6 +16,7 @@ package org.apache.geode.cache.lucene.internal; import static org.apache.geode.cache.Region.SEPARATOR; +import static org.apache.geode.util.internal.UncheckedUtils.uncheckedCast; import java.util.Set; import java.util.concurrent.ExecutorService; @@ -49,7 +50,7 @@ import org.apache.geode.internal.cache.xmlcache.RegionAttributesCreation; public class LuceneIndexForPartitionedRegion extends LuceneIndexImpl { - protected Region fileAndChunkRegion; + protected Region fileAndChunkRegion; protected final FileSystemStats fileSystemStats; public static final String FILES_REGION_SUFFIX = ".files"; @@ -66,14 +67,12 @@ public LuceneIndexForPartitionedRegion(String indexName, String regionPath, Inte } @Override - protected RepositoryManager createRepositoryManager(LuceneSerializer luceneSerializer) { - LuceneSerializer mapper = luceneSerializer; + protected RepositoryManager createRepositoryManager(LuceneSerializer luceneSerializer) { + LuceneSerializer mapper = luceneSerializer; if (mapper == null) { mapper = new HeterogeneousLuceneSerializer(); } - PartitionedRepositoryManager partitionedRepositoryManager = - new PartitionedRepositoryManager(this, mapper, waitingThreadPoolFromDM); - return partitionedRepositoryManager; + return new PartitionedRepositoryManager(this, mapper, waitingThreadPoolFromDM); } @Override @@ -94,15 +93,16 @@ public boolean isIndexingInProgress() { protected void createLuceneListenersAndFileChunkRegions( PartitionedRepositoryManager partitionedRepositoryManager) { partitionedRepositoryManager.setUserRegionForRepositoryManager((PartitionedRegion) dataRegion); - RegionShortcut regionShortCut; final boolean withPersistence = withPersistence(); - RegionAttributes regionAttributes = dataRegion.getAttributes(); + final RegionAttributes regionAttributes = + uncheckedCast(dataRegion.getAttributes()); final boolean withStorage = regionAttributes.getPartitionAttributes().getLocalMaxMemory() > 0; // TODO: 1) dataRegion should be withStorage // 2) Persistence to Persistence // 3) Replicate to Replicate, Partition To Partition // 4) Offheap to Offheap + final RegionShortcut regionShortCut; if (!withStorage) { regionShortCut = RegionShortcut.PARTITION_PROXY; } else if (withPersistence) { @@ -114,7 +114,8 @@ protected void createLuceneListenersAndFileChunkRegions( // create PR fileAndChunkRegion, but not to create its buckets for now final String fileRegionName = createFileRegionName(); - PartitionAttributes partitionAttributes = dataRegion.getPartitionAttributes(); + PartitionAttributes partitionAttributes = + uncheckedCast(dataRegion.getPartitionAttributes()); DistributionManager dm = cache.getInternalDistributedSystem().getDistributionManager(); LuceneBucketListener lucenePrimaryBucketListener = new LuceneBucketListener(partitionedRepositoryManager, dm); @@ -145,30 +146,32 @@ public String createFileRegionName() { return LuceneServiceImpl.getUniqueIndexRegionName(indexName, regionPath, FILES_REGION_SUFFIX); } - private PartitionAttributesFactory configureLuceneRegionAttributesFactory( - PartitionAttributesFactory attributesFactory, - PartitionAttributes dataRegionAttributes) { + private static void configureLuceneRegionAttributesFactory( + PartitionAttributesFactory attributesFactory, + PartitionAttributes dataRegionAttributes) { attributesFactory.setTotalNumBuckets(dataRegionAttributes.getTotalNumBuckets()); 
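Note on the `uncheckedCast` helper that `LuceneIndexForPartitionedRegion` now imports (the `configureLuceneRegionAttributesFactory` body resumes after this sketch): the diff replaces raw-typed locals with a single typed cast through `org.apache.geode.util.internal.UncheckedUtils`. A minimal sketch of the idiom, assuming the helper's signature is `<T> T uncheckedCast(Object)`:

```java
// Sketch of the uncheckedCast idiom adopted above; the helper's exact signature is
// assumed, not copied from this revision. The point: one suppressed cast lives in a
// utility, so call sites stay typed and warning-free.
public final class UncheckedUtilsSketch {
  @SuppressWarnings("unchecked")
  public static <T> T uncheckedCast(Object object) {
    return (T) object; // the only place the unchecked warning lives
  }

  public static void main(String[] args) {
    Object raw = java.util.Arrays.asList("a", "b");
    java.util.List<String> typed = uncheckedCast(raw); // no cast clutter at the call site
    System.out.println(typed.get(0)); // prints "a"
  }
}
```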
attributesFactory.setRedundantCopies(dataRegionAttributes.getRedundantCopies()); attributesFactory.setPartitionResolver(getPartitionResolver(dataRegionAttributes)); attributesFactory.setRecoveryDelay(dataRegionAttributes.getRecoveryDelay()); attributesFactory.setStartupRecoveryDelay(dataRegionAttributes.getStartupRecoveryDelay()); - return attributesFactory; } - private PartitionResolver getPartitionResolver(PartitionAttributes dataRegionAttributes) { + private static PartitionResolver getPartitionResolver( + PartitionAttributes dataRegionAttributes) { if (dataRegionAttributes.getPartitionResolver() instanceof FixedPartitionResolver) { - return new BucketTargetingFixedResolver(); + return new BucketTargetingFixedResolver<>(); } else { - return new BucketTargetingResolver(); + return new BucketTargetingResolver<>(); } } protected Region createRegion(final String regionName, final RegionShortcut regionShortCut, final String colocatedWithRegionName, - final PartitionAttributes partitionAttributes, final RegionAttributes regionAttributes, + final PartitionAttributes partitionAttributes, + final RegionAttributes regionAttributes, PartitionListener lucenePrimaryBucketListener) { - PartitionAttributesFactory partitionAttributesFactory = new PartitionAttributesFactory(); + PartitionAttributesFactory partitionAttributesFactory = + new PartitionAttributesFactory<>(); if (lucenePrimaryBucketListener != null) { partitionAttributesFactory.addPartitionListener(lucenePrimaryBucketListener); } @@ -176,21 +179,22 @@ protected Region createRegion(final String regionName, configureLuceneRegionAttributesFactory(partitionAttributesFactory, partitionAttributes); // Create RegionAttributes based on input RegionShortcut - RegionAttributes baseAttributes = cache.getRegionAttributes(regionShortCut.toString()); + RegionAttributes baseAttributes = cache.getRegionAttributes(regionShortCut.toString()); RegionAttributesCreation attributes = new RegionAttributesCreation(baseAttributes, false); attributes.setPartitionAttributes(partitionAttributesFactory.create()); if (regionAttributes.getDataPolicy().withPersistence()) { attributes.setDiskStoreName(regionAttributes.getDiskStoreName()); } - return createRegion(regionName, attributes); + return createRegion(regionName, uncheckedCast(attributes)); } public void close() {} @Override public void dumpFiles(final String directory) { - ResultCollector results = FunctionService.onRegion(getDataRegion()) + @SuppressWarnings("unchecked") + ResultCollector results = FunctionService.onRegion(getDataRegion()) .setArguments(new String[] {directory, indexName}).execute(DumpDirectoryFiles.ID); results.getResult(); } @@ -259,7 +263,7 @@ private void destroyOnRemoteMembers() { } catch (ReplyException e) { Throwable cause = e.getCause(); if (cause instanceof IllegalArgumentException) { - // If the IllegalArgumentException is index not found, then its ok; otherwise rethrow it. + // If the IllegalArgumentException is index not found, then it's ok; otherwise rethrow it. String fullRegionPath = regionPath.startsWith(SEPARATOR) ? 
regionPath : SEPARATOR + regionPath; String indexNotFoundMessage = String.format("Lucene index %s was not found in region %s", diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java index 3a232828b7d3..cc89cb374bb2 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneIndexImpl.java @@ -26,7 +26,6 @@ import org.apache.geode.cache.DataPolicy; import org.apache.geode.cache.Region; import org.apache.geode.cache.RegionAttributes; -import org.apache.geode.cache.asyncqueue.AsyncEventQueue; import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueFactoryImpl; import org.apache.geode.cache.asyncqueue.internal.AsyncEventQueueImpl; import org.apache.geode.cache.lucene.LuceneSerializer; @@ -51,7 +50,7 @@ public abstract class LuceneIndexImpl implements InternalLuceneIndex { protected String[] searchableFieldNames; protected RepositoryManager repositoryManager; protected Analyzer analyzer; - protected LuceneSerializer luceneSerializer; + protected LuceneSerializer luceneSerializer; protected LocalRegion dataRegion; protected LuceneIndexImpl(String indexName, String regionPath, InternalCache cache) { @@ -82,10 +81,9 @@ protected LocalRegion getDataRegion() { } protected boolean withPersistence() { - RegionAttributes ra = dataRegion.getAttributes(); + RegionAttributes ra = dataRegion.getAttributes(); DataPolicy dp = ra.getDataPolicy(); - final boolean withPersistence = dp.withPersistence(); - return withPersistence; + return dp.withPersistence(); } protected void setSearchableFields(String[] fields) { @@ -120,11 +118,11 @@ public Analyzer getAnalyzer() { } @Override - public LuceneSerializer getLuceneSerializer() { + public LuceneSerializer getLuceneSerializer() { return luceneSerializer; } - public void setLuceneSerializer(LuceneSerializer serializer) { + public void setLuceneSerializer(LuceneSerializer serializer) { luceneSerializer = serializer; } @@ -151,38 +149,38 @@ public void initialize() { addExtension(dataRegion); } - protected void setupRepositoryManager(LuceneSerializer luceneSerializer) { + protected void setupRepositoryManager(LuceneSerializer luceneSerializer) { repositoryManager = createRepositoryManager(luceneSerializer); } - protected abstract RepositoryManager createRepositoryManager(LuceneSerializer luceneSerializer); + protected abstract RepositoryManager createRepositoryManager( + LuceneSerializer luceneSerializer); protected abstract void createLuceneListenersAndFileChunkRegions( PartitionedRepositoryManager partitionedRepositoryManager); - protected AsyncEventQueue createAEQ(Region dataRegion) { + protected void createAEQ(Region dataRegion) { String aeqId = LuceneServiceImpl.getUniqueIndexName(getName(), regionPath); - return createAEQ(createAEQFactory(dataRegion.getAttributes()), aeqId); + createAEQ(createAEQFactory(dataRegion.getAttributes()), aeqId); } - protected AsyncEventQueue createAEQ(RegionAttributes attributes, String aeqId) { + protected void createAEQ(RegionAttributes attributes, String aeqId) { if (attributes.getPartitionAttributes() != null) { if (attributes.getPartitionAttributes().getLocalMaxMemory() == 0) { // accessor will not create AEQ - return null; + return; } } - return createAEQ(createAEQFactory(attributes), aeqId); + createAEQ(createAEQFactory(attributes), aeqId); } - private AsyncEventQueue 
createAEQ(AsyncEventQueueFactoryImpl factory, String aeqId) { + private void createAEQ(AsyncEventQueueFactoryImpl factory, String aeqId) { LuceneEventListener listener = new LuceneEventListener(cache, repositoryManager); factory.setGatewayEventSubstitutionListener(new LuceneEventSubstitutionFilter()); - AsyncEventQueue indexQueue = factory.create(aeqId, listener); - return indexQueue; + factory.create(aeqId, listener); } - private AsyncEventQueueFactoryImpl createAEQFactory(final RegionAttributes attributes) { + private AsyncEventQueueFactoryImpl createAEQFactory(final RegionAttributes attributes) { AsyncEventQueueFactoryImpl factory = (AsyncEventQueueFactoryImpl) cache.createAsyncEventQueueFactory(); // TODO: not sure if serial AEQ working or not @@ -216,8 +214,8 @@ protected void addExtension(LocalRegion dataRegion) { @Override public void destroy(boolean initiator) { // Find and delete the appropriate extension - Extension extensionToDelete = null; - for (Extension extension : getDataRegion().getExtensionPoint().getExtensions()) { + Extension> extensionToDelete = null; + for (Extension> extension : getDataRegion().getExtensionPoint().getExtensions()) { LuceneIndexCreation index = (LuceneIndexCreation) extension; if (index.getName().equals(indexName)) { extensionToDelete = extension; @@ -270,9 +268,7 @@ protected Region createRegion(final String regionName, try { return cache.createVMRegion(regionName, attributes, ira); } catch (Exception e) { - InternalGemFireError ige = new InternalGemFireError( - "unexpected exception", e); - throw ige; + throw new InternalGemFireError("unexpected exception", e); } } @@ -290,7 +286,7 @@ private void destroyAsyncEventQueue(boolean initiator) { // Remove the id from the dataRegion's AsyncEventQueue ids // Note: The region may already have been destroyed by a remote member - Region region = getDataRegion(); + Region region = getDataRegion(); if (!region.isDestroyed()) { region.getAttributesMutator().removeAsyncEventQueueId(aeqId); } diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneRawIndex.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneRawIndex.java index f8e8e9deb221..e203ab160d8a 100755 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneRawIndex.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneRawIndex.java @@ -27,14 +27,13 @@ protected LuceneRawIndex(String indexName, String regionPath, InternalCache cach } @Override - protected RepositoryManager createRepositoryManager(LuceneSerializer luceneSerializer) { + protected RepositoryManager createRepositoryManager(LuceneSerializer luceneSerializer) { HeterogeneousLuceneSerializer mapper = (HeterogeneousLuceneSerializer) luceneSerializer; if (mapper == null) { mapper = new HeterogeneousLuceneSerializer(); } - RawLuceneRepositoryManager rawLuceneRepositoryManager = new RawLuceneRepositoryManager(this, + return new RawLuceneRepositoryManager(this, mapper, cache.getDistributionManager().getExecutors().getWaitingThreadPool()); - return rawLuceneRepositoryManager; } @Override @@ -44,9 +43,7 @@ protected void createLuceneListenersAndFileChunkRegions( } @Override - public void dumpFiles(String directory) { - return; - } + public void dumpFiles(String directory) {} @Override public void destroy(boolean initiator) {} diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java 
b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java index 480ede33609c..c7ca32147252 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/LuceneServiceImpl.java @@ -149,7 +149,7 @@ public Class getInterface() { return InternalLuceneService.class; } - public void beforeRegionDestroyed(Region region) { + public void beforeRegionDestroyed(Region region) { List indexes = getIndexes(region.getFullPath()); if (!indexes.isEmpty()) { String indexNames = @@ -161,7 +161,7 @@ public void beforeRegionDestroyed(Region region) { } } - public void cleanupFailedInitialization(Region region) { + public void cleanupFailedInitialization(Region region) { List definedIndexes = getDefinedIndexes(region.getFullPath()); for (LuceneIndexCreationProfile definedIndex : definedIndexes) { // Get the AsyncEventQueue @@ -189,20 +189,20 @@ public static String getUniqueIndexRegionName(String indexName, String regionPat } public void createIndex(String indexName, String regionPath, Map fieldAnalyzers, - LuceneSerializer serializer, boolean allowOnExistingRegion) { + LuceneSerializer serializer, boolean allowOnExistingRegion) { if (fieldAnalyzers == null || fieldAnalyzers.isEmpty()) { throw new IllegalArgumentException("At least one field must be indexed"); } Analyzer analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), fieldAnalyzers); Set fieldsSet = fieldAnalyzers.keySet(); - String[] fields = fieldsSet.toArray(new String[fieldsSet.size()]); + String[] fields = fieldsSet.toArray(new String[0]); createIndex(indexName, regionPath, analyzer, fieldAnalyzers, serializer, allowOnExistingRegion, fields); } public void createIndex(final String indexName, String regionPath, final Analyzer analyzer, - final Map fieldAnalyzers, final LuceneSerializer serializer, + final Map fieldAnalyzers, final LuceneSerializer serializer, boolean allowOnExistingRegion, final String... fields) { if (!regionPath.startsWith(SEPARATOR)) { @@ -255,7 +255,7 @@ protected void validateAllMembersAreTheSameVersion(PartitionedRegion region) { private void createIndexOnExistingRegion(PartitionedRegion region, String indexName, String regionPath, String[] fields, Analyzer analyzer, Map fieldAnalyzers, - LuceneSerializer serializer) { + LuceneSerializer serializer) { validateRegionAttributes(region.getAttributes()); LuceneIndexCreationProfile luceneIndexCreationProfile = new LuceneIndexCreationProfile( @@ -311,12 +311,6 @@ protected boolean createLuceneIndexOnDataRegion(final PartitionedRegion userRegi PartitionedRepositoryManager repositoryManager = (PartitionedRepositoryManager) luceneIndex.getRepositoryManager(); Set primaryBucketIds = userRegion.getDataStore().getAllLocalPrimaryBucketIds(); - /** - * - * Calling getRepository will in turn call computeRepository - * which is responsible for indexing the user region. - * - **/ for (final int primaryBucketId : primaryBucketIds) { try { BucketRegion userBucket = userRegion.getDataStore().getLocalBucketById(primaryBucketId); @@ -324,12 +318,10 @@ protected boolean createLuceneIndexOnDataRegion(final PartitionedRegion userRegi throw new BucketNotFoundException( "Bucket ID : " + primaryBucketId + " not found during lucene indexing"); } - /** - * + /* * Calling getRepository will in turn call computeRepository * which is responsible for indexing the user region. 
- * - **/ + */ repositoryManager.getRepository(primaryBucketId); } catch (BucketNotFoundException | PrimaryBucketException e) { logger.debug("Bucket ID : " + primaryBucketId @@ -349,7 +341,7 @@ protected boolean createLuceneIndexOnDataRegion(final PartitionedRegion userRegi } } - static void validateRegionAttributes(RegionAttributes attrs) { + static void validateRegionAttributes(RegionAttributes attrs) { if (!attrs.getDataPolicy().withPartitioning()) { // replicated region throw new UnsupportedOperationException( @@ -389,8 +381,9 @@ public void afterDataRegionCreated(InternalLuceneIndex index) { } public LuceneIndexImpl beforeDataRegionCreated(final String indexName, final String regionPath, - RegionAttributes attributes, final Analyzer analyzer, - final Map fieldAnalyzers, String aeqId, final LuceneSerializer serializer, + RegionAttributes attributes, final Analyzer analyzer, + final Map fieldAnalyzers, String aeqId, + final LuceneSerializer serializer, final String... fields) { LuceneIndexImpl index = createIndexObject(indexName, regionPath); index.setSearchableFields(fields); @@ -418,7 +411,7 @@ private void registerDefinedIndex(final String indexName, final String regionPat @Override public LuceneIndex getIndex(String indexName, String regionPath) { - Region region = cache.getRegion(regionPath); + Region region = cache.getRegion(regionPath); if (region == null) { return null; } @@ -431,7 +424,7 @@ public Collection getAllIndexes() { } public List getIndexes(String regionPath) { - List indexes = new ArrayList(); + List indexes = new ArrayList<>(); for (LuceneIndex index : getAllIndexes()) { if (index.getRegionPath().equals(regionPath)) { indexes.add(index); @@ -441,7 +434,7 @@ public List getIndexes(String regionPath) { } public List getDefinedIndexes(String regionPath) { - List profiles = new ArrayList(); + List profiles = new ArrayList<>(); for (LuceneIndexCreationProfile profile : getAllDefinedIndexes()) { if (profile.getRegionPath().equals(regionPath)) { profiles.add(profile); @@ -606,10 +599,6 @@ private void registerIndex(LuceneIndex index) { } } - public void unregisterIndex(final String region) { - indexMap.remove(region); - } - @Override public void register(DataSerializableFixedIdRegistrar registrar) { registrar.register(CREATE_REGION_MESSAGE_LUCENE, @@ -658,19 +647,22 @@ public LuceneIndexCreationProfile getDefinedIndex(String indexName, String regio @Override public boolean waitUntilFlushed(String indexName, String regionPath, long timeout, TimeUnit unit) throws InterruptedException { - Region dataRegion = cache.getRegion(regionPath); + final Region dataRegion = cache.getRegion(regionPath); if (dataRegion == null) { logger.info("Data region " + regionPath + " not found"); return false; } - WaitUntilFlushedFunctionContext context = + final WaitUntilFlushedFunctionContext context = new WaitUntilFlushedFunctionContext(indexName, timeout, unit); - Execution execution = FunctionService.onRegion(dataRegion); - ResultCollector rs = execution.setArguments(context).execute(WaitUntilFlushedFunction.ID); - List results = (List) rs.getResult(); - for (Boolean oneResult : results) { - if (oneResult == false) { + @SuppressWarnings("unchecked") + final Execution> execution = + FunctionService.onRegion(dataRegion); + final ResultCollector> rs = + execution.setArguments(context).execute(WaitUntilFlushedFunction.ID); + final List results = rs.getResult(); + for (final Boolean oneResult : results) { + if (!oneResult) { return false; } } @@ -679,7 +671,7 @@ public boolean 
waitUntilFlushed(String indexName, String regionPath, long timeou @Override public boolean isIndexingInProgress(String indexName, String regionPath) { - Region region = cache.getRegion(regionPath); + final Region region = cache.getRegion(regionPath); if (region == null) { logger.info("Data region " + regionPath + " not found"); return false; @@ -688,8 +680,8 @@ public boolean isIndexingInProgress(String indexName, String regionPath) { // rolled to a version more than or equal to client's // hence we don't need to validate the servers. if (!cache.isClient()) { - // Also a check for PartitionedRegion. As we cannot use the same method calls to - // to get the members hosting the region for RR (future implementation) + // Also a check for PartitionedRegion. As we cannot use the same method calls to get the + // members hosting the region for RR (future implementation) if (region instanceof PartitionedRegion) { PartitionedRegion dataRegion = (PartitionedRegion) region; // Validate all members are Apache Geode v1.7.0 or above @@ -703,12 +695,13 @@ public boolean isIndexingInProgress(String indexName, String regionPath) { } } } - Execution execution = FunctionService.onRegion(region); - ResultCollector resultCollector = + @SuppressWarnings("unchecked") + final Execution> execution = FunctionService.onRegion(region); + final ResultCollector> resultCollector = execution.setArguments(indexName).execute(IndexingInProgressFunction.ID); - List results = (List) resultCollector.getResult(); - for (Boolean result : results) { - if (result == true) { + final List results = resultCollector.getResult(); + for (final Boolean result : results) { + if (result) { return true; } } diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManager.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManager.java index a373d70610b1..e9e328255ca1 100755 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManager.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/PartitionedRepositoryManager.java @@ -54,14 +54,14 @@ public class PartitionedRepositoryManager implements RepositoryManager { /** The user region for this index */ protected PartitionedRegion userRegion = null; - protected final LuceneSerializer serializer; + protected final LuceneSerializer serializer; protected final InternalLuceneIndex index; protected volatile boolean closed; private final CountDownLatch isDataRegionReady = new CountDownLatch(1); private final ExecutorService waitingThreadPoolFromDM; - public PartitionedRepositoryManager(InternalLuceneIndex index, LuceneSerializer serializer, + public PartitionedRepositoryManager(InternalLuceneIndex index, LuceneSerializer serializer, ExecutorService waitingThreadPool) { this.index = index; this.serializer = serializer; @@ -74,16 +74,16 @@ public void setUserRegionForRepositoryManager(PartitionedRegion userRegion) { } @Override - public Collection getRepositories(RegionFunctionContext ctx) + public Collection getRepositories(RegionFunctionContext ctx) throws BucketNotFoundException { return getRepositories(ctx, false); } @Override - public Collection getRepositories(RegionFunctionContext ctx, + public Collection getRepositories(RegionFunctionContext ctx, boolean waitForRepository) throws BucketNotFoundException { Region region = ctx.getDataSet(); - int[] buckets = ((InternalRegionFunctionContext) ctx).getLocalBucketArray(region); + int[] buckets = 
((InternalRegionFunctionContext) ctx).getLocalBucketArray(region); if (buckets == null || buckets[0] == 0) { return null; } @@ -116,12 +116,12 @@ public Collection getRepositories(RegionFunctionContext ctx, } @Override - public IndexRepository getRepository(Region region, Object key, Object callbackArg) + public IndexRepository getRepository(Region region, Object key, Object callbackArg) throws BucketNotFoundException { BucketRegion userBucket = userRegion.getBucketRegion(key, callbackArg); if (userBucket == null) { throw new BucketNotFoundException("User bucket was not found for region " + region + "key " - + key + " callbackarg " + callbackArg); + + key + " callbackArg " + callbackArg); } return getRepository(userBucket.getId()); @@ -145,7 +145,7 @@ protected IndexRepository getRepository(Integer bucketId) throws BucketNotFoundE return repo; } - protected IndexRepository computeRepository(Integer bucketId, LuceneSerializer serializer, + protected IndexRepository computeRepository(Integer bucketId, LuceneSerializer serializer, InternalLuceneIndex index, PartitionedRegion userRegion, IndexRepository oldRepository) throws IOException { return indexRepositoryFactory.computeIndexRepository(bucketId, serializer, index, userRegion, @@ -159,7 +159,7 @@ protected IndexRepository computeRepository(Integer bucketId) { } catch (InterruptedException e) { throw new InternalGemFireError("Unable to create index repository", e); } - IndexRepository repo = indexRepositories.compute(bucketId, (key, oldRepository) -> { + return indexRepositories.compute(bucketId, (key, oldRepository) -> { try { if (closed) { if (oldRepository != null) { @@ -172,7 +172,6 @@ protected IndexRepository computeRepository(Integer bucketId) { throw new InternalGemFireError("Unable to create index repository", e); } }); - return repo; } protected void allowRepositoryComputation() { diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawIndexRepositoryFactory.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawIndexRepositoryFactory.java index 984d3eb870c5..cd9262e4a81f 100755 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawIndexRepositoryFactory.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawIndexRepositoryFactory.java @@ -33,21 +33,22 @@ public class RawIndexRepositoryFactory extends IndexRepositoryFactory { public RawIndexRepositoryFactory() {} @Override - public IndexRepository computeIndexRepository(final Integer bucketId, LuceneSerializer serializer, + public IndexRepository computeIndexRepository(final Integer bucketId, + LuceneSerializer serializer, InternalLuceneIndex index, PartitionedRegion userRegion, IndexRepository oldRepository, PartitionedRepositoryManager partitionedRepositoryManager) throws IOException { - final IndexRepository repo; if (oldRepository != null) { oldRepository.cleanup(); } LuceneRawIndex indexForRaw = (LuceneRawIndex) index; BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId); - Directory dir = null; + final Directory dir; if (indexForRaw.withPersistence()) { String bucketLocation = LuceneServiceImpl.getUniqueIndexName(index.getName(), index.getRegionPath() + "_" + bucketId); File location = new File(index.getName(), bucketLocation); if (!location.exists()) { + // noinspection ResultOfMethodCallIgnored location.mkdirs(); } dir = new NIOFSDirectory(location.toPath()); diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawLuceneRepositoryManager.java 
b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawLuceneRepositoryManager.java index 25a4678525f6..a9904e929c10 100755 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawLuceneRepositoryManager.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/RawLuceneRepositoryManager.java @@ -25,7 +25,7 @@ public class RawLuceneRepositoryManager extends PartitionedRepositoryManager { public static IndexRepositoryFactory indexRepositoryFactory = new RawIndexRepositoryFactory(); - public RawLuceneRepositoryManager(LuceneIndexImpl index, LuceneSerializer serializer, + public RawLuceneRepositoryManager(LuceneIndexImpl index, LuceneSerializer serializer, ExecutorService waitingThreadPool) { super(index, serializer, waitingThreadPool); } @@ -42,7 +42,7 @@ protected IndexRepository getRepository(Integer bucketId) throws BucketNotFoundE } @Override - public IndexRepository computeRepository(Integer bucketId, LuceneSerializer serializer, + public IndexRepository computeRepository(Integer bucketId, LuceneSerializer serializer, InternalLuceneIndex index, PartitionedRegion userRegion, IndexRepository oldRepository) throws IOException { return indexRepositoryFactory.computeIndexRepository(bucketId, serializer, index, userRegion, diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingFixedResolver.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingFixedResolver.java index bdd57a3c1820..c446d5cdd2ee 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingFixedResolver.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingFixedResolver.java @@ -27,12 +27,12 @@ * from the target partitioning. * * This is a bit messy, mostly because there's no good way to get the FixedPartition from the actual - * bucket id without iterating over all of the fixed partitions. + * bucket id without iterating over all the fixed partitions. 
*/ -public class BucketTargetingFixedResolver implements FixedPartitionResolver { +public class BucketTargetingFixedResolver implements FixedPartitionResolver { @Override - public Object getRoutingObject(final EntryOperation opDetails) { + public Object getRoutingObject(final EntryOperation opDetails) { int targetBucketId = (Integer) opDetails.getCallbackArgument(); final Map.Entry targetPartition = getFixedPartition(opDetails); @@ -50,13 +50,13 @@ public void close() { } @Override - public String getPartitionName(final EntryOperation opDetails, - @Deprecated final Set targetPartitions) { + public String getPartitionName(final EntryOperation opDetails, + @Deprecated final Set targetPartitions) { final Map.Entry targetPartition = getFixedPartition(opDetails); return targetPartition.getKey(); } - protected Map.Entry getFixedPartition(final EntryOperation opDetails) { + protected Map.Entry getFixedPartition(final EntryOperation opDetails) { PartitionedRegion region = (PartitionedRegion) opDetails.getRegion(); int targetBucketId = (Integer) opDetails.getCallbackArgument(); Map partitions = region.getPartitionsMap(); diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingMap.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingMap.java index e23e3efe30fb..49cef4f008c5 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingMap.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingMap.java @@ -20,16 +20,15 @@ import org.apache.geode.cache.EntryExistsException; import org.apache.geode.cache.EntryNotFoundException; import org.apache.geode.cache.Region; -import org.apache.geode.internal.cache.BucketRegion; public class BucketTargetingMap extends AbstractMap { private final Region region; public Object callbackArg; - public BucketTargetingMap(BucketRegion region, int bucketId) { - callbackArg = bucketId; + public BucketTargetingMap(Region region, int bucketId) { this.region = region; + callbackArg = bucketId; } @Override @@ -37,6 +36,7 @@ public Set keySet() { return region.keySet(); } + @SuppressWarnings("unchecked") @Override public V putIfAbsent(final K key, final V value) { try { diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingResolver.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingResolver.java index a6b5c104b8df..f9968c094db1 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingResolver.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/partition/BucketTargetingResolver.java @@ -21,9 +21,9 @@ * A partition resolver that expects all operations to be performed with a callback argument that * indicates the actual bucket to target. 
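The `BucketTargetingResolver` declared just below routes an operation purely on its callback argument, so callers can address a specific bucket by passing the bucket id, e.g. `region.put(key, value, bucketId)`. A hedged standalone sketch of the same contract (hypothetical class name; generics follow the signatures in this diff):

```java
// Minimal resolver with the same contract as BucketTargetingResolver (below):
// the routing object is whatever callback argument the caller supplied.
import org.apache.geode.cache.EntryOperation;
import org.apache.geode.cache.PartitionResolver;

public class CallbackArgRoutingSketch<K, V> implements PartitionResolver<K, V> {
  @Override
  public Object getRoutingObject(final EntryOperation<K, V> opDetails) {
    // e.g. region.put(key, value, bucketId) makes bucketId the routing object
    return opDetails.getCallbackArgument();
  }

  @Override
  public String getName() {
    return getClass().getName();
  }

  @Override
  public void close() {
    // nothing to release
  }
}
```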
*/ -public class BucketTargetingResolver implements PartitionResolver { +public class BucketTargetingResolver implements PartitionResolver { @Override - public Object getRoutingObject(final EntryOperation opDetails) { + public Object getRoutingObject(final EntryOperation opDetails) { return opDetails.getCallbackArgument(); } diff --git a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/repository/RepositoryManager.java b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/repository/RepositoryManager.java index 3f7d29273551..7af821cfb561 100644 --- a/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/repository/RepositoryManager.java +++ b/geode-lucene/src/main/java/org/apache/geode/cache/lucene/internal/repository/RepositoryManager.java @@ -27,7 +27,7 @@ */ public interface RepositoryManager { - IndexRepository getRepository(Region region, Object key, Object callbackArg) + IndexRepository getRepository(Region region, Object key, Object callbackArg) throws BucketNotFoundException; /** @@ -37,7 +37,7 @@ IndexRepository getRepository(Region region, Object key, Object callbackArg) * @return a collection of {@link IndexRepository} instances * @throws BucketNotFoundException if any of the requested buckets is not found on this member */ - Collection getRepositories(RegionFunctionContext context) + Collection getRepositories(RegionFunctionContext context) throws BucketNotFoundException; /** @@ -50,7 +50,7 @@ Collection getRepositories(RegionFunctionContext context) * @return a collection of {@link IndexRepository} instances * @throws BucketNotFoundException if any of the requested buckets is not found on this member */ - Collection getRepositories(RegionFunctionContext context, boolean waitOnRetry) + Collection getRepositories(RegionFunctionContext context, boolean waitOnRetry) throws BucketNotFoundException; /** diff --git a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java index 1ab067e12602..0a1e5fd83a55 100644 --- a/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java +++ b/geode-lucene/src/test/java/org/apache/geode/cache/lucene/internal/LuceneIndexForPartitionedRegionTest.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; @@ -299,7 +300,7 @@ protected LuceneIndexForPartitionedRegion setupSpy(final Region region, index.setSearchableFields(new String[] {"field"}); LuceneIndexForPartitionedRegion spy = spy(index); doReturn(null).when(spy).createRegion(any(), any(), any(), any(), any(), any()); - doReturn(null).when(spy).createAEQ(any(), any()); + doNothing().when(spy).createAEQ(any(), any()); spy.setupRepositoryManager(null); spy.createAEQ(region.getAttributes(), aeq); spy.initialize(); @@ -355,7 +356,7 @@ public void initializeShouldCreatePartitionPersistentFileRegion() { index.setSearchableFields(new String[] {"field"}); LuceneIndexForPartitionedRegion spy = spy(index); doReturn(null).when(spy).createRegion(any(), any(), any(), any(), any(), any()); - doReturn(null).when(spy).createAEQ(any(), any()); + doNothing().when(spy).createAEQ(any(), any()); 
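On the `doReturn(null)` to `doNothing()` substitutions in this test (the test body continues below): once `createAEQ` returns `void`, Mockito rejects stubbing it with a return value, and void methods on a spy are stubbed with the `doNothing()` family instead. A self-contained illustration with a hypothetical class, not taken from this PR:

```java
// Hypothetical sketch: stubbing a void method on a Mockito spy.
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;

import org.junit.jupiter.api.Test;

class VoidStubbingSketchTest {
  static class Worker {
    void fire() {
      throw new IllegalStateException("real side effect");
    }
  }

  @Test
  void doNothingSuppressesTheRealCall() {
    Worker worker = spy(new Worker());
    // doReturn(null).when(worker).fire() would be rejected: a void method
    // cannot be stubbed with a return value
    doNothing().when(worker).fire();
    worker.fire(); // no exception: the stub replaced the real method
    verify(worker).fire();
  }
}
```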
spy.setupRepositoryManager(null); spy.createAEQ(any(), any()); spy.initialize(); @@ -380,7 +381,7 @@ public void dumpFilesShouldInvokeDumpFunction() { new LuceneIndexForPartitionedRegion(name, regionPath, cache); index = spy(index); when(index.getFieldNames()).thenReturn(fields); - doReturn(aeq).when(index).createAEQ(any(), any()); + doNothing().when(index).createAEQ(any(), any()); index.setupRepositoryManager(null); index.createAEQ(cache.getRegionAttributes(regionPath), aeq.getId()); index.initialize(); diff --git a/geode-lucene/src/test/resources/expected-pom.xml b/geode-lucene/src/test/resources/expected-pom.xml index 846ac142c3e4..01868af5535f 100644 --- a/geode-lucene/src/test/resources/expected-pom.xml +++ b/geode-lucene/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-lucene ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,7 +47,7 @@ - org.apache.geode + com.vmware.gemfire geode-core compile @@ -57,22 +57,22 @@ compile - org.apache.geode + com.vmware.gemfire geode-gfsh runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-membership runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime diff --git a/geode-management/src/test/resources/expected-pom.xml b/geode-management/src/test/resources/expected-pom.xml index 3211b783d9b0..6a97e88a1030 100644 --- a/geode-management/src/test/resources/expected-pom.xml +++ b/geode-management/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-management ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,7 +47,7 @@ - org.apache.geode + com.vmware.gemfire geode-serialization runtime diff --git a/geode-membership/src/main/java/org/apache/geode/distributed/internal/membership/api/Message.java b/geode-membership/src/main/java/org/apache/geode/distributed/internal/membership/api/Message.java index 39e30f1ab129..9ae5034218e4 100644 --- a/geode-membership/src/main/java/org/apache/geode/distributed/internal/membership/api/Message.java +++ b/geode-membership/src/main/java/org/apache/geode/distributed/internal/membership/api/Message.java @@ -40,12 +40,14 @@ public interface Message extends DataSerializableFi MemberIdentifier ALL_RECIPIENTS = null; /** - * Establishes the destination of a message + * Sets the intended recipient of the message. If recipient is Message.ALL_RECIPIENTS + * then the message will be sent to all distribution managers. */ void setRecipient(ID member); /** - * Establishes one or more destinations of a message + * Sets the intended recipient of the message. If recipient set contains + * Message.ALL_RECIPIENTS then the message will be sent to all distribution managers. */ void setRecipients(Collection recipients); @@ -71,6 +73,8 @@ public interface Message extends DataSerializableFi /** * is this message intended for all members of the cluster? (note: this does not send * the message to the node initiating the message) + * + * @return {@code true} if message will be sent to everyone, otherwise {@code false}. 
*/ boolean forAll(); diff --git a/geode-membership/src/test/resources/expected-pom.xml b/geode-membership/src/test/resources/expected-pom.xml index 392fa3d82da3..b1d603ce93bb 100644 --- a/geode-membership/src/test/resources/expected-pom.xml +++ b/geode-membership/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-membership ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,22 +47,22 @@ - org.apache.geode + com.vmware.gemfire geode-common compile - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime - org.apache.geode + com.vmware.gemfire geode-tcp-server runtime diff --git a/geode-memcached/src/test/resources/expected-pom.xml b/geode-memcached/src/test/resources/expected-pom.xml index c1067c1be3e8..263670418fcf 100644 --- a/geode-memcached/src/test/resources/expected-pom.xml +++ b/geode-memcached/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-memcached ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,17 +47,17 @@ - org.apache.geode + com.vmware.gemfire geode-core runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime diff --git a/geode-old-client-support/src/test/resources/expected-pom.xml b/geode-old-client-support/src/test/resources/expected-pom.xml index b904aa36b599..8c5b391bab1b 100644 --- a/geode-old-client-support/src/test/resources/expected-pom.xml +++ b/geode-old-client-support/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-old-client-support ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,17 +47,17 @@ - org.apache.geode + com.vmware.gemfire geode-core runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime diff --git a/geode-pulse/src/test/resources/expected-pom.xml b/geode-pulse/src/test/resources/expected-pom.xml index 6e5fe6bfc70f..7d498dca4b55 100644 --- a/geode-pulse/src/test/resources/expected-pom.xml +++ b/geode-pulse/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-pulse ${version} war diff --git a/geode-rebalancer/src/test/resources/expected-pom.xml b/geode-rebalancer/src/test/resources/expected-pom.xml index 5f9ff4b944b9..1476738e5afd 100644 --- a/geode-rebalancer/src/test/resources/expected-pom.xml +++ b/geode-rebalancer/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-rebalancer ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,17 +47,17 @@ - org.apache.geode + com.vmware.gemfire geode-core compile - org.apache.geode + com.vmware.gemfire geode-serialization runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime diff --git a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/KnownVersion.java b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/KnownVersion.java index 935a749a87f9..fcf0cd6d6ed4 100644 --- a/geode-serialization/src/main/java/org/apache/geode/internal/serialization/KnownVersion.java +++ b/geode-serialization/src/main/java/org/apache/geode/internal/serialization/KnownVersion.java @@ -48,7 +48,7 @@ public class KnownVersion extends AbstractVersion { private final byte patch; private final boolean modifiesClientServerProtocol; - public static final int HIGHEST_VERSION = 160; + public static final int HIGHEST_VERSION = 200; @Immutable private static final KnownVersion[] VALUES = new KnownVersion[HIGHEST_VERSION + 1]; @@ -209,12 +209,12 @@ public class KnownVersion extends AbstractVersion { new KnownVersion("GEODE", "1.15.0", (byte) 1, (byte) 15, (byte) 0, (byte) 0, GEODE_1_15_0_ORDINAL, true); - private static final short GEODE_1_16_0_ORDINAL = 160; + private static final short GEMFIRE_10_0_0_ORDINAL = 200; @Immutable - public static final KnownVersion GEODE_1_16_0 = - new KnownVersion("GEODE", "1.16.0", (byte) 1, (byte) 16, (byte) 0, (byte) 0, - GEODE_1_16_0_ORDINAL); + public static final KnownVersion GEMFIRE_10_0_0 = + new KnownVersion("GEMFIRE", "10.0.0", (byte) 10, (byte) 0, (byte) 0, (byte) 0, + GEMFIRE_10_0_0_ORDINAL); /* NOTE: when adding a new version bump the ordinal by 10. Ordinals can be short ints */ @@ -229,7 +229,7 @@ public class KnownVersion extends AbstractVersion { * HIGHEST_VERSION when changing CURRENT !!! */ @Immutable - public static final KnownVersion CURRENT = GEODE_1_16_0; + public static final KnownVersion CURRENT = GEMFIRE_10_0_0; /** * A lot of versioning code needs access to the current version's ordinal diff --git a/geode-serialization/src/test/resources/expected-pom.xml b/geode-serialization/src/test/resources/expected-pom.xml index ae1f34ebe1ef..9c46d523cf93 100644 --- a/geode-serialization/src/test/resources/expected-pom.xml +++ b/geode-serialization/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-serialization ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,12 +47,12 @@ - org.apache.geode + com.vmware.gemfire geode-common runtime - org.apache.geode + com.vmware.gemfire geode-logging runtime diff --git a/geode-server-all/src/integrationTest/resources/dependency_classpath.txt b/geode-server-all/src/integrationTest/resources/dependency_classpath.txt index c2929148ac1c..b49b95c1315a 100644 --- a/geode-server-all/src/integrationTest/resources/dependency_classpath.txt +++ b/geode-server-all/src/integrationTest/resources/dependency_classpath.txt @@ -1,8 +1,8 @@ spring-web-5.3.21.jar -shiro-event-1.9.0.jar -shiro-crypto-hash-1.9.0.jar -shiro-crypto-cipher-1.9.0.jar -shiro-config-core-1.9.0.jar +shiro-event-1.9.1.jar +shiro-crypto-hash-1.9.1.jar +shiro-crypto-cipher-1.9.1.jar +shiro-config-core-1.9.1.jar commons-digester-2.1.jar commons-validator-1.7.jar spring-jcl-5.3.21.jar @@ -23,13 +23,13 @@ geode-cq-0.0.0.jar geode-old-client-support-0.0.0.jar javax.servlet-api-3.1.0.jar jgroups-3.6.14.Final.jar -shiro-cache-1.9.0.jar +shiro-cache-1.9.1.jar httpcore-4.4.15.jar spring-beans-5.3.21.jar lucene-queries-6.6.6.jar -shiro-core-1.9.0.jar +shiro-core-1.9.1.jar HikariCP-4.0.3.jar -slf4j-api-1.7.32.jar +slf4j-api-1.7.36.jar geode-http-service-0.0.0.jar commons-collections-3.2.2.jar httpclient-4.5.13.jar @@ -63,7 +63,7 @@ jetty-io-9.4.46.v20220331.jar geode-deployment-legacy-0.0.0.jar commons-beanutils-1.9.4.jar log4j-core-2.17.2.jar -shiro-crypto-core-1.9.0.jar +shiro-crypto-core-1.9.1.jar jaxb-api-2.3.1.jar geode-unsafe-0.0.0.jar spring-shell-1.2.0.RELEASE.jar @@ -73,14 +73,14 @@ log4j-jul-2.17.2.jar HdrHistogram-2.1.12.jar jackson-annotations-2.13.3.jar micrometer-core-1.9.1.jar -shiro-config-ogdl-1.9.0.jar +shiro-config-ogdl-1.9.1.jar geode-log4j-0.0.0.jar lucene-analyzers-phonetic-6.6.6.jar spring-context-5.3.21.jar jetty-security-9.4.46.v20220331.jar geode-logging-0.0.0.jar commons-io-2.11.0.jar -shiro-lang-1.9.0.jar +shiro-lang-1.9.1.jar javax.transaction-api-1.3.jar geode-common-0.0.0.jar antlr-2.7.7.jar diff --git a/geode-server-all/src/test/resources/expected-pom.xml b/geode-server-all/src/test/resources/expected-pom.xml index 92a10910b554..a82e312c12a2 100644 --- a/geode-server-all/src/test/resources/expected-pom.xml +++ b/geode-server-all/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-server-all ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,87 +47,87 @@ - org.apache.geode + com.vmware.gemfire geode-common compile - org.apache.geode + com.vmware.gemfire geode-connectors compile - org.apache.geode + com.vmware.gemfire geode-core compile - org.apache.geode + com.vmware.gemfire geode-cq compile - org.apache.geode + com.vmware.gemfire geode-gfsh compile - org.apache.geode + com.vmware.gemfire geode-log4j compile - org.apache.geode + com.vmware.gemfire geode-logging compile - org.apache.geode + com.vmware.gemfire geode-management compile - org.apache.geode + com.vmware.gemfire geode-lucene compile - org.apache.geode + com.vmware.gemfire geode-rebalancer compile - org.apache.geode + com.vmware.gemfire geode-serialization compile - org.apache.geode + com.vmware.gemfire geode-memcached runtime - org.apache.geode + com.vmware.gemfire geode-old-client-support runtime - org.apache.geode + com.vmware.gemfire geode-wan runtime - org.apache.geode + com.vmware.gemfire geode-tcp-server runtime - org.apache.geode + com.vmware.gemfire geode-unsafe runtime - org.apache.geode + com.vmware.gemfire geode-http-service runtime diff --git a/geode-tcp-server/src/test/resources/expected-pom.xml b/geode-tcp-server/src/test/resources/expected-pom.xml index cced410793b2..9bc3adb5ce62 100644 --- a/geode-tcp-server/src/test/resources/expected-pom.xml +++ b/geode-tcp-server/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-tcp-server ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,17 +47,17 @@ - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime - org.apache.geode + com.vmware.gemfire geode-common runtime diff --git a/geode-unsafe/src/main/java/org/apache/geode/unsafe/internal/sun/misc/Unsafe.java b/geode-unsafe/src/main/java/org/apache/geode/unsafe/internal/sun/misc/Unsafe.java index 3c5db9e96df1..6bf3a9150ba7 100644 --- a/geode-unsafe/src/main/java/org/apache/geode/unsafe/internal/sun/misc/Unsafe.java +++ b/geode-unsafe/src/main/java/org/apache/geode/unsafe/internal/sun/misc/Unsafe.java @@ -1,3 +1,4 @@ +// Copyright (c) VMware, Inc. 2022. All rights reserved. /* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding @@ -20,12 +21,11 @@ /** * This class wraps the sun.misc.Unsafe class which is only available on Sun JVMs. It is also * available on other JVMs (like IBM). 
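The `Unsafe` wrapper gains `staticFieldOffset` and `staticFieldBase` in the hunk below. A hedged usage sketch for reading a static field through the wrapper, assuming its no-arg constructor works in the target JVM and using the `getInt(Object, long)` accessor visible in this diff:

```java
// Sketch: reading a static field via the new staticFieldBase/staticFieldOffset
// accessors added below. Assumes the wrapper constructs successfully in this JVM.
import java.lang.reflect.Field;

import org.apache.geode.unsafe.internal.sun.misc.Unsafe;

public class StaticFieldSketch {
  static int COUNTER = 42;

  public static void main(String[] args) throws Exception {
    Unsafe unsafe = new Unsafe();
    Field field = StaticFieldSketch.class.getDeclaredField("COUNTER");
    // for a static field the receiver is the field's base object, not an instance
    Object base = unsafe.staticFieldBase(field);
    long offset = unsafe.staticFieldOffset(field);
    System.out.println(unsafe.getInt(base, offset)); // prints 42
  }
}
```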
- * - * */ public class Unsafe { private final sun.misc.Unsafe unsafe; + { sun.misc.Unsafe tmp; try { @@ -42,6 +42,14 @@ public long objectFieldOffset(Field f) { return unsafe.objectFieldOffset(f); } + public long staticFieldOffset(Field f) { + return unsafe.staticFieldOffset(f); + } + + public Object staticFieldBase(Field f) { + return unsafe.staticFieldBase(f); + } + public int getInt(Object o, long offset) { return unsafe.getInt(o, offset); } @@ -162,7 +170,6 @@ public void putByte(long addr, byte value) { } - public void copyMemory(Object o1, long addr1, Object o2, long addr2, long size) { unsafe.copyMemory(o1, addr1, o2, addr2, size); } @@ -183,10 +190,6 @@ public int arrayScaleIndex(Class c) { return unsafe.arrayIndexScale(c); } - public long fieldOffset(Field f) { - return unsafe.objectFieldOffset(f); - } - public int getPageSize() { return unsafe.pageSize(); } diff --git a/geode-unsafe/src/test/resources/expected-pom.xml b/geode-unsafe/src/test/resources/expected-pom.xml index a5533e9a7c87..665650cf55fd 100644 --- a/geode-unsafe/src/test/resources/expected-pom.xml +++ b/geode-unsafe/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-unsafe ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom diff --git a/geode-wan/src/main/java/org/apache/geode/cache/wan/internal/parallel/ParallelGatewaySenderImpl.java b/geode-wan/src/main/java/org/apache/geode/cache/wan/internal/parallel/ParallelGatewaySenderImpl.java index a0edf12f68b8..86aa4f282dd5 100644 --- a/geode-wan/src/main/java/org/apache/geode/cache/wan/internal/parallel/ParallelGatewaySenderImpl.java +++ b/geode-wan/src/main/java/org/apache/geode/cache/wan/internal/parallel/ParallelGatewaySenderImpl.java @@ -180,8 +180,7 @@ public void fillInProfile(Profile profile) { @Override public void setModifiedEventId(EntryEventImpl clonedEvent) { - int bucketId = -1; - // merged from 42004 + final int bucketId; if (clonedEvent.getRegion() instanceof DistributedRegion) { bucketId = PartitionedRegionHelper.getHashKey(clonedEvent.getKey(), getMaxParallelismForReplicatedRegion()); diff --git a/geode-wan/src/test/resources/expected-pom.xml b/geode-wan/src/test/resources/expected-pom.xml index 52547b2739bb..4caf3726df66 100644 --- a/geode-wan/src/test/resources/expected-pom.xml +++ b/geode-wan/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-wan ${version} Apache Geode @@ -37,7 +37,7 @@ - org.apache.geode + com.vmware.gemfire geode-all-bom ${version} pom @@ -47,32 +47,32 @@ - org.apache.geode + com.vmware.gemfire geode-logging runtime - org.apache.geode + com.vmware.gemfire geode-membership runtime - org.apache.geode + com.vmware.gemfire geode-serialization runtime - org.apache.geode + com.vmware.gemfire geode-tcp-server runtime - org.apache.geode + com.vmware.gemfire geode-core runtime - org.apache.geode + com.vmware.gemfire geode-gfsh runtime diff --git a/geode-web-api/src/test/resources/expected-pom.xml b/geode-web-api/src/test/resources/expected-pom.xml index 1ada3a084683..a4b1ce07cb8d 100644 --- a/geode-web-api/src/test/resources/expected-pom.xml +++ b/geode-web-api/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. 
--> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-web-api ${version} war diff --git a/geode-web-management/src/integrationTest/java/org/apache/geode/management/internal/rest/DocLinksControllerRestIntegrationTest.java b/geode-web-management/src/integrationTest/java/org/apache/geode/management/internal/rest/DocLinksControllerRestIntegrationTest.java index aaab8b4f58fc..a378c2a23f3e 100644 --- a/geode-web-management/src/integrationTest/java/org/apache/geode/management/internal/rest/DocLinksControllerRestIntegrationTest.java +++ b/geode-web-management/src/integrationTest/java/org/apache/geode/management/internal/rest/DocLinksControllerRestIntegrationTest.java @@ -19,9 +19,11 @@ import static org.hamcrest.Matchers.is; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.header; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.jsonPath; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import com.jayway.jsonpath.JsonPath; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -31,6 +33,7 @@ import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringRunner; import org.springframework.test.context.web.WebAppConfiguration; +import org.springframework.test.web.servlet.MvcResult; import org.springframework.web.context.WebApplicationContext; import org.apache.geode.test.dunit.rules.ClusterStartupRule; @@ -65,11 +68,19 @@ public void before() { @Test public void getDocumentationLinks() throws Exception { - webContext.perform(get("/")) + MvcResult mvcResult = webContext.perform(get("/")) .andDo(print()) .andExpect(status().isOk()) + .andExpect(header().doesNotExist("server")) .andExpect(jsonPath("$.latest", is(basePath + "/v3/api-docs"))) .andExpect(jsonPath("$.supported", hasSize(1))) - .andExpect(jsonPath("$.supported[0]", is(basePath + "/v3/api-docs"))); + .andReturn(); + + String content = mvcResult.getResponse().getContentAsString(); + String latestLink = JsonPath.read(content, "$.latest"); + webContext.perform(get(latestLink)) + .andDo(print()) + .andExpect(status().isOk()) + .andExpect(jsonPath("$.info.title", is("Apache Geode Management REST API"))); } } diff --git a/geode-web-management/src/main/java/org/apache/geode/management/internal/rest/ManagementLoggingFilter.java b/geode-web-management/src/main/java/org/apache/geode/management/internal/rest/ManagementLoggingFilter.java index 3f28f9465ef6..4e43beee0c3a 100644 --- a/geode-web-management/src/main/java/org/apache/geode/management/internal/rest/ManagementLoggingFilter.java +++ b/geode-web-management/src/main/java/org/apache/geode/management/internal/rest/ManagementLoggingFilter.java @@ -17,6 +17,7 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; +import java.util.StringTokenizer; import javax.servlet.FilterChain; import javax.servlet.ServletException; @@ -73,6 +74,10 @@ private void logRequest(HttpServletRequest request, ContentCachingRequestWrapper } String payload = getContentAsString(wrappedRequest.getContentAsByteArray(), wrappedRequest.getCharacterEncoding()); + String contentType = request.getContentType(); + if (contentType != null && contentType.contains("multipart/form-data")) { + payload = stripMultiPartFileContent(payload); + } String message =
String message = String.format(requestPattern, request.getMethod(), requestUrl, request.getRemoteUser(), payload); logMessage(message); @@ -102,18 +107,33 @@ private String getContentAsString(byte[] buf, String encoding) { return ""; } int length = Math.min(buf.length, MAX_PAYLOAD_LENGTH); - - for (int i = 0; i < length; i++) { - if (buf[i] != '\n' && buf[i] != '\r' && - (buf[i] < ' ' || buf[i] > '~')) { - buf[i] = '?'; - } - } - try { return new String(buf, 0, length, encoding); } catch (UnsupportedEncodingException ex) { return "[unknown]"; } } + + String stripMultiPartFileContent(String content) { + StringBuffer buffer = new StringBuffer(); + StringTokenizer tokenizer = new StringTokenizer(content, "\n"); + boolean skipLine = false; + while (tokenizer.hasMoreTokens()) { + String line = tokenizer.nextToken(); + if (!skipLine) { + buffer.append(line + "\n"); + } + if (line.contains("application/java-archive")) { + // skip the file content lines that follow + skipLine = true; + } + // at the next section boundary, record the elision and resume logging + else if (skipLine && line.startsWith("--")) { + buffer.append("{File Content Logging Skipped}\n"); + buffer.append(line + "\n"); + skipLine = false; + } + } + return buffer.toString(); + } } diff --git a/geode-web-management/src/test/java/org/apache/geode/management/internal/rest/ManagementLoggingFilterTest.java b/geode-web-management/src/test/java/org/apache/geode/management/internal/rest/ManagementLoggingFilterTest.java new file mode 100644 index 000000000000..4de5a4501ab1 --- /dev/null +++ b/geode-web-management/src/test/java/org/apache/geode/management/internal/rest/ManagementLoggingFilterTest.java @@ -0,0 +1,66 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License.
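To make the behavior of stripMultiPartFileContent concrete, here is a worked example with an assumed boundary token; everything between the application/java-archive part header and the next boundary line is replaced by a single placeholder, while all other lines pass through untouched. It assumes the caller sits in the same package as the filter, as the new unit test does:

```java
public class StripExample {
  public static void main(String[] args) {
    // Assumed multipart payload; "--boundary" stands in for a real token.
    String payload = "--boundary\n"
        + "Content-Disposition: form-data; name=\"file\"; filename=\"app.jar\"\n"
        + "Content-Type: application/java-archive\n"
        + "<jar bytes that must never reach the log>\n"
        + "--boundary--\n";
    ManagementLoggingFilter filter = new ManagementLoggingFilter();
    String scrubbed = filter.stripMultiPartFileContent(payload);
    // scrubbed now reads:
    // --boundary
    // Content-Disposition: form-data; name="file"; filename="app.jar"
    // Content-Type: application/java-archive
    // {File Content Logging Skipped}
    // --boundary--
    System.out.print(scrubbed);
  }
}
```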
+ * + */ + +package org.apache.geode.management.internal.rest; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class ManagementLoggingFilterTest { + ManagementLoggingFilter filter; + + @BeforeEach + public void before() { + filter = new ManagementLoggingFilter(); + } + + @Test + public void stripFileContent_no_java_archive() { + String payload = "{a:b}\n"; + assertThat(filter.stripMultiPartFileContent(payload)).isEqualTo(payload); + } + + @Test + public void stripFileContent_with_java_archive() { + String payload = "--FLaQNajADavDbycIwdHAkrQ6YIf3ubGg\n" + + "Content-Disposition: form-data; name=\"file\"; filename=\"DeployCommandRedeployDUnitTestA.jar\"\n" + + "Content-Type: application/java-archive\n" + + "Content-Length: 731\n" + + "\n" + + "PK\u0003\u0004\u0014\u0000\b\b\b\u0000â\\ÉT\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000)\u0000\u0004\u0000DeployCommandRedeployDUnitFunctionA.classþÊ\u0000\u0000\u0095\u0092ÏnÓ@\u0010Æ¿Í?§IhChhÓ\u0092\u0096¶Ð:\tÔBp+B\u008A\u0012E EP5P$n\u008A³J·rìh½F\u0085gâ\u0000\u0017P9ð\u0000<\u0014bÖqUp\u0085H\u000F\u009Eµw\u007Fóyæ\u009Býùëû\u000F\u0000Oð°\u0080<îäQ/b\u0003\u009ByÜ-b\u000BÛ\u0005\u0018Ø1P7pÏÀ}\u0003»\f9§_ZNô\u001E#V/ô\u001C%|¯ã{\u008A\u009F©(\u007FáÄ\u000E\u008Ex\u0010º**ö\u001DCÙ\u009F*1\u0011\u001FyÏ\u0097o¥ÐÒ\u0019\u0011 4.0.0 - org.apache.geode + com.vmware.gemfire geode-web-management ${version} war diff --git a/geode-web/src/test/resources/expected-pom.xml b/geode-web/src/test/resources/expected-pom.xml index a22e2deb81b3..907646f39792 100644 --- a/geode-web/src/test/resources/expected-pom.xml +++ b/geode-web/src/test/resources/expected-pom.xml @@ -17,7 +17,7 @@ limitations under the License. --> 4.0.0 - org.apache.geode + com.vmware.gemfire geode-web ${version} war diff --git a/gradle.properties b/gradle.properties index fae62013714e..9aaaec766a77 100755 --- a/gradle.properties +++ b/gradle.properties @@ -26,14 +26,14 @@ # - release # # The full version string consists of 'versionNumber + releaseQualifier + releaseType' -version = 1.16.0-build.0 +version = 10.0.0-build.0 # Default Maven targets mavenSnapshotUrl = gcs://maven.apachegeode-ci.info/snapshots mavenReleaseUrl = https://repository.apache.org/service/local/staging/deploy/maven2 # Maven also uses the project group as a prefix. -group = org.apache.geode +group = com.vmware.gemfire # 'apply from:' location for gradle scripts, relative to the project root. Specified here so that # it may be overridden by external projects or custom develop environment configurations @@ -44,8 +44,8 @@ scriptDir = gradle # `gradle -PbuildId=N ...` where N is an arbitrary string. buildId = 0 -productName = Apache Geode -productOrg = Apache Software Foundation (ASF) +productName = VMware Tanzu GemFire +productOrg = VMware, Inc.
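On the gradle.properties change above: the comment in that file defines the full version as 'versionNumber + releaseQualifier + releaseType', and the new default resolves to 10.0.0-build.0. A sketch of that composition; the split into the three components below is an assumption for illustration, not read from the build scripts:

```java
public class VersionStringSketch {
  public static void main(String[] args) {
    String versionNumber = "10.0.0";    // was 1.16.0 before this change
    String releaseQualifier = "-build"; // assumed qualifier for dev builds
    String releaseType = ".0";          // assumed incrementing release suffix
    // Composed exactly as the gradle.properties comment describes.
    System.out.println(versionNumber + releaseQualifier + releaseType); // 10.0.0-build.0
  }
}
```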
minimumGradleVersion = 6.8 # Set this on the command line with -P or in ~/.gradle/gradle.properties diff --git a/settings.gradle b/settings.gradle index 74b2578816ef..f952b2f72434 100644 --- a/settings.gradle +++ b/settings.gradle @@ -74,7 +74,6 @@ include 'geode-connectors' include 'geode-http-service' include 'extensions:geode-modules' include 'extensions:geode-modules-test' -include 'extensions:geode-modules-tomcat7' include 'extensions:geode-modules-tomcat8' include 'extensions:geode-modules-tomcat9' include 'extensions:geode-modules-session-internal' @@ -109,7 +108,8 @@ include 'geode-server-all' '1.13.1', '1.13.8', '1.14.0', // Include for SSL protocol configuration changes in 1.14.0 - '1.14.4'].each { + '1.14.4', + '1.15.0'].each { include 'geode-old-versions:'.concat(it) }
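The settings.gradle hunk closing the section drops the tomcat7 module and extends the geode-old-versions list through 1.15.0. A Java rendering of what the Groovy .each loop produces, one included subproject per listed version; only the versions visible in the excerpt are shown, since the hunk starts mid-list:

```java
import java.util.List;

public class OldVersionsSketch {
  public static void main(String[] args) {
    List<String> oldVersions =
        List.of("1.13.1", "1.13.8", "1.14.0", "1.14.4", "1.15.0");
    for (String v : oldVersions) {
      // Same effect as: include 'geode-old-versions:'.concat(it)
      System.out.println("include geode-old-versions:" + v);
    }
  }
}
```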