diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0c8e47c9823b..a05e1e9cb09f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -10,3 +10,4 @@ /java-bigquerystorage/ @googleapis/bigquery-team @googleapis/cloud-sdk-java-team /java-bigquery/ @googleapis/bigquery-team @googleapis/cloud-sdk-java-team /java-spanner/ @googleapis/spanner-team @googleapis/cloud-sdk-java-team +/java-spanner-jdbc/ @googleapis/spanner-team @googleapis/cloud-sdk-java-team diff --git a/.github/workflows/java-spanner-jdbc-ci.yaml b/.github/workflows/java-spanner-jdbc-ci.yaml new file mode 100644 index 000000000000..03e2c8a8f4a1 --- /dev/null +++ b/.github/workflows/java-spanner-jdbc-ci.yaml @@ -0,0 +1,143 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Github action job to test core java library features on +# downstream client libraries before they are released. 
+on: + push: + branches: + - main + pull_request: +name: java-spanner-jdbc ci +env: + BUILD_SUBDIR: java-spanner-jdbc +jobs: + filter: + runs-on: ubuntu-latest + outputs: + library: ${{ steps.filter.outputs.library }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + library: + - 'java-spanner-jdbc/**' + units: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + java: [11, 17, 21, 25] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: ${{matrix.java}} + - run: java -version + - run: .kokoro/build.sh + env: + JOB_TYPE: test + units-java8: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + # Building using Java 17 and run the tests with Java 8 runtime + name: "units (8)" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + java-version: 8 + distribution: temurin + - name: "Set jvm system property environment variable for surefire plugin (unit tests)" + # Maven surefire plugin (unit tests) allows us to specify JVM to run the tests. 
+ # https://maven.apache.org/surefire/maven-surefire-plugin/test-mojo.html#jvm + run: echo "SUREFIRE_JVM_OPT=-Djvm=${JAVA_HOME}/bin/java -P !java17" >> $GITHUB_ENV + shell: bash + - uses: actions/setup-java@v4 + with: + java-version: 17 + distribution: temurin + - run: .kokoro/build.sh + env: + JOB_TYPE: test + windows: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: windows-latest + steps: + - name: Support longpaths + run: git config --system core.longpaths true + # A duplicated 'Support longpaths' step was removed here; the system-wide + # setting above already applies to the checkout step below. + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: 8 + - run: java -version + - run: .kokoro/build.sh + env: + JOB_TYPE: test + dependencies: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + strategy: + matrix: + java: [17] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: ${{matrix.java}} + - run: java -version + - run: .kokoro/dependencies.sh + javadoc: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: 17 + - run: java -version + - run: .kokoro/build.sh + env: + JOB_TYPE: javadoc + lint: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: actions/setup-java@v4 + with: + distribution: temurin + java-version: 17 + - run: java -version + - run: .kokoro/build.sh + env: + JOB_TYPE: lint + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + BASE_SHA: ${{ github.event.pull_request.base.sha }} diff --git a/.github/workflows/java-spanner-jdbc-integration-tests-against-emulator.yaml b/.github/workflows/java-spanner-jdbc-integration-tests-against-emulator.yaml new
file mode 100644 index 000000000000..cddffc081038 --- /dev/null +++ b/.github/workflows/java-spanner-jdbc-integration-tests-against-emulator.yaml @@ -0,0 +1,49 @@ +on: + push: + branches: + - main + pull_request: +name: java-spanner-jdbc Integration tests against emulator +env: + BUILD_SUBDIR: java-spanner-jdbc +jobs: + filter: + runs-on: ubuntu-latest + outputs: + library: ${{ steps.filter.outputs.library }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + library: + - 'java-spanner-jdbc/**' + tests-against-emulator: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + + services: + emulator: + image: gcr.io/cloud-spanner-emulator/emulator:latest + ports: + - 9010:9010 + - 9020:9020 + + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-java@v5 + with: + distribution: temurin + java-version: 17 + - run: java -version + - name: Install dependencies for the integration test + run: .kokoro/build.sh + env: + JOB_TYPE: test + - run: mvn -B -Dspanner.testenv.instance="" -Penable-integration-tests -DtrimStackTrace=false -Dclirr.skip=true -Denforcer.skip=true -fae verify + working-directory: java-spanner-jdbc + env: + SPANNER_EMULATOR_HOST: localhost:9010 + GOOGLE_CLOUD_PROJECT: emulator-test-project diff --git a/.github/workflows/java-spanner-jdbc-quickperf.yaml b/.github/workflows/java-spanner-jdbc-quickperf.yaml new file mode 100644 index 000000000000..df72be92aa90 --- /dev/null +++ b/.github/workflows/java-spanner-jdbc-quickperf.yaml @@ -0,0 +1,46 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Github action job to test core java library features on +# downstream client libraries before they are released. +on: + pull_request: +name: java-spanner-jdbc quickperf +env: + BUILD_SUBDIR: java-spanner-jdbc +jobs: + filter: + runs-on: ubuntu-latest + outputs: + library: ${{ steps.filter.outputs.library }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + library: + - 'java-spanner-jdbc/**' + quickperf: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-java@v5 + with: + distribution: temurin + java-version: 17 + - name: Run tests + run: mvn test + working-directory: java-spanner-jdbc/samples/quickperf diff --git a/.github/workflows/java-spanner-jdbc-sample-tests.yml b/.github/workflows/java-spanner-jdbc-sample-tests.yml new file mode 100644 index 000000000000..b3f74ed088b8 --- /dev/null +++ b/.github/workflows/java-spanner-jdbc-sample-tests.yml @@ -0,0 +1,46 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# Github action job to test core java library features on +# downstream client libraries before they are released. +on: + pull_request: +name: java-spanner-jdbc samples +env: + BUILD_SUBDIR: java-spanner-jdbc +jobs: + filter: + runs-on: ubuntu-latest + outputs: + library: ${{ steps.filter.outputs.library }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + library: + - 'java-spanner-jdbc/**' + test: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-java@v5 + with: + distribution: temurin + java-version: 8 + - name: Run sample tests + run: mvn --quiet --batch-mode test + working-directory: java-spanner-jdbc/samples/snippets diff --git a/.github/workflows/java-spanner-jdbc-spring-data-jdbc-sample.yaml b/.github/workflows/java-spanner-jdbc-spring-data-jdbc-sample.yaml new file mode 100644 index 000000000000..78e014263f6c --- /dev/null +++ b/.github/workflows/java-spanner-jdbc-spring-data-jdbc-sample.yaml @@ -0,0 +1,49 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Github action job to test core java library features on +# downstream client libraries before they are released. 
+on: + pull_request: +name: java-spanner-jdbc spring-data-jdbc-sample +env: + BUILD_SUBDIR: java-spanner-jdbc +jobs: + filter: + runs-on: ubuntu-latest + outputs: + library: ${{ steps.filter.outputs.library }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + library: + - 'java-spanner-jdbc/**' + spring-data-jdbc: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-java@v5 + with: + distribution: temurin + java-version: 17 + - name: Run tests on GoogleSQL + run: mvn test + working-directory: java-spanner-jdbc/samples/spring-data-jdbc/googlesql + - name: Run tests on PostgreSQL + run: mvn test + working-directory: java-spanner-jdbc/samples/spring-data-jdbc/postgresql diff --git a/.github/workflows/java-spanner-jdbc-spring-data-mybatis-sample.yaml b/.github/workflows/java-spanner-jdbc-spring-data-mybatis-sample.yaml new file mode 100644 index 000000000000..3673de7259cc --- /dev/null +++ b/.github/workflows/java-spanner-jdbc-spring-data-mybatis-sample.yaml @@ -0,0 +1,49 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Github action job to test core java library features on +# downstream client libraries before they are released. 
+on: + pull_request: +name: java-spanner-jdbc spring-data-mybatis-sample +env: + BUILD_SUBDIR: java-spanner-jdbc +jobs: + filter: + runs-on: ubuntu-latest + outputs: + library: ${{ steps.filter.outputs.library }} + steps: + - uses: actions/checkout@v4 + - uses: dorny/paths-filter@v3 + id: filter + with: + filters: | + library: + - 'java-spanner-jdbc/**' + spring-data-mybatis: + needs: filter + if: ${{ needs.filter.outputs.library == 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-java@v5 + with: + distribution: temurin + java-version: 17 + - name: Run GoogleSQL sample tests + run: mvn test + working-directory: java-spanner-jdbc/samples/spring-data-mybatis/googlesql + - name: Run PostgreSQL sample tests + run: mvn test + working-directory: java-spanner-jdbc/samples/spring-data-mybatis/postgresql diff --git a/.kokoro/common.sh b/.kokoro/common.sh index 24648d119932..f9ef8af157bc 100644 --- a/.kokoro/common.sh +++ b/.kokoro/common.sh @@ -30,6 +30,7 @@ excluded_modules=( 'sdk-platform-java/java-showcase-3.21.0' 'sdk-platform-java/java-showcase-3.25.8' 'java-spanner' + 'java-spanner-jdbc' ) function retry_with_backoff { diff --git a/.kokoro/presubmit/spanner-jdbc-graalvm-native-presubmit.cfg b/.kokoro/presubmit/spanner-jdbc-graalvm-native-presubmit.cfg new file mode 100644 index 000000000000..0884a6087c90 --- /dev/null +++ b/.kokoro/presubmit/spanner-jdbc-graalvm-native-presubmit.cfg @@ -0,0 +1,43 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.57.0" # {x-version-update:google-cloud-shared-dependencies:current} +} + +env_vars: { + key: "JOB_TYPE" + value: "graalvm-single" +} + +# TODO: remove this after we've migrated all tests and scripts +env_vars: { + key: "GCLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_CLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_APPLICATION_CREDENTIALS" + value: "secret_manager/java-it-service-account" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "java-it-service-account" +} + +env_vars: { + key: "BUILD_SUBDIR" + value: "java-spanner-jdbc" +} + +env_vars: { + key: "INTEGRATION_TEST_ARGS" + value: "-Dtest=com.google.cloud.spanner.jdbc.it.**" +} \ No newline at end of file diff --git a/.kokoro/presubmit/spanner-jdbc-integration.cfg b/.kokoro/presubmit/spanner-jdbc-integration.cfg new file mode 100644 index 000000000000..cd2d91941676 --- /dev/null +++ b/.kokoro/presubmit/spanner-jdbc-integration.cfg @@ -0,0 +1,39 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/java11" +} + +env_vars: { + key: "JOB_TYPE" + value: "integration-single" +} + +# TODO: remove this after we've migrated all tests and scripts +env_vars: { + key: "GCLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_CLOUD_PROJECT" + value: "gcloud-devel" +} + +env_vars: { + key: "GOOGLE_APPLICATION_CREDENTIALS" + value: "secret_manager/java-it-service-account" +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "java-it-service-account" +} + + +env_vars: { + key: "BUILD_SUBDIR" + value: "java-spanner-jdbc" +} diff --git a/generation/check_non_release_please_versions.sh b/generation/check_non_release_please_versions.sh index 07295aeb238c..8ede6173a915 100755 --- a/generation/check_non_release_please_versions.sh +++ b/generation/check_non_release_please_versions.sh @@ -14,6 +14,7 @@ for pomFile in $(find . -mindepth 2 -name pom.xml | sort ); do [[ "${pomFile}" =~ .*java-bigquery.* ]] || \ [[ "${pomFile}" =~ .*sdk-platform-java.* ]] || \ [[ "${pomFile}" =~ .*java-spanner.* ]] || \ + [[ "${pomFile}" =~ .*java-spanner-jdbc.* ]] || \ [[ "${pomFile}" =~ .*.github*. 
]]; then continue fi diff --git a/java-spanner-jdbc/.repo-metadata.json b/java-spanner-jdbc/.repo-metadata.json new file mode 100644 index 000000000000..797bfaa57bb9 --- /dev/null +++ b/java-spanner-jdbc/.repo-metadata.json @@ -0,0 +1,15 @@ +{ + "api_shortname": "spanner-jdbc", + "name_pretty": "Google Cloud Spanner JDBC", + "product_documentation": "https://cloud.google.com/spanner/docs/use-oss-jdbc", + "client_documentation": "https://cloud.google.com/java/docs/reference/google-cloud-spanner-jdbc/latest/history", + "release_level": "stable", + "language": "java", + "min_java_version": 8, + "repo": "googleapis/google-cloud-java", + "repo_short": "google-cloud-java", + "distribution_name": "com.google.cloud:google-cloud-spanner-jdbc", + "library_type": "OTHER", + "codeowner_team": "@googleapis/spanner-team", + "recommended_package": "com.google.cloud.spanner.jdbc" +} diff --git a/java-spanner-jdbc/CHANGELOG.md b/java-spanner-jdbc/CHANGELOG.md new file mode 100644 index 000000000000..eca8e75685bf --- /dev/null +++ b/java-spanner-jdbc/CHANGELOG.md @@ -0,0 +1,2241 @@ +# Changelog + +## [2.35.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.35.3...v2.35.4) (2026-03-04) + + +### Bug Fixes + +* Fix Column Type Name for PostgreSQL ARRAY types ([#2409](https://github.com/googleapis/java-spanner-jdbc/issues/2409)) ([c171388](https://github.com/googleapis/java-spanner-jdbc/commit/c1713882f7d945b60aa9e305234535e0b5a37e6c)) +* Fix UTC<->Timezone conversion issue for DST start and end timestamp ([#2260](https://github.com/googleapis/java-spanner-jdbc/issues/2260)) ([14837f8](https://github.com/googleapis/java-spanner-jdbc/commit/14837f8d195a338089eca1b2bfe316f030012967)) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.21.1 ([#2406](https://github.com/googleapis/java-spanner-jdbc/issues/2406)) ([96f106e](https://github.com/googleapis/java-spanner-jdbc/commit/96f106e551d3c47a557fac2ec95aadb4d9cc1990)) +* Update 
dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.85.0 ([#2395](https://github.com/googleapis/java-spanner-jdbc/issues/2395)) ([8a214f8](https://github.com/googleapis/java-spanner-jdbc/commit/8a214f8b3ddb2c823dacc82ad1559cdf0db67004)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.86.0 ([#2407](https://github.com/googleapis/java-spanner-jdbc/issues/2407)) ([b542d69](https://github.com/googleapis/java-spanner-jdbc/commit/b542d696e171b894483b8a4bff5d0c7befaddd96)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.111.1 ([#2410](https://github.com/googleapis/java-spanner-jdbc/issues/2410)) ([39dd81b](https://github.com/googleapis/java-spanner-jdbc/commit/39dd81b9b456bea03a1018125c3f5db7e4ff047a)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.111.1 ([#2411](https://github.com/googleapis/java-spanner-jdbc/issues/2411)) ([5e636ed](https://github.com/googleapis/java-spanner-jdbc/commit/5e636edb5712b90815c14f72d0d6e8d559642550)) +* Update dependency com.google.cloud:google-cloud-trace to v2.85.0 ([#2396](https://github.com/googleapis/java-spanner-jdbc/issues/2396)) ([122545c](https://github.com/googleapis/java-spanner-jdbc/commit/122545c3b0b3c660defd9aeae37a9442a16ebd8f)) +* Update dependency com.google.cloud:google-cloud-trace to v2.86.0 ([#2408](https://github.com/googleapis/java-spanner-jdbc/issues/2408)) ([6af6e48](https://github.com/googleapis/java-spanner-jdbc/commit/6af6e48cad6c6229058d68b599d96f4cdd64df40)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.57.0 ([#2405](https://github.com/googleapis/java-spanner-jdbc/issues/2405)) ([8e54669](https://github.com/googleapis/java-spanner-jdbc/commit/8e54669eae29522a15da84f4aec819ca3305a25c)) +* Update dependency net.bytebuddy:byte-buddy to v1.18.5 ([#2397](https://github.com/googleapis/java-spanner-jdbc/issues/2397)) 
([6736c63](https://github.com/googleapis/java-spanner-jdbc/commit/6736c63fda79bfc1c2071b4de64e2e6bdd70594e)) +* Update dependency net.bytebuddy:byte-buddy-agent to v1.18.5 ([#2398](https://github.com/googleapis/java-spanner-jdbc/issues/2398)) ([deaef26](https://github.com/googleapis/java-spanner-jdbc/commit/deaef26f58d865a995dc99562fa058091455bcb4)) +* Update dependency org.springframework.boot:spring-boot to v4.0.3 ([#2403](https://github.com/googleapis/java-spanner-jdbc/issues/2403)) ([37aed98](https://github.com/googleapis/java-spanner-jdbc/commit/37aed98531e986827d8e5169ae434493d81059c2)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v4.0.3 ([#2404](https://github.com/googleapis/java-spanner-jdbc/issues/2404)) ([385e930](https://github.com/googleapis/java-spanner-jdbc/commit/385e930f1d102fb3a35d300f8c838fdaf6a6617c)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.11 ([#2402](https://github.com/googleapis/java-spanner-jdbc/issues/2402)) ([f451bca](https://github.com/googleapis/java-spanner-jdbc/commit/f451bca0f6aa24a3ea0e47361797186eafac34b1)) + +## [2.35.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.35.2...v2.35.3) (2026-02-13) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.84.0 ([#2382](https://github.com/googleapis/java-spanner-jdbc/issues/2382)) ([3f9a5fa](https://github.com/googleapis/java-spanner-jdbc/commit/3f9a5fa066a1ebf03472a23e0f279ed5ce47dcf8)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.111.0 ([#2377](https://github.com/googleapis/java-spanner-jdbc/issues/2377)) ([cbccc66](https://github.com/googleapis/java-spanner-jdbc/commit/cbccc6666c3b9c7348a30615b3d71a822734a55a)) +* Update dependency com.google.cloud:google-cloud-trace to v2.84.0 ([#2383](https://github.com/googleapis/java-spanner-jdbc/issues/2383)) 
([cb4b6cf](https://github.com/googleapis/java-spanner-jdbc/commit/cb4b6cf39ad59bd274311be35cade7c9b55707d4)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.56.1 ([#2386](https://github.com/googleapis/java-spanner-jdbc/issues/2386)) ([cf943d8](https://github.com/googleapis/java-spanner-jdbc/commit/cf943d851bdcdf7042c63f493a1021908f984915)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.59.0 ([#2391](https://github.com/googleapis/java-spanner-jdbc/issues/2391)) ([ac66b78](https://github.com/googleapis/java-spanner-jdbc/commit/ac66b78f033837b98f68a6e722ed163f87cda41c)) +* Update dependency org.postgresql:postgresql to v42.7.10 ([#2390](https://github.com/googleapis/java-spanner-jdbc/issues/2390)) ([dbb5c7a](https://github.com/googleapis/java-spanner-jdbc/commit/dbb5c7abc57a832198131f914cd75864f1607025)) + +## [2.35.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.35.1...v2.35.2) (2026-01-28) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.56.0 ([#2373](https://github.com/googleapis/java-spanner-jdbc/issues/2373)) ([ffa398a](https://github.com/googleapis/java-spanner-jdbc/commit/ffa398a3ca079b337bf90b1344fc8eeac68c17af)) + +## [2.35.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.35.0...v2.35.1) (2026-01-26) + + +### Bug Fixes + +* Accept Arrays of Integer, Short, Byte for Array<Int64> ([#2365](https://github.com/googleapis/java-spanner-jdbc/issues/2365)) ([7429508](https://github.com/googleapis/java-spanner-jdbc/commit/7429508b82b26a55b6d7910416f3c72cfa63af2e)) + + +### Performance Improvements + +* Optimize JdbcDataSource#getConnection() ([#2371](https://github.com/googleapis/java-spanner-jdbc/issues/2371)) ([832064c](https://github.com/googleapis/java-spanner-jdbc/commit/832064ca2bca0d2d29e00c6b1417c7a4ce22f847)) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.21.0 
([#2364](https://github.com/googleapis/java-spanner-jdbc/issues/2364)) ([a4b5c21](https://github.com/googleapis/java-spanner-jdbc/commit/a4b5c21a449fca70bab45eb756addcba57f0b080)) +* Update dependency org.springframework.boot:spring-boot to v4.0.2 ([#2369](https://github.com/googleapis/java-spanner-jdbc/issues/2369)) ([6c2b28e](https://github.com/googleapis/java-spanner-jdbc/commit/6c2b28e966dbb4c12a8c877b7eac2935146eaeb5)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v4.0.2 ([#2370](https://github.com/googleapis/java-spanner-jdbc/issues/2370)) ([61714fb](https://github.com/googleapis/java-spanner-jdbc/commit/61714fb5537d3676e7942738a86f08ec0b6e2707)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.10 ([#2368](https://github.com/googleapis/java-spanner-jdbc/issues/2368)) ([db1755c](https://github.com/googleapis/java-spanner-jdbc/commit/db1755ca41b4c7fd0a232be94f66d5965677e46e)) + + +### Documentation + +* Update connection properties docs with DCP ([#2372](https://github.com/googleapis/java-spanner-jdbc/issues/2372)) ([094ab5a](https://github.com/googleapis/java-spanner-jdbc/commit/094ab5a62a723cd895a6ea54259a3bbef98d56fc)) + +## [2.35.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.34.1...v2.35.0) (2026-01-16) + + +### Features + +* Support float4 in JDBC ([#2336](https://github.com/googleapis/java-spanner-jdbc/issues/2336)) ([6bbcac6](https://github.com/googleapis/java-spanner-jdbc/commit/6bbcac65aaa02bcb9a062459de5e36630e27e88c)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.81.0 ([#2331](https://github.com/googleapis/java-spanner-jdbc/issues/2331)) ([a207dea](https://github.com/googleapis/java-spanner-jdbc/commit/a207dea27e97fde2b84da412dd701890c5d8a308)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.82.0 ([#2357](https://github.com/googleapis/java-spanner-jdbc/issues/2357)) 
([6dd218f](https://github.com/googleapis/java-spanner-jdbc/commit/6dd218fcac050b196f5433e20fd9b07bf3122546)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.106.0 ([#2351](https://github.com/googleapis/java-spanner-jdbc/issues/2351)) ([c07d754](https://github.com/googleapis/java-spanner-jdbc/commit/c07d754b4308ae88ec4239dda20671b2ad05cd36)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.107.0 ([#2360](https://github.com/googleapis/java-spanner-jdbc/issues/2360)) ([e45f327](https://github.com/googleapis/java-spanner-jdbc/commit/e45f327272283a5b4a679bc97349bbe66ab3da53)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.106.0 ([#2352](https://github.com/googleapis/java-spanner-jdbc/issues/2352)) ([f7b660a](https://github.com/googleapis/java-spanner-jdbc/commit/f7b660a659eb650b1aef8891b8dfe2a031179513)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.107.0 ([#2361](https://github.com/googleapis/java-spanner-jdbc/issues/2361)) ([033eceb](https://github.com/googleapis/java-spanner-jdbc/commit/033eceb0e30e872154d1a6c43dabfd5f4794d2b5)) +* Update dependency com.google.cloud:google-cloud-trace to v2.81.0 ([#2332](https://github.com/googleapis/java-spanner-jdbc/issues/2332)) ([c389ffc](https://github.com/googleapis/java-spanner-jdbc/commit/c389ffce9fdfde1f1a24742f64ec00b4fc8845b7)) +* Update dependency com.google.cloud:google-cloud-trace to v2.82.0 ([#2358](https://github.com/googleapis/java-spanner-jdbc/issues/2358)) ([9565583](https://github.com/googleapis/java-spanner-jdbc/commit/9565583115b3feb99296de61a4c1be272790994d)) +* Update dependency com.google.cloud:grpc-gcp to v1.9.1 ([#2348](https://github.com/googleapis/java-spanner-jdbc/issues/2348)) ([7de2507](https://github.com/googleapis/java-spanner-jdbc/commit/7de25072cc5b1706c0a66a09d78e08ff67212441)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.55.1 ([#2355](https://github.com/googleapis/java-spanner-jdbc/issues/2355)) 
([b278e20](https://github.com/googleapis/java-spanner-jdbc/commit/b278e203d1f7ceb077e7a13b0657aaa78016ab90)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.58.0 ([#2354](https://github.com/googleapis/java-spanner-jdbc/issues/2354)) ([efd4380](https://github.com/googleapis/java-spanner-jdbc/commit/efd43805057333ef3079c78538e0c99c767a5e01)) +* Update dependency net.bytebuddy:byte-buddy to v1.18.4 ([#2349](https://github.com/googleapis/java-spanner-jdbc/issues/2349)) ([e4daede](https://github.com/googleapis/java-spanner-jdbc/commit/e4daedea8a29038974c8bc73c0d930a997d2116e)) +* Update dependency net.bytebuddy:byte-buddy-agent to v1.18.4 ([#2350](https://github.com/googleapis/java-spanner-jdbc/issues/2350)) ([cb68853](https://github.com/googleapis/java-spanner-jdbc/commit/cb6885347527a14ffe95bd9e0ca5c18745419dfc)) +* Update dependency org.postgresql:postgresql to v42.7.9 ([#2359](https://github.com/googleapis/java-spanner-jdbc/issues/2359)) ([3c869dd](https://github.com/googleapis/java-spanner-jdbc/commit/3c869ddc3abc9f19e6e21cfa8ff7922b5bbcd65e)) +* Update dependency org.springframework.boot:spring-boot to v4.0.1 ([#2343](https://github.com/googleapis/java-spanner-jdbc/issues/2343)) ([9f84c07](https://github.com/googleapis/java-spanner-jdbc/commit/9f84c0782fccf3e93d03498b247e926ad6d24056)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v4.0.1 ([#2344](https://github.com/googleapis/java-spanner-jdbc/issues/2344)) ([661566d](https://github.com/googleapis/java-spanner-jdbc/commit/661566d90fe91a31b7152181704c5f508c3dcb36)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.9 ([#2342](https://github.com/googleapis/java-spanner-jdbc/issues/2342)) ([e1fb27a](https://github.com/googleapis/java-spanner-jdbc/commit/e1fb27a821f0fe2b61b0790be578ba6851bf3b96)) + +## [2.34.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.34.0...v2.34.1) (2025-12-16) + + +### Bug Fixes + +* Cast double Inf and 
NaN to float ([#2304](https://github.com/googleapis/java-spanner-jdbc/issues/2304)) ([a947024](https://github.com/googleapis/java-spanner-jdbc/commit/a94702446aee20013157b45bf1d9c3938f0b6982)), closes [#2256](https://github.com/googleapis/java-spanner-jdbc/issues/2256) + + +### Dependencies + +* Update actions/checkout action to v6 ([#2299](https://github.com/googleapis/java-spanner-jdbc/issues/2299)) ([fbc8969](https://github.com/googleapis/java-spanner-jdbc/commit/fbc8969d66f4687ce91268309a862a0cbe0a114c)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.104.0 ([#2312](https://github.com/googleapis/java-spanner-jdbc/issues/2312)) ([930797b](https://github.com/googleapis/java-spanner-jdbc/commit/930797b71f30e458dee20679178ff3510fcba919)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.105.0 ([#2325](https://github.com/googleapis/java-spanner-jdbc/issues/2325)) ([34a9768](https://github.com/googleapis/java-spanner-jdbc/commit/34a9768b203c967f4191427a815b71e1a503de8f)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.104.0 ([#2313](https://github.com/googleapis/java-spanner-jdbc/issues/2313)) ([5250e2d](https://github.com/googleapis/java-spanner-jdbc/commit/5250e2d069179bdeb773e5e0a1f2a5defd411811)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.105.0 ([#2326](https://github.com/googleapis/java-spanner-jdbc/issues/2326)) ([8f3e6b7](https://github.com/googleapis/java-spanner-jdbc/commit/8f3e6b72baccc5ae8dd5ce3c55807bc889a7f208)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.2 ([#2322](https://github.com/googleapis/java-spanner-jdbc/issues/2322)) ([2faffb4](https://github.com/googleapis/java-spanner-jdbc/commit/2faffb46c71e4b44b42dbc4bfd5d70a3e55eb5f6)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.57.0 ([#2316](https://github.com/googleapis/java-spanner-jdbc/issues/2316)) 
([77aab00](https://github.com/googleapis/java-spanner-jdbc/commit/77aab00ec385a7bfe61615b6b250c0b97a9a9f07)) +* Update dependency net.bytebuddy:byte-buddy to v1.18.2 ([#2306](https://github.com/googleapis/java-spanner-jdbc/issues/2306)) ([ba7430c](https://github.com/googleapis/java-spanner-jdbc/commit/ba7430cd229214487dfe4db208cb274d4662bf19)) +* Update dependency net.bytebuddy:byte-buddy-agent to v1.18.2 ([#2307](https://github.com/googleapis/java-spanner-jdbc/issues/2307)) ([86dc72b](https://github.com/googleapis/java-spanner-jdbc/commit/86dc72b0e1d7199b229474008cb68400458f233e)) +* Update dependency org.springframework.boot:spring-boot to v4 ([#2300](https://github.com/googleapis/java-spanner-jdbc/issues/2300)) ([a6dddf2](https://github.com/googleapis/java-spanner-jdbc/commit/a6dddf2aef2e9d35ec2bf26ea1a1586ab65e122a)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v4 ([#2301](https://github.com/googleapis/java-spanner-jdbc/issues/2301)) ([92efa98](https://github.com/googleapis/java-spanner-jdbc/commit/92efa986a0880524a3d19685df0e08cf7815a69a)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.8 ([#2298](https://github.com/googleapis/java-spanner-jdbc/issues/2298)) ([5603846](https://github.com/googleapis/java-spanner-jdbc/commit/5603846fc004a0e31cbcf10bc246d477cd80d9dd)) +* Update dependency org.testcontainers:testcontainers to v2.0.3 ([#2323](https://github.com/googleapis/java-spanner-jdbc/issues/2323)) ([65302d4](https://github.com/googleapis/java-spanner-jdbc/commit/65302d44315548af3406853121e341523c89521c)) +* Update dependency org.testcontainers:testcontainers-bom to v2.0.3 ([#2324](https://github.com/googleapis/java-spanner-jdbc/issues/2324)) ([8e8b3a0](https://github.com/googleapis/java-spanner-jdbc/commit/8e8b3a0b4c613324a07609fa142330322e8eab95)) + +## [2.34.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.33.3...v2.34.0) (2025-11-18) + + +### Features + +* Add unknownLength 
connection property ([#2286](https://github.com/googleapis/java-spanner-jdbc/issues/2286)) ([6aceb64](https://github.com/googleapis/java-spanner-jdbc/commit/6aceb6487e46beb45277dee230a66b09e4de1671)) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.20.1 ([#2275](https://github.com/googleapis/java-spanner-jdbc/issues/2275)) ([559cdee](https://github.com/googleapis/java-spanner-jdbc/commit/559cdee7633ae508f857890770f173ac2f18d2ca)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.79.0 ([#2281](https://github.com/googleapis/java-spanner-jdbc/issues/2281)) ([3a15afa](https://github.com/googleapis/java-spanner-jdbc/commit/3a15afa13b1583e0941a909ac09acac49c532a36)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.80.0 ([#2288](https://github.com/googleapis/java-spanner-jdbc/issues/2288)) ([b859d8e](https://github.com/googleapis/java-spanner-jdbc/commit/b859d8e07ae7d9e3f71d431cc16cfa1e4b9fb2e4)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.103.0 ([#2291](https://github.com/googleapis/java-spanner-jdbc/issues/2291)) ([226850e](https://github.com/googleapis/java-spanner-jdbc/commit/226850edd1ba3d85a762872abcb70bad836b3ca1)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.103.0 ([#2292](https://github.com/googleapis/java-spanner-jdbc/issues/2292)) ([1213da6](https://github.com/googleapis/java-spanner-jdbc/commit/1213da65f745b88babe6dc2d3805534b47b34bd6)) +* Update dependency com.google.cloud:google-cloud-trace to v2.79.0 ([#2282](https://github.com/googleapis/java-spanner-jdbc/issues/2282)) ([ebbb0de](https://github.com/googleapis/java-spanner-jdbc/commit/ebbb0deb3e7c1e7f564a9a1a792ecba336d5fcb0)) +* Update dependency com.google.cloud:google-cloud-trace to v2.80.0 ([#2289](https://github.com/googleapis/java-spanner-jdbc/issues/2289)) 
([d64f6c6](https://github.com/googleapis/java-spanner-jdbc/commit/d64f6c6adb2f17b53acda3006aff9dc2aabb4dfc)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.54.1 ([#2277](https://github.com/googleapis/java-spanner-jdbc/issues/2277)) ([7a48db6](https://github.com/googleapis/java-spanner-jdbc/commit/7a48db68cb769246b9307e39e37d8af12641a0fb)) +* Update dependency commons-cli:commons-cli to v1.11.0 ([#2283](https://github.com/googleapis/java-spanner-jdbc/issues/2283)) ([7582363](https://github.com/googleapis/java-spanner-jdbc/commit/75823638436e446f1e0a7904dd485b2f84b16f3d)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.56.0 ([#2278](https://github.com/googleapis/java-spanner-jdbc/issues/2278)) ([1fd49aa](https://github.com/googleapis/java-spanner-jdbc/commit/1fd49aa604a395032df675f5f9589eed9ea901c2)) +* Update dependency net.bytebuddy:byte-buddy to v1.18.1 ([#2279](https://github.com/googleapis/java-spanner-jdbc/issues/2279)) ([fa83919](https://github.com/googleapis/java-spanner-jdbc/commit/fa83919382619c3cb6abc567e7d6753a2e2ead33)) +* Update dependency net.bytebuddy:byte-buddy-agent to v1.18.1 ([#2280](https://github.com/googleapis/java-spanner-jdbc/issues/2280)) ([4237635](https://github.com/googleapis/java-spanner-jdbc/commit/4237635cc33784cecf9aa62d757fbf3fd4d1702d)) +* Update dependency org.apache.commons:commons-lang3 to v3.20.0 ([#2290](https://github.com/googleapis/java-spanner-jdbc/issues/2290)) ([eacbecb](https://github.com/googleapis/java-spanner-jdbc/commit/eacbecb7e5340d34d2de1208280dc9432144da26)) +* Update dependency org.testcontainers:testcontainers to v2.0.2 ([#2284](https://github.com/googleapis/java-spanner-jdbc/issues/2284)) ([092f5a1](https://github.com/googleapis/java-spanner-jdbc/commit/092f5a129761f0c8c9d8207c611232a086328896)) +* Update dependency org.testcontainers:testcontainers-bom to v2.0.2 ([#2285](https://github.com/googleapis/java-spanner-jdbc/issues/2285)) 
([360a9fb](https://github.com/googleapis/java-spanner-jdbc/commit/360a9fbe0077abe072f2834823c9f208e2325fcf)) + +## [2.33.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.33.2...v2.33.3) (2025-10-28) + + +### Bug Fixes + +* Run DatabaseMetadata tests for PG on emulator ([#2271](https://github.com/googleapis/java-spanner-jdbc/issues/2271)) ([e7b20eb](https://github.com/googleapis/java-spanner-jdbc/commit/e7b20ebd0ec1554e969c9097c1d5b7c5b6dcc232)) + +## [2.33.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.33.1...v2.33.2) (2025-10-24) + + +### Bug Fixes + +* FindColumn is case-sensitive for unquoted identifier ([#2261](https://github.com/googleapis/java-spanner-jdbc/issues/2261)) ([71441f7](https://github.com/googleapis/java-spanner-jdbc/commit/71441f7e896d5a60c1e6bac9b9a991a9d18fbe3c)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.78.0 ([#2252](https://github.com/googleapis/java-spanner-jdbc/issues/2252)) ([37ea3f0](https://github.com/googleapis/java-spanner-jdbc/commit/37ea3f0652462470b76a5d68f54b543b8afa8a97)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.102.1 ([0d335a9](https://github.com/googleapis/java-spanner-jdbc/commit/0d335a932b8a035b90c866a8b3639d225fa329b0)) +* Update dependency com.google.cloud:google-cloud-trace to v2.78.0 ([#2253](https://github.com/googleapis/java-spanner-jdbc/issues/2253)) ([e51eaeb](https://github.com/googleapis/java-spanner-jdbc/commit/e51eaeb401a55161474c2848af51446e7669dca0)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.53.0 ([0d335a9](https://github.com/googleapis/java-spanner-jdbc/commit/0d335a932b8a035b90c866a8b3639d225fa329b0)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.55.0 ([#2251](https://github.com/googleapis/java-spanner-jdbc/issues/2251)) ([14e9ea6](https://github.com/googleapis/java-spanner-jdbc/commit/14e9ea6b9356eb59cd42a14bea4af3ca102a5cf1)) +* Update dependency 
org.springframework.boot:spring-boot to v3.5.7 ([#2263](https://github.com/googleapis/java-spanner-jdbc/issues/2263)) ([0a916b9](https://github.com/googleapis/java-spanner-jdbc/commit/0a916b97925b0651a8cbc69e546bf6539fc39b27)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.5.7 ([#2264](https://github.com/googleapis/java-spanner-jdbc/issues/2264)) ([3f8d0db](https://github.com/googleapis/java-spanner-jdbc/commit/3f8d0db636a6278a3eefd8de0835f776efcbb0e0)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.7 ([#2267](https://github.com/googleapis/java-spanner-jdbc/issues/2267)) ([1f79712](https://github.com/googleapis/java-spanner-jdbc/commit/1f79712792587b8827e5546064f61e8e7e0bec11)) +* Update dependency org.springframework.data:spring-data-bom to v2025.0.5 ([#2258](https://github.com/googleapis/java-spanner-jdbc/issues/2258)) ([1054cdd](https://github.com/googleapis/java-spanner-jdbc/commit/1054cddc60b811fe919a8313f433b3490e0f0cfb)) +* Update dependency org.testcontainers:testcontainers to v2 ([#2254](https://github.com/googleapis/java-spanner-jdbc/issues/2254)) ([4058ed5](https://github.com/googleapis/java-spanner-jdbc/commit/4058ed537b4c70d807d589d51410301ea73434be)) +* Update dependency org.testcontainers:testcontainers-bom to v2 ([#2255](https://github.com/googleapis/java-spanner-jdbc/issues/2255)) ([64faa98](https://github.com/googleapis/java-spanner-jdbc/commit/64faa9867b0a6d9535a03fecfcfada6d34687457)) + +## [2.33.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.33.0...v2.33.1) (2025-10-09) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.76.0 ([#2240](https://github.com/googleapis/java-spanner-jdbc/issues/2240)) ([442565e](https://github.com/googleapis/java-spanner-jdbc/commit/442565ea8bf22ee926126fbd11a68a869b0772c7)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.101.1 
([#2227](https://github.com/googleapis/java-spanner-jdbc/issues/2227)) ([275c6bd](https://github.com/googleapis/java-spanner-jdbc/commit/275c6bd7f3f58b3f980e9ffaa576d1f910ecf08b)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.102.0 ([#2244](https://github.com/googleapis/java-spanner-jdbc/issues/2244)) ([383a392](https://github.com/googleapis/java-spanner-jdbc/commit/383a39222c7526163323575734c8473a34d4098d)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.101.1 ([#2228](https://github.com/googleapis/java-spanner-jdbc/issues/2228)) ([ded7944](https://github.com/googleapis/java-spanner-jdbc/commit/ded79447b8ecd294e56560b43cb02e45464706f1)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.102.0 ([#2246](https://github.com/googleapis/java-spanner-jdbc/issues/2246)) ([7162ac2](https://github.com/googleapis/java-spanner-jdbc/commit/7162ac236a77c24b4f0faccefa2cf3bdee6b96ab)) +* Update dependency com.google.cloud:google-cloud-trace to v2.76.0 ([#2241](https://github.com/googleapis/java-spanner-jdbc/issues/2241)) ([ff2432d](https://github.com/googleapis/java-spanner-jdbc/commit/ff2432d46cd23df2230671297f06c8e6fca8f1d7)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.3 ([#2236](https://github.com/googleapis/java-spanner-jdbc/issues/2236)) ([9a44975](https://github.com/googleapis/java-spanner-jdbc/commit/9a44975e0ab7ca49514be1d9ea03e8e760a0f3bc)) +* Update dependency net.bytebuddy:byte-buddy to v1.17.8 ([#2245](https://github.com/googleapis/java-spanner-jdbc/issues/2245)) ([60a3a8f](https://github.com/googleapis/java-spanner-jdbc/commit/60a3a8f0fcf86742128f8803ee3297670b713923)) +* Update dependency net.bytebuddy:byte-buddy-agent to v1.17.8 ([#2243](https://github.com/googleapis/java-spanner-jdbc/issues/2243)) ([952c08a](https://github.com/googleapis/java-spanner-jdbc/commit/952c08afbf2f7838ad864b8bb58423e8b566d325)) + + +### Documentation + +* Add samples for transaction isolation level 
([#2030](https://github.com/googleapis/java-spanner-jdbc/issues/2030)) ([ca243d1](https://github.com/googleapis/java-spanner-jdbc/commit/ca243d194ef392d44cea561a148c0376b9af630d)) + +## [2.33.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.32.3...v2.33.0) (2025-09-27) + + +### Features + +* Add transaction_timeout connection property ([#2226](https://github.com/googleapis/java-spanner-jdbc/issues/2226)) ([092df54](https://github.com/googleapis/java-spanner-jdbc/commit/092df54f1bd823ce721c29732a51f8ef5065a6f2)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.74.0 ([#2205](https://github.com/googleapis/java-spanner-jdbc/issues/2205)) ([84dd68c](https://github.com/googleapis/java-spanner-jdbc/commit/84dd68cd661623132c007ac1bed9e69950aa3cac)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.75.0 ([#2223](https://github.com/googleapis/java-spanner-jdbc/issues/2223)) ([4aaa4a1](https://github.com/googleapis/java-spanner-jdbc/commit/4aaa4a132473935746d91363cf4e8b80e08bd017)) +* Update dependency com.google.cloud:google-cloud-trace to v2.74.0 ([#2211](https://github.com/googleapis/java-spanner-jdbc/issues/2211)) ([ac95834](https://github.com/googleapis/java-spanner-jdbc/commit/ac958346f19faad5eaf8dd53ce88b995261e9897)) +* Update dependency com.google.cloud:google-cloud-trace to v2.75.0 ([#2224](https://github.com/googleapis/java-spanner-jdbc/issues/2224)) ([99392d4](https://github.com/googleapis/java-spanner-jdbc/commit/99392d4aa010331815759c45b34372f6766ec9c8)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.2 ([#2218](https://github.com/googleapis/java-spanner-jdbc/issues/2218)) ([5718b16](https://github.com/googleapis/java-spanner-jdbc/commit/5718b161ce326fac7e85fed0bda960e00f035cde)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.29 ([#2214](https://github.com/googleapis/java-spanner-jdbc/issues/2214)) 
([a73a772](https://github.com/googleapis/java-spanner-jdbc/commit/a73a77245f4827afb8580912d437543b5cc59317)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.54.1 ([#2220](https://github.com/googleapis/java-spanner-jdbc/issues/2220)) ([b386483](https://github.com/googleapis/java-spanner-jdbc/commit/b38648370a09631d5f6514aad2960dd3c76af99c)) +* Update dependency org.apache.commons:commons-lang3 to v3.19.0 ([#2225](https://github.com/googleapis/java-spanner-jdbc/issues/2225)) ([1ea3eca](https://github.com/googleapis/java-spanner-jdbc/commit/1ea3eca4c7711775d5d1b89534e490a989ad4630)) +* Update dependency org.postgresql:postgresql to v42.7.8 ([#2221](https://github.com/googleapis/java-spanner-jdbc/issues/2221)) ([bcc3f3d](https://github.com/googleapis/java-spanner-jdbc/commit/bcc3f3d59c02d2d7ff70e6a65a86f0750587c52e)) +* Update dependency org.springframework.boot:spring-boot to v3.5.6 ([#2216](https://github.com/googleapis/java-spanner-jdbc/issues/2216)) ([84396e0](https://github.com/googleapis/java-spanner-jdbc/commit/84396e042d3ac2d7da35bb7900f25f59d36f83d9)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.5.6 ([#2217](https://github.com/googleapis/java-spanner-jdbc/issues/2217)) ([1805daa](https://github.com/googleapis/java-spanner-jdbc/commit/1805daa587fe5f589213b0ef23fe773137446f9a)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.6 ([#2219](https://github.com/googleapis/java-spanner-jdbc/issues/2219)) ([63525af](https://github.com/googleapis/java-spanner-jdbc/commit/63525af099a80b0ae89bd3336244a3aa4e8b71e0)) +* Update dependency org.springframework.data:spring-data-bom to v2025.0.4 ([#2204](https://github.com/googleapis/java-spanner-jdbc/issues/2204)) ([f6327c7](https://github.com/googleapis/java-spanner-jdbc/commit/f6327c79ce4d3dd30994b0c2419ae5b6b058d156)) + +## [2.32.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.32.2...v2.32.3) (2025-09-13) + + +### Dependencies + 
+* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.20.0 ([#2197](https://github.com/googleapis/java-spanner-jdbc/issues/2197)) ([70726b6](https://github.com/googleapis/java-spanner-jdbc/commit/70726b66b71fe1801dff6d43c321f9917f4d3e10)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.100.0 ([#2206](https://github.com/googleapis/java-spanner-jdbc/issues/2206)) ([ce63133](https://github.com/googleapis/java-spanner-jdbc/commit/ce63133c3d90873703096d7ffe62814646982d98)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.100.0 ([#2207](https://github.com/googleapis/java-spanner-jdbc/issues/2207)) ([8f3f00b](https://github.com/googleapis/java-spanner-jdbc/commit/8f3f00bceca948e8337e795d6801c27128b810c5)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.1 ([#2198](https://github.com/googleapis/java-spanner-jdbc/issues/2198)) ([6f6f80b](https://github.com/googleapis/java-spanner-jdbc/commit/6f6f80b369a2a4af23aea6ec163122692f03fa8f)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.28 ([#2203](https://github.com/googleapis/java-spanner-jdbc/issues/2203)) ([23545de](https://github.com/googleapis/java-spanner-jdbc/commit/23545deb10bc718040f56063a09ae65f906a828f)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.54.0 ([#2199](https://github.com/googleapis/java-spanner-jdbc/issues/2199)) ([b5e02dd](https://github.com/googleapis/java-spanner-jdbc/commit/b5e02dd7dd2a520b4a91a10b72dba777c5c6990d)) + +## [2.32.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.32.1...v2.32.2) (2025-08-28) + + +### Dependencies + +* Update actions/setup-java action to v5 ([#2185](https://github.com/googleapis/java-spanner-jdbc/issues/2185)) ([a0e1cec](https://github.com/googleapis/java-spanner-jdbc/commit/a0e1cec3789d12282b3a5d3b0e2353f3dfa0b68c)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.72.0 
([#2179](https://github.com/googleapis/java-spanner-jdbc/issues/2179)) ([3df3cf7](https://github.com/googleapis/java-spanner-jdbc/commit/3df3cf7c836b032968c78ed4ffefc4c0c6545494)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.73.0 ([#2189](https://github.com/googleapis/java-spanner-jdbc/issues/2189)) ([bc45d56](https://github.com/googleapis/java-spanner-jdbc/commit/bc45d567b462967a4cfbe8068027dbf14344c5a0)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.99.0 ([#2192](https://github.com/googleapis/java-spanner-jdbc/issues/2192)) ([db9732e](https://github.com/googleapis/java-spanner-jdbc/commit/db9732e11c5f83f3b2c53bb9c2b6b2a16d0579de)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.99.0 ([#2193](https://github.com/googleapis/java-spanner-jdbc/issues/2193)) ([54d482f](https://github.com/googleapis/java-spanner-jdbc/commit/54d482fc7723d63a0f1d98317c62fc798385b33a)) +* Update dependency com.google.cloud:google-cloud-trace to v2.72.0 ([#2180](https://github.com/googleapis/java-spanner-jdbc/issues/2180)) ([2ce3c39](https://github.com/googleapis/java-spanner-jdbc/commit/2ce3c39b4c5a2a42afca48de6ea065c6c6e85949)) +* Update dependency com.google.cloud:google-cloud-trace to v2.73.0 ([#2190](https://github.com/googleapis/java-spanner-jdbc/issues/2190)) ([a116a8f](https://github.com/googleapis/java-spanner-jdbc/commit/a116a8fac1773aeaf9543d92d6facbfd60a75461)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.0 ([#2184](https://github.com/googleapis/java-spanner-jdbc/issues/2184)) ([79ce6bc](https://github.com/googleapis/java-spanner-jdbc/commit/79ce6bcc3b4c46a46a09a5261019dc55ae2992b8)) +* Update dependency org.springframework.boot:spring-boot to v3.5.5 ([#2186](https://github.com/googleapis/java-spanner-jdbc/issues/2186)) ([75e71cc](https://github.com/googleapis/java-spanner-jdbc/commit/75e71ccd4f15016bf48d73f257f714cd676c19b4)) +* Update dependency 
org.springframework.boot:spring-boot-starter-data-jdbc to v3.5.5 ([#2187](https://github.com/googleapis/java-spanner-jdbc/issues/2187)) ([803f0dd](https://github.com/googleapis/java-spanner-jdbc/commit/803f0dd1c38abe2d6f78a1b3cf1676257e98ca9f)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.5 ([#2188](https://github.com/googleapis/java-spanner-jdbc/issues/2188)) ([fd383c7](https://github.com/googleapis/java-spanner-jdbc/commit/fd383c702b82b87e96cae6b77006ce6563c5a14b)) +* Update dependency org.springframework.data:spring-data-bom to v2025.0.3 ([#2181](https://github.com/googleapis/java-spanner-jdbc/issues/2181)) ([a1bab5a](https://github.com/googleapis/java-spanner-jdbc/commit/a1bab5a2d195c2974146c6d2e5756d4c265c4a21)) + +## [2.32.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.32.0...v2.32.1) (2025-08-12) + + +### Dependencies + +* Update actions/checkout action to v5 ([#2174](https://github.com/googleapis/java-spanner-jdbc/issues/2174)) ([46378ca](https://github.com/googleapis/java-spanner-jdbc/commit/46378caa33c6d78223d65cb0ea11e39784d75a9e)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.71.0 ([#2171](https://github.com/googleapis/java-spanner-jdbc/issues/2171)) ([6713d5c](https://github.com/googleapis/java-spanner-jdbc/commit/6713d5c2ef7af8fb68e3a1ada8b1f71f4137a209)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.98.1 ([#2175](https://github.com/googleapis/java-spanner-jdbc/issues/2175)) ([3a4d7ec](https://github.com/googleapis/java-spanner-jdbc/commit/3a4d7ecc907eb7c5da1bd19ce50d86cf677deaa8)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.98.1 ([#2176](https://github.com/googleapis/java-spanner-jdbc/issues/2176)) ([f44a15e](https://github.com/googleapis/java-spanner-jdbc/commit/f44a15e25b3c7b4163ae1075f6932e6e3c5cb5f1)) +* Update dependency com.google.cloud:google-cloud-trace to v2.71.0 
([#2172](https://github.com/googleapis/java-spanner-jdbc/issues/2172)) ([3159c4f](https://github.com/googleapis/java-spanner-jdbc/commit/3159c4f1c6c81db4e1095f0a52c7e7d51e51c6a2)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.51.0 ([#2168](https://github.com/googleapis/java-spanner-jdbc/issues/2168)) ([ecf1a07](https://github.com/googleapis/java-spanner-jdbc/commit/ecf1a0782defc37af9ccffe7488506e904950809)) +* Update dependency commons-cli:commons-cli to v1.10.0 ([#2167](https://github.com/googleapis/java-spanner-jdbc/issues/2167)) ([1ef709d](https://github.com/googleapis/java-spanner-jdbc/commit/1ef709d22e9131f2e84dc657cd8a2a56d682d29a)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.53.0 ([#2173](https://github.com/googleapis/java-spanner-jdbc/issues/2173)) ([a61de5c](https://github.com/googleapis/java-spanner-jdbc/commit/a61de5c62d90d16e0ad6207e24424aa57442e349)) + +## [2.32.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.31.3...v2.32.0) (2025-07-31) + + +### Features + +* A single multiplexed session is used for all operations ([#2162](https://github.com/googleapis/java-spanner-jdbc/issues/2162)) ([21438cc](https://github.com/googleapis/java-spanner-jdbc/commit/21438cc88b9b5f5cb196c2a0e1c233a7ecb07b62)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.70.0 ([#2157](https://github.com/googleapis/java-spanner-jdbc/issues/2157)) ([9679471](https://github.com/googleapis/java-spanner-jdbc/commit/9679471e9e97b1770fc41b39a0050b8f77542588)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.98.0 ([#2160](https://github.com/googleapis/java-spanner-jdbc/issues/2160)) ([5893055](https://github.com/googleapis/java-spanner-jdbc/commit/589305520aa5474f36954018a72b561a6e0d8189)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.98.0 ([#2161](https://github.com/googleapis/java-spanner-jdbc/issues/2161)) 
([b5345b1](https://github.com/googleapis/java-spanner-jdbc/commit/b5345b18506c1437c3a23e4aa3b7eb54db519e74)) +* Update dependency com.google.cloud:google-cloud-trace to v2.70.0 ([#2158](https://github.com/googleapis/java-spanner-jdbc/issues/2158)) ([d0caeb9](https://github.com/googleapis/java-spanner-jdbc/commit/d0caeb9ddb586fc44405ec1c7c4790ad82cd20cb)) + +## [2.31.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.31.2...v2.31.3) (2025-07-28) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.19.2 ([#2148](https://github.com/googleapis/java-spanner-jdbc/issues/2148)) ([0ef3b4f](https://github.com/googleapis/java-spanner-jdbc/commit/0ef3b4f9c19a19731d90195a192d6d466c3eacb8)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.68.0 ([#2140](https://github.com/googleapis/java-spanner-jdbc/issues/2140)) ([62f92c0](https://github.com/googleapis/java-spanner-jdbc/commit/62f92c04bbecfd98000710843339743d2c838d7a)) +* Update dependency com.google.cloud:google-cloud-trace to v2.68.0 ([#2141](https://github.com/googleapis/java-spanner-jdbc/issues/2141)) ([1036353](https://github.com/googleapis/java-spanner-jdbc/commit/1036353c9388610325c5c83b7544ee5e9198bfa0)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.2 ([#2149](https://github.com/googleapis/java-spanner-jdbc/issues/2149)) ([81f9307](https://github.com/googleapis/java-spanner-jdbc/commit/81f930740c714d696779656d9ac658ebe93312ac)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.52.0 ([#2137](https://github.com/googleapis/java-spanner-jdbc/issues/2137)) ([f1978b5](https://github.com/googleapis/java-spanner-jdbc/commit/f1978b500e96e8d7212bb2a0ac8a354467dd6638)) +* Update dependency org.apache.commons:commons-lang3 to v3.18.0 [security] ([#2139](https://github.com/googleapis/java-spanner-jdbc/issues/2139)) 
([a12e520](https://github.com/googleapis/java-spanner-jdbc/commit/a12e520092c8613d90998a3151c850bc892ba90b)) +* Update dependency org.mybatis.spring.boot:mybatis-spring-boot-starter to v3.0.5 ([#2136](https://github.com/googleapis/java-spanner-jdbc/issues/2136)) ([6fcfa9d](https://github.com/googleapis/java-spanner-jdbc/commit/6fcfa9d749e35d5ee4b67bed6f8850202cee3b5a)) +* Update dependency org.springframework.boot:spring-boot to v3.5.4 ([#2151](https://github.com/googleapis/java-spanner-jdbc/issues/2151)) ([d121993](https://github.com/googleapis/java-spanner-jdbc/commit/d1219934e7cc6c12cef0a76b2ebffa8457c90614)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.5.4 ([#2152](https://github.com/googleapis/java-spanner-jdbc/issues/2152)) ([1f3b330](https://github.com/googleapis/java-spanner-jdbc/commit/1f3b33017724a2c9c227318dee422208b077d20e)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.4 ([#2153](https://github.com/googleapis/java-spanner-jdbc/issues/2153)) ([294fb97](https://github.com/googleapis/java-spanner-jdbc/commit/294fb97de0a583af25dbfccca2eff18a8e10bcfa)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.8 ([#2147](https://github.com/googleapis/java-spanner-jdbc/issues/2147)) ([925600d](https://github.com/googleapis/java-spanner-jdbc/commit/925600d1c15cfd5c950c08ef4a56492809801008)) +* Update dependency org.springframework.data:spring-data-bom to v2025.0.2 ([#2061](https://github.com/googleapis/java-spanner-jdbc/issues/2061)) ([81382ae](https://github.com/googleapis/java-spanner-jdbc/commit/81382aeb0e50f7655c52524a41e8325f638b0f48)) + +## [2.31.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.31.1...v2.31.2) (2025-07-16) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner to v6.97.1 ([#2134](https://github.com/googleapis/java-spanner-jdbc/issues/2134)) 
([90f11b5](https://github.com/googleapis/java-spanner-jdbc/commit/90f11b5f5fbfbcd418e114dfc66d3accbc1c08eb)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.97.1 ([#2135](https://github.com/googleapis/java-spanner-jdbc/issues/2135)) ([a47dbed](https://github.com/googleapis/java-spanner-jdbc/commit/a47dbedc89a9c7125f90c6c44401214fc4a9794f)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.1 ([#2131](https://github.com/googleapis/java-spanner-jdbc/issues/2131)) ([9e059c4](https://github.com/googleapis/java-spanner-jdbc/commit/9e059c404e821afff6e8ab05454308497edc7abc)) + +## [2.31.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.31.0...v2.31.1) (2025-07-01) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner to v6.96.0 ([#2117](https://github.com/googleapis/java-spanner-jdbc/issues/2117)) ([e8eb64a](https://github.com/googleapis/java-spanner-jdbc/commit/e8eb64a668c7d95076a9940fe8fa38c293594598)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.96.1 ([#2124](https://github.com/googleapis/java-spanner-jdbc/issues/2124)) ([3d5be5e](https://github.com/googleapis/java-spanner-jdbc/commit/3d5be5ecf17f8618c30c815319b90fd3c53202cc)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.96.0 ([#2118](https://github.com/googleapis/java-spanner-jdbc/issues/2118)) ([f1ca551](https://github.com/googleapis/java-spanner-jdbc/commit/f1ca55144efabb37f872909fa97a6f33a1c3c0e9)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.96.1 ([#2125](https://github.com/googleapis/java-spanner-jdbc/issues/2125)) ([6b83bcd](https://github.com/googleapis/java-spanner-jdbc/commit/6b83bcdc090017d531839fa6da555a6e4c8d428a)) +* Update dependency org.testcontainers:testcontainers to v1.21.3 ([#2122](https://github.com/googleapis/java-spanner-jdbc/issues/2122)) ([241b19d](https://github.com/googleapis/java-spanner-jdbc/commit/241b19d89f221abe7eeee29bc1e605f1edb1a69f)) 
+* Update dependency org.testcontainers:testcontainers-bom to v1.21.3 ([#2123](https://github.com/googleapis/java-spanner-jdbc/issues/2123)) ([92dd5ea](https://github.com/googleapis/java-spanner-jdbc/commit/92dd5eaad584c680b62ca4f1f297f78188101ab5)) + +## [2.31.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.30.5...v2.31.0) (2025-06-27) + + +### Features + +* Allow JDBC connection to configure direct access ([#2116](https://github.com/googleapis/java-spanner-jdbc/issues/2116)) ([d72242a](https://github.com/googleapis/java-spanner-jdbc/commit/d72242a9d73056394af9469ada7e6439d3678a59)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.66.0 ([#2105](https://github.com/googleapis/java-spanner-jdbc/issues/2105)) ([619bea7](https://github.com/googleapis/java-spanner-jdbc/commit/619bea764083e7b253de3b6e3a944235dab42109)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.67.0 ([#2114](https://github.com/googleapis/java-spanner-jdbc/issues/2114)) ([60d5aa7](https://github.com/googleapis/java-spanner-jdbc/commit/60d5aa7df330008085bce7fc1a70b8e8abb9f05d)) +* Update dependency com.google.cloud:google-cloud-trace to v2.66.0 ([#2106](https://github.com/googleapis/java-spanner-jdbc/issues/2106)) ([161683a](https://github.com/googleapis/java-spanner-jdbc/commit/161683aefe0c93446524c3733e9d3b8aa2e76f94)) +* Update dependency com.google.cloud:google-cloud-trace to v2.67.0 ([#2112](https://github.com/googleapis/java-spanner-jdbc/issues/2112)) ([70e21a6](https://github.com/googleapis/java-spanner-jdbc/commit/70e21a68677ac4adac436e1f4e67a1201c85d427)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.50.0 ([#2108](https://github.com/googleapis/java-spanner-jdbc/issues/2108)) ([816c2b6](https://github.com/googleapis/java-spanner-jdbc/commit/816c2b67098b622013e2c7fcfea0bc8855d427eb)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.36.0 
([#2109](https://github.com/googleapis/java-spanner-jdbc/issues/2109)) ([7a3fa70](https://github.com/googleapis/java-spanner-jdbc/commit/7a3fa7018dda8c4bda5a1fce2fb42c632058d66d)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.36.0 ([#2110](https://github.com/googleapis/java-spanner-jdbc/issues/2110)) ([89ea86b](https://github.com/googleapis/java-spanner-jdbc/commit/89ea86b2608938f062ce7ac4fa61e591811f65e4)) + +## [2.30.5](https://github.com/googleapis/java-spanner-jdbc/compare/v2.30.4...v2.30.5) (2025-06-20) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.19.1 ([#2095](https://github.com/googleapis/java-spanner-jdbc/issues/2095)) ([4d662fe](https://github.com/googleapis/java-spanner-jdbc/commit/4d662fe71fc0d03fe0d648e202a508ff18f1f97b)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.49.2 ([#2096](https://github.com/googleapis/java-spanner-jdbc/issues/2096)) ([53871df](https://github.com/googleapis/java-spanner-jdbc/commit/53871df28b4a3a98d3193f1fb66418e7106e891b)) +* Update dependency org.postgresql:postgresql to v42.7.7 [security] ([#2093](https://github.com/googleapis/java-spanner-jdbc/issues/2093)) ([a7209c4](https://github.com/googleapis/java-spanner-jdbc/commit/a7209c496ef2895c469f19447e4cc90d4e4066bf)) +* Update dependency org.springframework.boot:spring-boot to v3.5.3 ([#2099](https://github.com/googleapis/java-spanner-jdbc/issues/2099)) ([fa1dde8](https://github.com/googleapis/java-spanner-jdbc/commit/fa1dde8e806df5fcd1477ce1388507a542a73ca0)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.5.3 ([#2101](https://github.com/googleapis/java-spanner-jdbc/issues/2101)) ([2381398](https://github.com/googleapis/java-spanner-jdbc/commit/2381398c3c89bf4a2edf3d3520c62e158487ae8a)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.3 ([#2102](https://github.com/googleapis/java-spanner-jdbc/issues/2102)) 
([7d865e9](https://github.com/googleapis/java-spanner-jdbc/commit/7d865e917b2610f2cae6819698c1c03505e340d2)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.7 ([#2094](https://github.com/googleapis/java-spanner-jdbc/issues/2094)) ([e273887](https://github.com/googleapis/java-spanner-jdbc/commit/e273887d61da95930e32494d5abc9aab0906f237)) +* Update dependency org.testcontainers:testcontainers to v1.21.2 ([#2097](https://github.com/googleapis/java-spanner-jdbc/issues/2097)) ([ce9298e](https://github.com/googleapis/java-spanner-jdbc/commit/ce9298ec4583649d8187206345a2e3fe245e4d32)) +* Update dependency org.testcontainers:testcontainers-bom to v1.21.2 ([#2098](https://github.com/googleapis/java-spanner-jdbc/issues/2098)) ([cd9f8a0](https://github.com/googleapis/java-spanner-jdbc/commit/cd9f8a01db5b0364bf7188a9b7f2079fb37d494c)) + +## [2.30.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.30.3...v2.30.4) (2025-06-10) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner to v6.95.1 ([#2085](https://github.com/googleapis/java-spanner-jdbc/issues/2085)) ([e255f89](https://github.com/googleapis/java-spanner-jdbc/commit/e255f897007e3651ca243efd0beae0926ab8d1b7)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.95.1 ([#2086](https://github.com/googleapis/java-spanner-jdbc/issues/2086)) ([fd03d31](https://github.com/googleapis/java-spanner-jdbc/commit/fd03d312891a06ed7465e596802f7141b2966c3b)) + +## [2.30.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.30.2...v2.30.3) (2025-06-09) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.64.0 ([#2078](https://github.com/googleapis/java-spanner-jdbc/issues/2078)) ([afe3977](https://github.com/googleapis/java-spanner-jdbc/commit/afe39772a4945254c1850d673ee14dc826eb4bc1)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.95.0 
([#2079](https://github.com/googleapis/java-spanner-jdbc/issues/2079)) ([ce8db80](https://github.com/googleapis/java-spanner-jdbc/commit/ce8db80fd7b352c565ed1a3031795e3f23d90f04)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.95.0 ([#2080](https://github.com/googleapis/java-spanner-jdbc/issues/2080)) ([1802eec](https://github.com/googleapis/java-spanner-jdbc/commit/1802eecc6b9c3d81d05caf40673b1104c019b550)) +* Update dependency com.google.cloud:google-cloud-trace to v2.64.0 ([#2081](https://github.com/googleapis/java-spanner-jdbc/issues/2081)) ([8051cc4](https://github.com/googleapis/java-spanner-jdbc/commit/8051cc41fa55abd6b9b26218bbb9900b63b21390)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.49.0 ([#2082](https://github.com/googleapis/java-spanner-jdbc/issues/2082)) ([ffdf940](https://github.com/googleapis/java-spanner-jdbc/commit/ffdf940503a5e13367d1874875eaa40570af96c4)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.35.0 ([#2072](https://github.com/googleapis/java-spanner-jdbc/issues/2072)) ([818e569](https://github.com/googleapis/java-spanner-jdbc/commit/818e5693162e25298fd3ee2580063c5c458ad638)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.35.0 ([#2073](https://github.com/googleapis/java-spanner-jdbc/issues/2073)) ([7c352f4](https://github.com/googleapis/java-spanner-jdbc/commit/7c352f403873116bd58da968c1c2613b007a43fa)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.51.0 ([#2083](https://github.com/googleapis/java-spanner-jdbc/issues/2083)) ([34a2b0a](https://github.com/googleapis/java-spanner-jdbc/commit/34a2b0a066cb74bda697dc83c061810d12f4c300)) +* Update dependency org.postgresql:postgresql to v42.7.6 ([#2074](https://github.com/googleapis/java-spanner-jdbc/issues/2074)) ([7faf24d](https://github.com/googleapis/java-spanner-jdbc/commit/7faf24dee74706af47fb61a3fb0c82b4ed91ce73)) +* Update dependency org.springframework.boot:spring-boot to 
v3.5.0 ([#2062](https://github.com/googleapis/java-spanner-jdbc/issues/2062)) ([d82de6f](https://github.com/googleapis/java-spanner-jdbc/commit/d82de6f6e2d662877d2985e8fe6f49f27126e094)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.5.0 ([#2063](https://github.com/googleapis/java-spanner-jdbc/issues/2063)) ([dcae562](https://github.com/googleapis/java-spanner-jdbc/commit/dcae5626e4e0721045864f29abf97571b8c1fa6c)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.5.0 ([#2067](https://github.com/googleapis/java-spanner-jdbc/issues/2067)) ([668f7f6](https://github.com/googleapis/java-spanner-jdbc/commit/668f7f6d42ff252f46a19452ffdf922eb4f163c0)) +* Update dependency org.testcontainers:testcontainers to v1.21.1 ([#2075](https://github.com/googleapis/java-spanner-jdbc/issues/2075)) ([82520a9](https://github.com/googleapis/java-spanner-jdbc/commit/82520a92ce3b798ead16d356befc46d2a6abf288)) +* Update dependency org.testcontainers:testcontainers-bom to v1.21.1 ([#2076](https://github.com/googleapis/java-spanner-jdbc/issues/2076)) ([ace1375](https://github.com/googleapis/java-spanner-jdbc/commit/ace13756d0280f2380e10c8bfc75636ac93c66c1)) + +## [2.30.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.30.1...v2.30.2) (2025-05-22) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.63.0 ([#2058](https://github.com/googleapis/java-spanner-jdbc/issues/2058)) ([da82fea](https://github.com/googleapis/java-spanner-jdbc/commit/da82feacf3de8c7d441cf8686c6f7385f5274bc9)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.94.0 ([#2064](https://github.com/googleapis/java-spanner-jdbc/issues/2064)) ([1b789e5](https://github.com/googleapis/java-spanner-jdbc/commit/1b789e50a4a3b06782923479cc1024ee7c2e102b)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.94.0 ([#2065](https://github.com/googleapis/java-spanner-jdbc/issues/2065)) 
([1154c13](https://github.com/googleapis/java-spanner-jdbc/commit/1154c13ae1ca44a9a52cafc2d24628f53a9d121e)) +* Update dependency com.google.cloud:google-cloud-trace to v2.63.0 ([#2059](https://github.com/googleapis/java-spanner-jdbc/issues/2059)) ([86a49bb](https://github.com/googleapis/java-spanner-jdbc/commit/86a49bbf29d0752aa2fdcf5fe73587f72fc6630e)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.48.0 ([#2057](https://github.com/googleapis/java-spanner-jdbc/issues/2057)) ([c6f47d4](https://github.com/googleapis/java-spanner-jdbc/commit/c6f47d4753e73ef58e21d5d13f4d380b67a80a55)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.50.0 ([#2051](https://github.com/googleapis/java-spanner-jdbc/issues/2051)) ([1d2a3b7](https://github.com/googleapis/java-spanner-jdbc/commit/1d2a3b728f5a56b948412bea493e2b5b70a89ab2)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.6 ([#2060](https://github.com/googleapis/java-spanner-jdbc/issues/2060)) ([91c30e4](https://github.com/googleapis/java-spanner-jdbc/commit/91c30e425fdb8a1a4a9d55fdc5df254ff17aa201)) + +## [2.30.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.30.0...v2.30.1) (2025-05-09) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.62.0 ([#2044](https://github.com/googleapis/java-spanner-jdbc/issues/2044)) ([92b082a](https://github.com/googleapis/java-spanner-jdbc/commit/92b082a11f14b253f2e3f369be711cd89f5279db)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.93.0 ([#2046](https://github.com/googleapis/java-spanner-jdbc/issues/2046)) ([7badfc7](https://github.com/googleapis/java-spanner-jdbc/commit/7badfc73a69119618749e7d61a2cfd2e2924039d)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.93.0 ([#2047](https://github.com/googleapis/java-spanner-jdbc/issues/2047)) 
([0bb369c](https://github.com/googleapis/java-spanner-jdbc/commit/0bb369cd09d1132a5111d2686c32becefe166405)) +* Update dependency com.google.cloud:google-cloud-trace to v2.62.0 ([#2045](https://github.com/googleapis/java-spanner-jdbc/issues/2045)) ([7f43114](https://github.com/googleapis/java-spanner-jdbc/commit/7f43114856fa7ea7bef319c74620c55201cc9833)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.46.3 ([#2039](https://github.com/googleapis/java-spanner-jdbc/issues/2039)) ([6da7077](https://github.com/googleapis/java-spanner-jdbc/commit/6da70771a552ae1bc218691a3badb13fbb846107)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.47.0 ([#2043](https://github.com/googleapis/java-spanner-jdbc/issues/2043)) ([7208cbc](https://github.com/googleapis/java-spanner-jdbc/commit/7208cbc9f661db2d5908bcc3c824539bbd14cc6a)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.27 ([#2042](https://github.com/googleapis/java-spanner-jdbc/issues/2042)) ([e612954](https://github.com/googleapis/java-spanner-jdbc/commit/e61295402b9c5dbf5d64217450a2f567db2cff55)) + +## [2.30.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.29.1...v2.30.0) (2025-05-01) + + +### Features + +* Add UUID data type ([#2036](https://github.com/googleapis/java-spanner-jdbc/issues/2036)) ([0df29fc](https://github.com/googleapis/java-spanner-jdbc/commit/0df29fc8e88d4ad285fe22125b75b860ad21c371)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.61.0 ([#2034](https://github.com/googleapis/java-spanner-jdbc/issues/2034)) ([d5b88db](https://github.com/googleapis/java-spanner-jdbc/commit/d5b88dbc66c5e4d33493dba6308be43f1601a5af)) +* Update dependency com.google.cloud:google-cloud-spanner to v6.92.0 ([#2031](https://github.com/googleapis/java-spanner-jdbc/issues/2031)) ([4fe13a3](https://github.com/googleapis/java-spanner-jdbc/commit/4fe13a31e6fdcd65860047b9f29f370039c54db8)) +* Update dependency 
com.google.cloud:google-cloud-spanner-bom to v6.92.0 ([#2032](https://github.com/googleapis/java-spanner-jdbc/issues/2032)) ([d313e3e](https://github.com/googleapis/java-spanner-jdbc/commit/d313e3e60d23e1adeff50da496dd5207bc78fb84)) +* Update dependency com.google.cloud:google-cloud-trace to v2.61.0 ([#2035](https://github.com/googleapis/java-spanner-jdbc/issues/2035)) ([9b2712e](https://github.com/googleapis/java-spanner-jdbc/commit/9b2712e2b9950a8d1cb49c3a91b178726821d3b3)) + + +### Documentation + +* Add sample for MyBatis with GoogleSQL ([#2026](https://github.com/googleapis/java-spanner-jdbc/issues/2026)) ([5d7b9de](https://github.com/googleapis/java-spanner-jdbc/commit/5d7b9de545015655a095f4336c301148b4a9640d)) +* Add sample for Spring Data JDBC with GoogleSQL ([#2027](https://github.com/googleapis/java-spanner-jdbc/issues/2027)) ([3476106](https://github.com/googleapis/java-spanner-jdbc/commit/347610686406c7e4129e0ed101924926f4dca2b5)) + +## [2.29.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.29.0...v2.29.1) (2025-04-25) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.19.0 ([#2020](https://github.com/googleapis/java-spanner-jdbc/issues/2020)) ([0d4a663](https://github.com/googleapis/java-spanner-jdbc/commit/0d4a66366ca0e1d6849c8886a5875581473e1ffe)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.46.2 ([#2014](https://github.com/googleapis/java-spanner-jdbc/issues/2014)) ([4775d30](https://github.com/googleapis/java-spanner-jdbc/commit/4775d30b9e1448a4381545f84dae36b8dae4ec0e)) +* Update dependency org.springframework.boot:spring-boot to v3.4.5 ([#2017](https://github.com/googleapis/java-spanner-jdbc/issues/2017)) ([33e6f08](https://github.com/googleapis/java-spanner-jdbc/commit/33e6f08274612a1369aad799539f85fd43ceb93b)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.4.5 ([#2018](https://github.com/googleapis/java-spanner-jdbc/issues/2018)) 
([9592099](https://github.com/googleapis/java-spanner-jdbc/commit/9592099ed5c7c1696186a34690cae3bf5a724e4f)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.4.5 ([#2019](https://github.com/googleapis/java-spanner-jdbc/issues/2019)) ([c6cf077](https://github.com/googleapis/java-spanner-jdbc/commit/c6cf077fc6bae591deed6d75ef97e5524fcffcbc)) +* Update dependency org.testcontainers:testcontainers to v1.21.0 ([#2015](https://github.com/googleapis/java-spanner-jdbc/issues/2015)) ([22c59bc](https://github.com/googleapis/java-spanner-jdbc/commit/22c59bc09f8b9b863e2e86d976e514922152c6bc)) +* Update dependency org.testcontainers:testcontainers-bom to v1.21.0 ([#2016](https://github.com/googleapis/java-spanner-jdbc/issues/2016)) ([5eab2e7](https://github.com/googleapis/java-spanner-jdbc/commit/5eab2e7b2628adcee9405da425c1ad5d3c89ef3e)) + +## [2.29.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.28.0...v2.29.0) (2025-04-22) + + +### Features + +* Support isolation level repeatable read ([#1973](https://github.com/googleapis/java-spanner-jdbc/issues/1973)) ([010a502](https://github.com/googleapis/java-spanner-jdbc/commit/010a502310db88f8c676960c641a7e9a6581bf11)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.90.0 ([#1970](https://github.com/googleapis/java-spanner-jdbc/issues/1970)) ([7153c39](https://github.com/googleapis/java-spanner-jdbc/commit/7153c39464fd9b83609bb7cc2ada5c0f9aab3af8)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.91.1 ([#1993](https://github.com/googleapis/java-spanner-jdbc/issues/1993)) ([97d4840](https://github.com/googleapis/java-spanner-jdbc/commit/97d4840bd3e7cee7f1ddb9b26f3946f1b10ed894)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.46.0 ([#1992](https://github.com/googleapis/java-spanner-jdbc/issues/1992)) ([ea0b557](https://github.com/googleapis/java-spanner-jdbc/commit/ea0b55729d1bf9e8b5994758314841591ea0c883)) 
+* Update dependency io.opentelemetry:opentelemetry-bom to v1.49.0 ([#1982](https://github.com/googleapis/java-spanner-jdbc/issues/1982)) ([6b7ddf4](https://github.com/googleapis/java-spanner-jdbc/commit/6b7ddf4352d4db87eab6d9f3fd4945e5650e7798)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.5 ([#1995](https://github.com/googleapis/java-spanner-jdbc/issues/1995)) ([6bedf12](https://github.com/googleapis/java-spanner-jdbc/commit/6bedf124b9da233e6f4001dff2c1b56f0249aaad)) + +## [2.28.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.27.1...v2.28.0) (2025-03-21) + + +### Features + +* Return is_autoincrement for getColumns ([#1944](https://github.com/googleapis/java-spanner-jdbc/issues/1944)) ([b9fcae9](https://github.com/googleapis/java-spanner-jdbc/commit/b9fcae942b1f52f82bbd5ae4357e1d4e84bee5a4)) + + +### Performance Improvements + +* Get database dialect using multiplexed session ([c5f3297](https://github.com/googleapis/java-spanner-jdbc/commit/c5f32979a682643e135a40dfd7681de841bce0de)) +* Skip gRPC trailers for successful queries ([c5f3297](https://github.com/googleapis/java-spanner-jdbc/commit/c5f32979a682643e135a40dfd7681de841bce0de)) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.18.3 ([#1943](https://github.com/googleapis/java-spanner-jdbc/issues/1943)) ([dc9af80](https://github.com/googleapis/java-spanner-jdbc/commit/dc9af80b20bbb798a7e6f4c6de577ab8249c426d)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.60.0 ([#1958](https://github.com/googleapis/java-spanner-jdbc/issues/1958)) ([37fca7b](https://github.com/googleapis/java-spanner-jdbc/commit/37fca7b47ca8480afc64f45db3e364218686674e)) +* Update dependency com.google.cloud:google-cloud-trace to v2.60.0 ([#1959](https://github.com/googleapis/java-spanner-jdbc/issues/1959)) ([f154740](https://github.com/googleapis/java-spanner-jdbc/commit/f15474061f8c4780fe2a8457882761598f1ecc8c)) +* Update 
dependency com.google.cloud:sdk-platform-java-config to v3.45.0 ([#1951](https://github.com/googleapis/java-spanner-jdbc/issues/1951)) ([00f163b](https://github.com/googleapis/java-spanner-jdbc/commit/00f163bdc016e5411d0f64af27fe353c2be75065)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.45.1 ([#1952](https://github.com/googleapis/java-spanner-jdbc/issues/1952)) ([c7c7363](https://github.com/googleapis/java-spanner-jdbc/commit/c7c7363736f105d9d0c2593d3a354d7789c1a801)) +* Update dependency com.google.cloud.opentelemetry:exporter-metrics to v0.34.0 ([#1955](https://github.com/googleapis/java-spanner-jdbc/issues/1955)) ([b52cadd](https://github.com/googleapis/java-spanner-jdbc/commit/b52caddd2a08ead671db0f64dea48957bdb4ac1c)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.34.0 ([#1956](https://github.com/googleapis/java-spanner-jdbc/issues/1956)) ([2158ff2](https://github.com/googleapis/java-spanner-jdbc/commit/2158ff241c4adf27229fa409780282be4106dd5f)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.48.0 ([#1950](https://github.com/googleapis/java-spanner-jdbc/issues/1950)) ([99b70ce](https://github.com/googleapis/java-spanner-jdbc/commit/99b70ce1bac31dee520cb1ca3743c64c296fb901)) +* Update dependency org.springframework.boot:spring-boot to v3.4.4 ([#1962](https://github.com/googleapis/java-spanner-jdbc/issues/1962)) ([86b05e2](https://github.com/googleapis/java-spanner-jdbc/commit/86b05e2dba04603198f595e91fdc2f5b66f4eb80)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.4.4 ([#1963](https://github.com/googleapis/java-spanner-jdbc/issues/1963)) ([53d3e8b](https://github.com/googleapis/java-spanner-jdbc/commit/53d3e8b120806f9603f10dec47bb6745bfed493b)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.4.4 ([#1964](https://github.com/googleapis/java-spanner-jdbc/issues/1964)) 
([bf5402b](https://github.com/googleapis/java-spanner-jdbc/commit/bf5402b0e85d3a0c854401a1121bc7b82c5d06a1)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.4 ([#1953](https://github.com/googleapis/java-spanner-jdbc/issues/1953)) ([fa65ead](https://github.com/googleapis/java-spanner-jdbc/commit/fa65ead20018c083749bb1bd1cc1993f48745ef9)) +* Update dependency org.testcontainers:testcontainers to v1.20.6 ([#1946](https://github.com/googleapis/java-spanner-jdbc/issues/1946)) ([2b3e1d8](https://github.com/googleapis/java-spanner-jdbc/commit/2b3e1d839d6d8b903c5b45f84a4c6db651f732db)) +* Update dependency org.testcontainers:testcontainers-bom to v1.20.6 ([#1947](https://github.com/googleapis/java-spanner-jdbc/issues/1947)) ([0b9fb38](https://github.com/googleapis/java-spanner-jdbc/commit/0b9fb3833fb79cf211bcb1de307194ff39fe18c8)) + +## [2.27.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.27.0...v2.27.1) (2025-02-28) + + +### Bug Fixes + +* Include COLUMN_DEFAULT in the returned metadata ([#1937](https://github.com/googleapis/java-spanner-jdbc/issues/1937)) ([98eb542](https://github.com/googleapis/java-spanner-jdbc/commit/98eb542201330188570ef995e38fa2a690e1160f)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.59.0 ([#1938](https://github.com/googleapis/java-spanner-jdbc/issues/1938)) ([5458023](https://github.com/googleapis/java-spanner-jdbc/commit/54580230f61584f0691730693497171ff9bfc734)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.88.0 ([#1939](https://github.com/googleapis/java-spanner-jdbc/issues/1939)) ([3c23e90](https://github.com/googleapis/java-spanner-jdbc/commit/3c23e90b7a7dee52169db8041523f38a7ceeb6ad)) +* Update dependency com.google.cloud:google-cloud-trace to v2.59.0 ([#1940](https://github.com/googleapis/java-spanner-jdbc/issues/1940)) 
([b31dd6e](https://github.com/googleapis/java-spanner-jdbc/commit/b31dd6e78b5f9b529b7cc7a9a34ea176231e22dc)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.44.0 ([#1931](https://github.com/googleapis/java-spanner-jdbc/issues/1931)) ([568a464](https://github.com/googleapis/java-spanner-jdbc/commit/568a464f29055383b7930deb42505bedad506339)) + + +### Documentation + +* Add defaultSequenceKind connection property documentation ([#1935](https://github.com/googleapis/java-spanner-jdbc/issues/1935)) ([c30b09a](https://github.com/googleapis/java-spanner-jdbc/commit/c30b09ab554d57adccaee72c36969407bbb1d4dd)) + +## [2.27.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.26.1...v2.27.0) (2025-02-21) + + +### Features + +* Add methods for unwrapping Spanner client ([#1914](https://github.com/googleapis/java-spanner-jdbc/issues/1914)) ([ee6082f](https://github.com/googleapis/java-spanner-jdbc/commit/ee6082fc455860a0f768a1798f98454bbd4cfeb0)) +* Support FOR UPDATE clauses ([#1919](https://github.com/googleapis/java-spanner-jdbc/issues/1919)) ([d296aa9](https://github.com/googleapis/java-spanner-jdbc/commit/d296aa909be23be56faf69e348263cc195708585)) + + +### Performance Improvements + +* Use last_statement optimization in autocommit ([#1911](https://github.com/googleapis/java-spanner-jdbc/issues/1911)) ([ce0c422](https://github.com/googleapis/java-spanner-jdbc/commit/ce0c42269486ddd1f4296f38dc9843e4c1ee87ef)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.58.0 ([#1909](https://github.com/googleapis/java-spanner-jdbc/issues/1909)) ([deefebe](https://github.com/googleapis/java-spanner-jdbc/commit/deefebec85a202ed516b8acc9005691e47f91486)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.87.0 ([#1925](https://github.com/googleapis/java-spanner-jdbc/issues/1925)) ([121ad50](https://github.com/googleapis/java-spanner-jdbc/commit/121ad504f9dec432405fce40bbb452400d17d0bb)) +* Update 
dependency com.google.cloud:google-cloud-trace to v2.58.0 ([#1910](https://github.com/googleapis/java-spanner-jdbc/issues/1910)) ([9b621df](https://github.com/googleapis/java-spanner-jdbc/commit/9b621df8076775a8b72feb953c878b919ef2a6e5)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.43.0 ([#1908](https://github.com/googleapis/java-spanner-jdbc/issues/1908)) ([c8a2217](https://github.com/googleapis/java-spanner-jdbc/commit/c8a2217c5692219d66e84971741121e725af4ceb)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.47.0 ([#1907](https://github.com/googleapis/java-spanner-jdbc/issues/1907)) ([bfc05e5](https://github.com/googleapis/java-spanner-jdbc/commit/bfc05e52540fc9eeb7523322e567637499f7bf41)) +* Update dependency org.springframework.boot:spring-boot to v3.4.3 ([#1922](https://github.com/googleapis/java-spanner-jdbc/issues/1922)) ([3e13089](https://github.com/googleapis/java-spanner-jdbc/commit/3e13089606da51452c08782e5526e7685c6b6852)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.4.3 ([#1923](https://github.com/googleapis/java-spanner-jdbc/issues/1923)) ([040b467](https://github.com/googleapis/java-spanner-jdbc/commit/040b46783debc03c8d157decde8a80c4707a6951)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.4.3 ([#1924](https://github.com/googleapis/java-spanner-jdbc/issues/1924)) ([3cdaad9](https://github.com/googleapis/java-spanner-jdbc/commit/3cdaad949e5dff2916ce69a9adae18806ec8fb26)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.3 ([#1913](https://github.com/googleapis/java-spanner-jdbc/issues/1913)) ([4744c11](https://github.com/googleapis/java-spanner-jdbc/commit/4744c1101faef14d0ce3984f3aafc6f002a38359)) +* Update dependency org.testcontainers:testcontainers to v1.20.5 ([#1926](https://github.com/googleapis/java-spanner-jdbc/issues/1926)) 
([6977dfc](https://github.com/googleapis/java-spanner-jdbc/commit/6977dfcd99407c770d493fe7216cebc332ac9907)) +* Update dependency org.testcontainers:testcontainers-bom to v1.20.5 ([#1921](https://github.com/googleapis/java-spanner-jdbc/issues/1921)) ([f91415a](https://github.com/googleapis/java-spanner-jdbc/commit/f91415a75c7c97acb25188de2d68effca534ba0d)) + + +### Documentation + +* Add sample for PROTO columns ([#1918](https://github.com/googleapis/java-spanner-jdbc/issues/1918)) ([60ee659](https://github.com/googleapis/java-spanner-jdbc/commit/60ee659b62a1a940d6a1cc49138da2029e62a01e)), closes [#1916](https://github.com/googleapis/java-spanner-jdbc/issues/1916) +* Regenerate Connection properties documentation ([#1915](https://github.com/googleapis/java-spanner-jdbc/issues/1915)) ([dfddf08](https://github.com/googleapis/java-spanner-jdbc/commit/dfddf08b9af9dae358ddc6a875101f2e35fbef13)) + +## [2.26.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.26.0...v2.26.1) (2025-02-03) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.57.0 ([#1901](https://github.com/googleapis/java-spanner-jdbc/issues/1901)) ([9170f97](https://github.com/googleapis/java-spanner-jdbc/commit/9170f97171da09c730742bb8198c6893cca72205)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.86.0 ([#1903](https://github.com/googleapis/java-spanner-jdbc/issues/1903)) ([7eb727d](https://github.com/googleapis/java-spanner-jdbc/commit/7eb727d2b656d2488f2d7246793b6e09925940cd)) +* Update dependency com.google.cloud:google-cloud-trace to v2.57.0 ([#1902](https://github.com/googleapis/java-spanner-jdbc/issues/1902)) ([f7efb1b](https://github.com/googleapis/java-spanner-jdbc/commit/f7efb1b578fd0b04743090be39b7a7351dc9ba05)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.42.0 ([#1900](https://github.com/googleapis/java-spanner-jdbc/issues/1900)) 
([be47dd6](https://github.com/googleapis/java-spanner-jdbc/commit/be47dd622954e4112486b2760a2e3c534a2e69b8)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.46.0 ([#1892](https://github.com/googleapis/java-spanner-jdbc/issues/1892)) ([c700c9c](https://github.com/googleapis/java-spanner-jdbc/commit/c700c9c9a4f6844360f6257a5e57aceff03cfa00)) +* Update dependency org.postgresql:postgresql to v42.7.5 ([#1894](https://github.com/googleapis/java-spanner-jdbc/issues/1894)) ([b965f02](https://github.com/googleapis/java-spanner-jdbc/commit/b965f02d6992f4f1b38d1670d389f2bc91e21ee3)) +* Update dependency org.springframework.boot:spring-boot to v3.4.2 ([#1897](https://github.com/googleapis/java-spanner-jdbc/issues/1897)) ([b567c2c](https://github.com/googleapis/java-spanner-jdbc/commit/b567c2ca8301fe97be52cddf9d223f6a5c0e9925)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.4.2 ([#1898](https://github.com/googleapis/java-spanner-jdbc/issues/1898)) ([14d2368](https://github.com/googleapis/java-spanner-jdbc/commit/14d236857a7c7ab5e3a5172c31405ba892bd39de)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.4.2 ([#1899](https://github.com/googleapis/java-spanner-jdbc/issues/1899)) ([0381a31](https://github.com/googleapis/java-spanner-jdbc/commit/0381a318da286e016291a299f30765aa4d0cc9f6)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.2 ([#1896](https://github.com/googleapis/java-spanner-jdbc/issues/1896)) ([d819620](https://github.com/googleapis/java-spanner-jdbc/commit/d8196205c34725ac48db346982ec73013a1da57e)) + +## [2.26.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.25.3...v2.26.0) (2025-01-11) + + +### Features + +* Support for Spanner external host ([#1884](https://github.com/googleapis/java-spanner-jdbc/issues/1884)) ([123a7bc](https://github.com/googleapis/java-spanner-jdbc/commit/123a7bcd9825b20bfbf7e9b057c2bd0f5213cac3)) + + +### Dependencies + +* Update 
dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.56.0 ([#1887](https://github.com/googleapis/java-spanner-jdbc/issues/1887)) ([2016d9c](https://github.com/googleapis/java-spanner-jdbc/commit/2016d9c47a5c6d12ba101827fe933c3ddd334e78)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.85.0 ([#1889](https://github.com/googleapis/java-spanner-jdbc/issues/1889)) ([3424d38](https://github.com/googleapis/java-spanner-jdbc/commit/3424d382a89b33d0870b00f04fd10dafa62bcc3d)) +* Update dependency com.google.cloud:google-cloud-trace to v2.56.0 ([#1888](https://github.com/googleapis/java-spanner-jdbc/issues/1888)) ([ce78b18](https://github.com/googleapis/java-spanner-jdbc/commit/ce78b188e70f30776e58a1e3a7d8c4b1559e4d68)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.41.1 ([#1886](https://github.com/googleapis/java-spanner-jdbc/issues/1886)) ([73eacc6](https://github.com/googleapis/java-spanner-jdbc/commit/73eacc66bacfd5c367f764ed3db805309cb0d1c8)) + +## [2.25.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.25.2...v2.25.3) (2025-01-06) + + +### Bug Fixes + +* Clear interrupted flag after cancel ([#1880](https://github.com/googleapis/java-spanner-jdbc/issues/1880)) ([e1fd4e1](https://github.com/googleapis/java-spanner-jdbc/commit/e1fd4e131a039b80306991cc93c5c097f2538c90)), closes [#1879](https://github.com/googleapis/java-spanner-jdbc/issues/1879) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.84.0 ([#1881](https://github.com/googleapis/java-spanner-jdbc/issues/1881)) ([42ffaad](https://github.com/googleapis/java-spanner-jdbc/commit/42ffaadf0e671806269ba6c0fba8ce470911b8fe)) +* Update dependency org.springframework.boot:spring-boot to v3.4.1 ([#1873](https://github.com/googleapis/java-spanner-jdbc/issues/1873)) ([c81941c](https://github.com/googleapis/java-spanner-jdbc/commit/c81941ca62face226804619e11b3e9de9b0aa801)) +* Update dependency 
org.springframework.boot:spring-boot-starter-data-jdbc to v3.4.1 ([#1874](https://github.com/googleapis/java-spanner-jdbc/issues/1874)) ([cc3fc3e](https://github.com/googleapis/java-spanner-jdbc/commit/cc3fc3e8a2f455909eb9687a81d742250decb8c3)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.4.1 ([#1876](https://github.com/googleapis/java-spanner-jdbc/issues/1876)) ([ea02e5d](https://github.com/googleapis/java-spanner-jdbc/commit/ea02e5da9d220782c2223bc7f2d4969e70a1b868)) + + +### Documentation + +* Add sample for using array of struct query param ([#1871](https://github.com/googleapis/java-spanner-jdbc/issues/1871)) ([d7cb90d](https://github.com/googleapis/java-spanner-jdbc/commit/d7cb90d264eaf0793422d3bfcaadd5be2ebd6412)) + +## [2.25.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.25.1...v2.25.2) (2024-12-19) + + +### Performance Improvements + +* Use direct executor ([#1864](https://github.com/googleapis/java-spanner-jdbc/issues/1864)) ([6d6d500](https://github.com/googleapis/java-spanner-jdbc/commit/6d6d50006eec29bba47ebe08339ef0c440530710)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.55.0 ([#1869](https://github.com/googleapis/java-spanner-jdbc/issues/1869)) ([dee17e2](https://github.com/googleapis/java-spanner-jdbc/commit/dee17e2130819e722106100032975bbcbbef360a)) +* Update dependency com.google.cloud:google-cloud-trace to v2.55.0 ([#1870](https://github.com/googleapis/java-spanner-jdbc/issues/1870)) ([0e78510](https://github.com/googleapis/java-spanner-jdbc/commit/0e78510229378319dd67c3bc240a10b6330886e2)) + + +### Documentation + +* Auto-generate connection properties documentation ([#1860](https://github.com/googleapis/java-spanner-jdbc/issues/1860)) ([c22e654](https://github.com/googleapis/java-spanner-jdbc/commit/c22e654c23511ec108a454928da3d426e78dda0d)) +* Fix troubleshooting link ([#1861](https://github.com/googleapis/java-spanner-jdbc/issues/1861)) 
([6c08ffa](https://github.com/googleapis/java-spanner-jdbc/commit/6c08ffa536b7b0f0c6c65e54391f119399ce8bbf)), closes [#1770](https://github.com/googleapis/java-spanner-jdbc/issues/1770) + +## [2.25.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.25.0...v2.25.1) (2024-12-14) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.83.0 ([#1863](https://github.com/googleapis/java-spanner-jdbc/issues/1863)) ([0e5680b](https://github.com/googleapis/java-spanner-jdbc/commit/0e5680b5dd7b433c850b389a64e8c8c1343151ed)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.41.0 ([#1857](https://github.com/googleapis/java-spanner-jdbc/issues/1857)) ([162fa47](https://github.com/googleapis/java-spanner-jdbc/commit/162fa476b6b45ec14700efe5fea8f6fad7d1c5ad)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.45.0 ([#1853](https://github.com/googleapis/java-spanner-jdbc/issues/1853)) ([350e755](https://github.com/googleapis/java-spanner-jdbc/commit/350e75519d13c81c820c51f88786ca8a61662858)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.1 ([#1859](https://github.com/googleapis/java-spanner-jdbc/issues/1859)) ([150827c](https://github.com/googleapis/java-spanner-jdbc/commit/150827c54042f9d754f1d130287f56ebfadeb7c8)) + +## [2.25.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.24.2...v2.25.0) (2024-12-04) + + +### Features + +* Add fallback to PDML mode ([#1841](https://github.com/googleapis/java-spanner-jdbc/issues/1841)) ([1e81863](https://github.com/googleapis/java-spanner-jdbc/commit/1e818634d1f4845ef96c206de26388e6c3c80bf7)) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.18.2 ([#1846](https://github.com/googleapis/java-spanner-jdbc/issues/1846)) ([1a010a1](https://github.com/googleapis/java-spanner-jdbc/commit/1a010a1ecb4e5f3c83c8fca26c64e607095f1351)) +* Update dependency com.google.cloud:google-cloud-spanner-bom 
to v6.81.2 ([#1837](https://github.com/googleapis/java-spanner-jdbc/issues/1837)) ([52180d9](https://github.com/googleapis/java-spanner-jdbc/commit/52180d9ad8ff9ae1beda42af4c16c0796948e5a0)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.82.0 ([#1847](https://github.com/googleapis/java-spanner-jdbc/issues/1847)) ([b4ea413](https://github.com/googleapis/java-spanner-jdbc/commit/b4ea4130e667f417d249edbeb560720f58a3c1aa)) +* Update dependency org.mybatis.spring.boot:mybatis-spring-boot-starter to v3.0.4 ([#1844](https://github.com/googleapis/java-spanner-jdbc/issues/1844)) ([3cd9cd6](https://github.com/googleapis/java-spanner-jdbc/commit/3cd9cd6d3d2e7be023e1ff80019bf35bfedc07f9)) +* Update dependency org.springframework.boot:spring-boot to v3.4.0 ([#1838](https://github.com/googleapis/java-spanner-jdbc/issues/1838)) ([fb20987](https://github.com/googleapis/java-spanner-jdbc/commit/fb2098723ad193bf7331578113c0ed0f2e734101)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.4.0 ([#1840](https://github.com/googleapis/java-spanner-jdbc/issues/1840)) ([3f9dbf1](https://github.com/googleapis/java-spanner-jdbc/commit/3f9dbf18415315db204d679c28d6f226b3edd7f1)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.4.0 ([#1839](https://github.com/googleapis/java-spanner-jdbc/issues/1839)) ([d681cea](https://github.com/googleapis/java-spanner-jdbc/commit/d681cea1d03f1b57d86f362a5bd2f5089ffcde4c)) +* Update dependency org.testcontainers:testcontainers to v1.20.4 ([#1835](https://github.com/googleapis/java-spanner-jdbc/issues/1835)) ([78aa4bf](https://github.com/googleapis/java-spanner-jdbc/commit/78aa4bf90e8a5339f5179fe1b95d6ed6e1b9ebbc)) +* Update dependency org.testcontainers:testcontainers-bom to v1.20.4 ([#1836](https://github.com/googleapis/java-spanner-jdbc/issues/1836)) ([c01ab98](https://github.com/googleapis/java-spanner-jdbc/commit/c01ab9800483db3eec5da0bd35acda5fb00de663)) + +## 
[2.24.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.24.1...v2.24.2) (2024-11-20) + + +### Dependencies + +* Bump Spanner client to 6.81.2 ([#1833](https://github.com/googleapis/java-spanner-jdbc/issues/1833)) ([3b16c90](https://github.com/googleapis/java-spanner-jdbc/commit/3b16c9071cdab9b8f8d719ba6839a8bc5e1a2e8c)) +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.18.1 ([#1815](https://github.com/googleapis/java-spanner-jdbc/issues/1815)) ([f4a23e3](https://github.com/googleapis/java-spanner-jdbc/commit/f4a23e31b63a0f832b6a17fef9fcf8f02ae327d3)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.53.0 ([#1816](https://github.com/googleapis/java-spanner-jdbc/issues/1816)) ([cdaf7ee](https://github.com/googleapis/java-spanner-jdbc/commit/cdaf7ee8c34cc0bed6256daeb2ae0c6c2eb4e4d3)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.54.0 ([#1831](https://github.com/googleapis/java-spanner-jdbc/issues/1831)) ([41eafa7](https://github.com/googleapis/java-spanner-jdbc/commit/41eafa70ef8c75bb99c7f0058d1fea7f28e8a14b)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.81.1 ([#1821](https://github.com/googleapis/java-spanner-jdbc/issues/1821)) ([c314bad](https://github.com/googleapis/java-spanner-jdbc/commit/c314bada6f631020cbf0a956c0b4d3ddd2c1b9cb)) +* Update dependency com.google.cloud:google-cloud-trace to v2.53.0 ([#1817](https://github.com/googleapis/java-spanner-jdbc/issues/1817)) ([1cc543c](https://github.com/googleapis/java-spanner-jdbc/commit/1cc543c7898fbf1a3febb8f4142b1d40ad8873f8)) +* Update dependency com.google.cloud:google-cloud-trace to v2.54.0 ([#1830](https://github.com/googleapis/java-spanner-jdbc/issues/1830)) ([5df5752](https://github.com/googleapis/java-spanner-jdbc/commit/5df57523b7d5bdf1945dca4bbc5fea4a100cf47e)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.40.0 
([#1825](https://github.com/googleapis/java-spanner-jdbc/issues/1825)) ([cec5322](https://github.com/googleapis/java-spanner-jdbc/commit/cec53225a5dcd5bd0f5046346d99c6b2f82147b5)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.33.0 ([#1832](https://github.com/googleapis/java-spanner-jdbc/issues/1832)) ([3128451](https://github.com/googleapis/java-spanner-jdbc/commit/31284512d6de19121a8dd587b46921513dc62900)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.44.0 ([#1822](https://github.com/googleapis/java-spanner-jdbc/issues/1822)) ([40bf9e4](https://github.com/googleapis/java-spanner-jdbc/commit/40bf9e49bd6dad04efbb6f5178b4fa4e6efc2106)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.44.1 ([#1823](https://github.com/googleapis/java-spanner-jdbc/issues/1823)) ([ba8edfd](https://github.com/googleapis/java-spanner-jdbc/commit/ba8edfd7a344cb6e7806b057ec4a1d460062ed38)) +* Update dependency org.springframework.data:spring-data-bom to v2024.1.0 ([#1826](https://github.com/googleapis/java-spanner-jdbc/issues/1826)) ([66a7fc5](https://github.com/googleapis/java-spanner-jdbc/commit/66a7fc58405ad3c301ef056b3d20ff1237387cd4)) + +## [2.24.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.24.0...v2.24.1) (2024-10-28) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.80.1 ([#1811](https://github.com/googleapis/java-spanner-jdbc/issues/1811)) ([d6ee2e9](https://github.com/googleapis/java-spanner-jdbc/commit/d6ee2e98b067f6b1e35d07e8f7bf0a371649f5ee)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.39.0 ([#1808](https://github.com/googleapis/java-spanner-jdbc/issues/1808)) ([efdd859](https://github.com/googleapis/java-spanner-jdbc/commit/efdd8596fe716ec925bf5dcc87a84a2bf2cdc9a2)) + + +### Documentation + +* Add metrics exporter to sample app ([#1809](https://github.com/googleapis/java-spanner-jdbc/issues/1809)) 
([9b89f20](https://github.com/googleapis/java-spanner-jdbc/commit/9b89f20183783c9e169fbad408b3df93291fd18c)) + +## [2.24.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.23.0...v2.24.0) (2024-10-25) + + +### Features + +* Auto_batch_dml methods + combination with JDBC batching ([#1795](https://github.com/googleapis/java-spanner-jdbc/issues/1795)) ([98f01bb](https://github.com/googleapis/java-spanner-jdbc/commit/98f01bba45ec299182d0501c8f43e161d14cf78b)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.80.0 ([#1804](https://github.com/googleapis/java-spanner-jdbc/issues/1804)) ([55d6eef](https://github.com/googleapis/java-spanner-jdbc/commit/55d6eef81026d3dd94d80c4cab004152a82e2e67)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.38.0 ([#1800](https://github.com/googleapis/java-spanner-jdbc/issues/1800)) ([f52a087](https://github.com/googleapis/java-spanner-jdbc/commit/f52a087f72b0fab8193ecaec454c52ee71ebcda3)) +* Update dependency org.springframework.boot:spring-boot to v3.3.5 ([#1801](https://github.com/googleapis/java-spanner-jdbc/issues/1801)) ([44b756d](https://github.com/googleapis/java-spanner-jdbc/commit/44b756db18c79d2ae7fc01150cea33c411f5afbc)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.3.5 ([#1802](https://github.com/googleapis/java-spanner-jdbc/issues/1802)) ([7709c98](https://github.com/googleapis/java-spanner-jdbc/commit/7709c988043e06b15e6ace6a6759594bbfeeace8)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.3.5 ([#1803](https://github.com/googleapis/java-spanner-jdbc/issues/1803)) ([c452a37](https://github.com/googleapis/java-spanner-jdbc/commit/c452a375be26f2d43e1d76056bd45d486340c73f)) +* Update dependency org.springframework.data:spring-data-bom to v2024.0.5 ([#1793](https://github.com/googleapis/java-spanner-jdbc/issues/1793)) 
([5bd5a0a](https://github.com/googleapis/java-spanner-jdbc/commit/5bd5a0a086a8e38709a851cdd2fbb2bc28dbe3a5)) +* Update dependency org.testcontainers:testcontainers to v1.20.3 ([#1798](https://github.com/googleapis/java-spanner-jdbc/issues/1798)) ([c643c33](https://github.com/googleapis/java-spanner-jdbc/commit/c643c3365f435db2b04a001ce0b9bbb997e2b4f3)) +* Update dependency org.testcontainers:testcontainers-bom to v1.20.3 ([#1799](https://github.com/googleapis/java-spanner-jdbc/issues/1799)) ([da1acfc](https://github.com/googleapis/java-spanner-jdbc/commit/da1acfc1e63d8afe4a4c5a51e2f155cd42d89e1d)) + +## [2.23.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.22.1...v2.23.0) (2024-10-12) + + +### Features + +* Auto_batch_dml connection property ([#1787](https://github.com/googleapis/java-spanner-jdbc/issues/1787)) ([8aa0edb](https://github.com/googleapis/java-spanner-jdbc/commit/8aa0edbca8545348066cdcee511f2fc692987075)) + + +### Bug Fixes + +* Fixed transaction tagging and reset datafaker version ([#1777](https://github.com/googleapis/java-spanner-jdbc/issues/1777)) ([3b117c0](https://github.com/googleapis/java-spanner-jdbc/commit/3b117c0723fb44d82170b5ab766afd6ff2c0c347)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.52.0 ([#1779](https://github.com/googleapis/java-spanner-jdbc/issues/1779)) ([ba661fe](https://github.com/googleapis/java-spanner-jdbc/commit/ba661fe9a29935b2683988c968f23dbd171c6e0f)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.78.0 ([#1781](https://github.com/googleapis/java-spanner-jdbc/issues/1781)) ([dcebfff](https://github.com/googleapis/java-spanner-jdbc/commit/dcebfffa8eb71aa5711774873dd8e451de7e9bf5)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.79.0 ([#1786](https://github.com/googleapis/java-spanner-jdbc/issues/1786)) ([2cb6c99](https://github.com/googleapis/java-spanner-jdbc/commit/2cb6c99db5c96842b08a303f59b3669deb03fb9a)) +* 
Update dependency com.google.cloud:google-cloud-trace to v2.52.0 ([#1780](https://github.com/googleapis/java-spanner-jdbc/issues/1780)) ([7a81155](https://github.com/googleapis/java-spanner-jdbc/commit/7a811556a6203006e6e40c54a5a8017d68db3664)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.37.0 ([#1775](https://github.com/googleapis/java-spanner-jdbc/issues/1775)) ([db48aa5](https://github.com/googleapis/java-spanner-jdbc/commit/db48aa571e913bd892708507a9da758a9a9790c0)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.33.0 ([#1778](https://github.com/googleapis/java-spanner-jdbc/issues/1778)) ([1e5428f](https://github.com/googleapis/java-spanner-jdbc/commit/1e5428f0a5cfdbd983b99c5fd3fa59833c9ddada)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.25 ([#1776](https://github.com/googleapis/java-spanner-jdbc/issues/1776)) ([fe47f12](https://github.com/googleapis/java-spanner-jdbc/commit/fe47f12a87a048f794980c871ebb79cbc971cfc5)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.43.0 ([#1782](https://github.com/googleapis/java-spanner-jdbc/issues/1782)) ([7c3c46e](https://github.com/googleapis/java-spanner-jdbc/commit/7c3c46edf0ecccbb2dcdb8c54a665815c184e34a)) + +## [2.22.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.22.0...v2.22.1) (2024-10-03) + + +### Bug Fixes + +* Remove connection-id from metrics ([#1763](https://github.com/googleapis/java-spanner-jdbc/issues/1763)) ([0e54363](https://github.com/googleapis/java-spanner-jdbc/commit/0e543635d2deb1ddb133e57604adc9f88bae4dbf)) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.18.0 ([#1762](https://github.com/googleapis/java-spanner-jdbc/issues/1762)) ([c1fa2c3](https://github.com/googleapis/java-spanner-jdbc/commit/c1fa2c34fbc6b12ca8d602a48bb02eea4fa62b2a)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.51.0 
([#1765](https://github.com/googleapis/java-spanner-jdbc/issues/1765)) ([30f22dd](https://github.com/googleapis/java-spanner-jdbc/commit/30f22ddb75c531098a04171048e38647a1a49aec)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.75.0 ([#1757](https://github.com/googleapis/java-spanner-jdbc/issues/1757)) ([02dac46](https://github.com/googleapis/java-spanner-jdbc/commit/02dac46299b566c30d8399585c2d22b0ac2a4d59)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.76.0 ([#1764](https://github.com/googleapis/java-spanner-jdbc/issues/1764)) ([3fc8344](https://github.com/googleapis/java-spanner-jdbc/commit/3fc83449f93f7a0a4604751fe9d6326242b107d9)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.77.0 ([#1771](https://github.com/googleapis/java-spanner-jdbc/issues/1771)) ([4050b9c](https://github.com/googleapis/java-spanner-jdbc/commit/4050b9c2dedd2a40dac628ad82a414d9e68ae1ba)) +* Update dependency com.google.cloud:google-cloud-trace to v2.51.0 ([#1766](https://github.com/googleapis/java-spanner-jdbc/issues/1766)) ([9b04219](https://github.com/googleapis/java-spanner-jdbc/commit/9b042197c20be094930c087f7ff674f28a8d5980)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.0 ([#1759](https://github.com/googleapis/java-spanner-jdbc/issues/1759)) ([7276ea5](https://github.com/googleapis/java-spanner-jdbc/commit/7276ea5e8358170304ad55d28cee5cd5167fb56d)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.36.1 ([#1761](https://github.com/googleapis/java-spanner-jdbc/issues/1761)) ([add2a22](https://github.com/googleapis/java-spanner-jdbc/commit/add2a2239705e6447b729008aca09fb71e2609f6)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.32.0 ([#1760](https://github.com/googleapis/java-spanner-jdbc/issues/1760)) ([7768a6b](https://github.com/googleapis/java-spanner-jdbc/commit/7768a6b2c0c744ae6a36d222e798dfc8faaf634d)) +* Update dependency 
net.datafaker:datafaker to v2.4.0 ([#1767](https://github.com/googleapis/java-spanner-jdbc/issues/1767)) ([7b2d639](https://github.com/googleapis/java-spanner-jdbc/commit/7b2d639b8e0834b7bec359d8f0fe234b228323dd)) +* Update dependency org.springframework.boot:spring-boot to v3.3.4 ([#1754](https://github.com/googleapis/java-spanner-jdbc/issues/1754)) ([a3f10d7](https://github.com/googleapis/java-spanner-jdbc/commit/a3f10d7faa313d9ec924fe03575c7a114d2d9d35)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.3.4 ([#1755](https://github.com/googleapis/java-spanner-jdbc/issues/1755)) ([ccf5c3c](https://github.com/googleapis/java-spanner-jdbc/commit/ccf5c3c3b31358029392a11d9f58dffec62212fd)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.3.4 ([#1756](https://github.com/googleapis/java-spanner-jdbc/issues/1756)) ([6515992](https://github.com/googleapis/java-spanner-jdbc/commit/65159923685836bfd0416b16eae01a0b9bbd0b55)) +* Update dependency org.testcontainers:testcontainers to v1.20.2 ([#1768](https://github.com/googleapis/java-spanner-jdbc/issues/1768)) ([bb65d4f](https://github.com/googleapis/java-spanner-jdbc/commit/bb65d4f7b57918edb28013c41a35efbdb8513b56)) +* Update dependency org.testcontainers:testcontainers-bom to v1.20.2 ([#1769](https://github.com/googleapis/java-spanner-jdbc/issues/1769)) ([7d32ed7](https://github.com/googleapis/java-spanner-jdbc/commit/7d32ed7dadb01c53f2962b90ffa62b904c78db7c)) + +## [2.22.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.21.0...v2.22.0) (2024-09-16) + + +### Features + +* Update DatabaseMetaData to include named schemas ([#1733](https://github.com/googleapis/java-spanner-jdbc/issues/1733)) ([b162e2a](https://github.com/googleapis/java-spanner-jdbc/commit/b162e2ad8fbcaff5c0453b20ac103cab3207a0d9)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.49.0 
([#1744](https://github.com/googleapis/java-spanner-jdbc/issues/1744)) ([25f7f61](https://github.com/googleapis/java-spanner-jdbc/commit/25f7f6104806e701ed884eaba60c676c24a0ce61)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.74.0 ([#1730](https://github.com/googleapis/java-spanner-jdbc/issues/1730)) ([c257ea5](https://github.com/googleapis/java-spanner-jdbc/commit/c257ea54b9cda17ca2f0b9f8ff77a854669314b5)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.74.1 ([#1748](https://github.com/googleapis/java-spanner-jdbc/issues/1748)) ([794ca47](https://github.com/googleapis/java-spanner-jdbc/commit/794ca4790cec8ef34c651593c7fadde366533fdb)) +* Update dependency com.google.cloud:google-cloud-trace to v2.49.0 ([#1747](https://github.com/googleapis/java-spanner-jdbc/issues/1747)) ([32defb7](https://github.com/googleapis/java-spanner-jdbc/commit/32defb7bddf3bdc91da3f131ba15ed665fef3004)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.35.0 ([#1741](https://github.com/googleapis/java-spanner-jdbc/issues/1741)) ([8e168eb](https://github.com/googleapis/java-spanner-jdbc/commit/8e168eb9bb1720f431b4f76d6ceda2d338c39715)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.42.1 ([#1740](https://github.com/googleapis/java-spanner-jdbc/issues/1740)) ([f229a62](https://github.com/googleapis/java-spanner-jdbc/commit/f229a6259a3c4fce001c26780d5182a32e7e5ffe)) +* Update dependency org.apache.commons:commons-lang3 to v3.17.0 ([#1735](https://github.com/googleapis/java-spanner-jdbc/issues/1735)) ([5dbf223](https://github.com/googleapis/java-spanner-jdbc/commit/5dbf22358c7d69481d8a568483ef340149e414c8)) +* Update dependency org.springframework.data:spring-data-bom to v2024.0.4 ([#1746](https://github.com/googleapis/java-spanner-jdbc/issues/1746)) ([e9d8cab](https://github.com/googleapis/java-spanner-jdbc/commit/e9d8cab337fbc3f3e0d98edd71bd42c9fe46b8b7)) + + +### Documentation + +* Run MyBatis sample on the Emulator 
and add insert-or-update sample ([#1737](https://github.com/googleapis/java-spanner-jdbc/issues/1737)) ([02534d7](https://github.com/googleapis/java-spanner-jdbc/commit/02534d72824a7a7ccb7756797163fa011d1e5e49)) +* Simplify dialect detection for MyBatis sample ([#1739](https://github.com/googleapis/java-spanner-jdbc/issues/1739)) ([01eade8](https://github.com/googleapis/java-spanner-jdbc/commit/01eade8bb6a59d169cb06e945dcf2c20be8a0c82)) +* Simplify dialect detection in Spring Data JDBC sample ([#1738](https://github.com/googleapis/java-spanner-jdbc/issues/1738)) ([44202fc](https://github.com/googleapis/java-spanner-jdbc/commit/44202fce324dafbc3538a3792986cbbaadf97209)) + +## [2.21.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.20.2...v2.21.0) (2024-08-23) + + +### Features + +* Add Quickperf for simple performance testing with JDBC ([#1619](https://github.com/googleapis/java-spanner-jdbc/issues/1619)) ([b6bbd8f](https://github.com/googleapis/java-spanner-jdbc/commit/b6bbd8f40c1ce61914e2c7b80be04abbf4e346ab)) + + +### Dependencies + +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.13.4.2 [security] ([#1710](https://github.com/googleapis/java-spanner-jdbc/issues/1710)) ([eff5df2](https://github.com/googleapis/java-spanner-jdbc/commit/eff5df22785e55a8f0974f028678883ef404b4e6)) +* Update dependency com.fasterxml.jackson.core:jackson-databind to v2.17.2 ([#1715](https://github.com/googleapis/java-spanner-jdbc/issues/1715)) ([21aa199](https://github.com/googleapis/java-spanner-jdbc/commit/21aa19970cee5ee0525c5eaae8bc334cf81d8f25)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.48.0 ([#1719](https://github.com/googleapis/java-spanner-jdbc/issues/1719)) ([a40606c](https://github.com/googleapis/java-spanner-jdbc/commit/a40606c2ef75388cfa0733c6955329225f28c71b)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.72.0 ([#1702](https://github.com/googleapis/java-spanner-jdbc/issues/1702)) 
([31a961d](https://github.com/googleapis/java-spanner-jdbc/commit/31a961d29c7b51e9dcd5aac8a8a66444abbd9088)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.73.0 ([#1726](https://github.com/googleapis/java-spanner-jdbc/issues/1726)) ([f5f8051](https://github.com/googleapis/java-spanner-jdbc/commit/f5f80517425969f4c1bab4ec1c72afa1ccbb842c)) +* Update dependency com.google.cloud:google-cloud-trace to v2.48.0 ([#1720](https://github.com/googleapis/java-spanner-jdbc/issues/1720)) ([c9b646d](https://github.com/googleapis/java-spanner-jdbc/commit/c9b646d1b9c0ccef9cf8ba3bc58da686fae34bc1)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.34.0 ([#1705](https://github.com/googleapis/java-spanner-jdbc/issues/1705)) ([f3f0c10](https://github.com/googleapis/java-spanner-jdbc/commit/f3f0c10394e76389dbc4a62e5702fd5f80c57b1a)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.24 ([#1708](https://github.com/googleapis/java-spanner-jdbc/issues/1708)) ([6881512](https://github.com/googleapis/java-spanner-jdbc/commit/68815128ae2c40c224b4ab155b942e8f5313024f)) +* Update dependency commons-cli:commons-cli to v1.9.0 ([#1716](https://github.com/googleapis/java-spanner-jdbc/issues/1716)) ([6f48065](https://github.com/googleapis/java-spanner-jdbc/commit/6f48065952c2fc2716c911a45959326a6bafaa13)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.41.0 ([#1703](https://github.com/googleapis/java-spanner-jdbc/issues/1703)) ([af58b7a](https://github.com/googleapis/java-spanner-jdbc/commit/af58b7a882edae9a50fbc0d4084cb74b3727d5a6)) +* Update dependency org.apache.commons:commons-lang3 to v3.16.0 ([#1717](https://github.com/googleapis/java-spanner-jdbc/issues/1717)) ([f5229ce](https://github.com/googleapis/java-spanner-jdbc/commit/f5229ce5099b6d2d2b7c099ff4ac1319f21860df)) +* Update dependency org.postgresql:postgresql to v42.7.4 ([#1722](https://github.com/googleapis/java-spanner-jdbc/issues/1722)) 
([1328213](https://github.com/googleapis/java-spanner-jdbc/commit/13282136921d0167c19cac38df0a652cc5477faa)) +* Update dependency org.springframework.boot:spring-boot to v3.3.2 ([#1718](https://github.com/googleapis/java-spanner-jdbc/issues/1718)) ([ede7211](https://github.com/googleapis/java-spanner-jdbc/commit/ede72113801de4a27492cf672a4c5e3edb37bc5e)) +* Update dependency org.springframework.boot:spring-boot to v3.3.3 ([#1723](https://github.com/googleapis/java-spanner-jdbc/issues/1723)) ([55112ac](https://github.com/googleapis/java-spanner-jdbc/commit/55112ac5f00d4a8d7726fa8bc5e9428d08d21227)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.3.3 ([#1724](https://github.com/googleapis/java-spanner-jdbc/issues/1724)) ([db60f4f](https://github.com/googleapis/java-spanner-jdbc/commit/db60f4f4f8713a30c1fe275266ff455fd03d84a4)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.3.3 ([#1725](https://github.com/googleapis/java-spanner-jdbc/issues/1725)) ([47fda8f](https://github.com/googleapis/java-spanner-jdbc/commit/47fda8f9b8cf639fe11fb3241256490f660e0d8b)) +* Update dependency org.springframework.data:spring-data-bom to v2024.0.3 ([#1704](https://github.com/googleapis/java-spanner-jdbc/issues/1704)) ([e82d839](https://github.com/googleapis/java-spanner-jdbc/commit/e82d8398eede11469c966aa11c2188c671a5f02b)) +* Update dependency org.testcontainers:testcontainers to v1.20.1 ([#1684](https://github.com/googleapis/java-spanner-jdbc/issues/1684)) ([0907305](https://github.com/googleapis/java-spanner-jdbc/commit/09073057df2cff41b7a62f56dc0cf57ed62f4801)) + +## [2.20.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.20.1...v2.20.2) (2024-08-07) + + +### Bug Fixes + +* Support getShort for DATA_TYPE in TypeInfo ([#1691](https://github.com/googleapis/java-spanner-jdbc/issues/1691)) ([828aff6](https://github.com/googleapis/java-spanner-jdbc/commit/828aff6f7015b5f91ebbc2ad54aeeecf5515a9bd)) + + +### 
Dependencies + +* Bump Spanner to 6.72.0 ([#1698](https://github.com/googleapis/java-spanner-jdbc/issues/1698)) ([ce00b17](https://github.com/googleapis/java-spanner-jdbc/commit/ce00b176d39fe728eadd386d4e9794dc1fc0fbca)) +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.47.0 ([#1695](https://github.com/googleapis/java-spanner-jdbc/issues/1695)) ([148c655](https://github.com/googleapis/java-spanner-jdbc/commit/148c655a8bd1309250b5158d0f20a167e89bd9fe)) +* Update dependency com.google.cloud:google-cloud-trace to v2.47.0 ([#1696](https://github.com/googleapis/java-spanner-jdbc/issues/1696)) ([c6a54f7](https://github.com/googleapis/java-spanner-jdbc/commit/c6a54f7a3045fc711fcad8d16294c67aa8eb8810)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.33.0 ([#1693](https://github.com/googleapis/java-spanner-jdbc/issues/1693)) ([08011a5](https://github.com/googleapis/java-spanner-jdbc/commit/08011a520a2fb14d06614da5e2c7b9ae47d6c6f1)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.31.0 ([#1679](https://github.com/googleapis/java-spanner-jdbc/issues/1679)) ([ddf47f4](https://github.com/googleapis/java-spanner-jdbc/commit/ddf47f4fc7e1c05e6e3086ada0ff3b2c1efeb2af)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.40.0 ([#1676](https://github.com/googleapis/java-spanner-jdbc/issues/1676)) ([faf7c49](https://github.com/googleapis/java-spanner-jdbc/commit/faf7c4908a926cd7a10ea0b860babcaa97c4c7e1)) +* Update dependency org.hamcrest:hamcrest to v3 ([#1697](https://github.com/googleapis/java-spanner-jdbc/issues/1697)) ([3933cf2](https://github.com/googleapis/java-spanner-jdbc/commit/3933cf2c4099e0d06c1dc4e4583c11ba5e4eae19)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.3.2 ([#1685](https://github.com/googleapis/java-spanner-jdbc/issues/1685)) ([6a57b49](https://github.com/googleapis/java-spanner-jdbc/commit/6a57b49a34f2b44f42f6c07eaf5da063a1d648d0)) +* Update 
dependency org.springframework.boot:spring-boot-starter-parent to v3.3.2 ([#1686](https://github.com/googleapis/java-spanner-jdbc/issues/1686)) ([2be70c7](https://github.com/googleapis/java-spanner-jdbc/commit/2be70c74f673c796d7b366b56644b6d00038bc07)) +* Update dependency org.springframework.data:spring-data-bom to v2024.0.2 ([#1680](https://github.com/googleapis/java-spanner-jdbc/issues/1680)) ([23934f7](https://github.com/googleapis/java-spanner-jdbc/commit/23934f7f38f5c99b7a6859ab69525ce368317128)) + +## [2.20.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.20.0...v2.20.1) (2024-07-04) + + +### Performance Improvements + +* Use multiplexed sessions ([#1673](https://github.com/googleapis/java-spanner-jdbc/issues/1673)) ([107ec66](https://github.com/googleapis/java-spanner-jdbc/commit/107ec661ad73648ed7689c4abafd186cc458e73e)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.46.0 ([#1668](https://github.com/googleapis/java-spanner-jdbc/issues/1668)) ([62620e0](https://github.com/googleapis/java-spanner-jdbc/commit/62620e0cc0b83bbf8b775593deac8aa4d555364e)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.71.0 ([#1672](https://github.com/googleapis/java-spanner-jdbc/issues/1672)) ([4bcfaf9](https://github.com/googleapis/java-spanner-jdbc/commit/4bcfaf9906a41cb0eb1fd3c053324e1d18606ca2)) +* Update dependency com.google.cloud:google-cloud-trace to v2.46.0 ([#1669](https://github.com/googleapis/java-spanner-jdbc/issues/1669)) ([debcfc0](https://github.com/googleapis/java-spanner-jdbc/commit/debcfc0cf8b076014487c5879a7c4df9e88c079a)) +* Update dependency com.google.cloud.opentelemetry:exporter-trace to v0.30.0 ([#1671](https://github.com/googleapis/java-spanner-jdbc/issues/1671)) ([2cdc0a3](https://github.com/googleapis/java-spanner-jdbc/commit/2cdc0a3c0c6bd61e6cd57279dd8535943cac322f)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.39.0 
([#1663](https://github.com/googleapis/java-spanner-jdbc/issues/1663)) ([9c37fe3](https://github.com/googleapis/java-spanner-jdbc/commit/9c37fe35873d3296f54612377a868a6c5bfed9c6)) + +## [2.20.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.19.4...v2.20.0) (2024-06-27) + + +### Features + +* Add OpenTelemetry tracing ([#1568](https://github.com/googleapis/java-spanner-jdbc/issues/1568)) ([1485a04](https://github.com/googleapis/java-spanner-jdbc/commit/1485a04272c270851468254bebffe4f7d846f17c)) + + +### Dependencies + +* Update dependency com.google.api.grpc:proto-google-cloud-trace-v1 to v2.45.0 ([#1659](https://github.com/googleapis/java-spanner-jdbc/issues/1659)) ([0491712](https://github.com/googleapis/java-spanner-jdbc/commit/0491712babfdc7088358f28c4f261e3ec6dd0d2b)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.70.0 ([#1657](https://github.com/googleapis/java-spanner-jdbc/issues/1657)) ([83aaec1](https://github.com/googleapis/java-spanner-jdbc/commit/83aaec17b0c28ac9ef45e99eb878d40572124f3f)) +* Update dependency com.google.cloud:google-cloud-trace to v2.45.0 ([#1661](https://github.com/googleapis/java-spanner-jdbc/issues/1661)) ([648224c](https://github.com/googleapis/java-spanner-jdbc/commit/648224cfb2310d80f781386be64be00157133a30)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.32.0 ([#1654](https://github.com/googleapis/java-spanner-jdbc/issues/1654)) ([503e281](https://github.com/googleapis/java-spanner-jdbc/commit/503e281106910dc9352deeaf790097e495df127b)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.3.1 ([#1648](https://github.com/googleapis/java-spanner-jdbc/issues/1648)) ([4df665f](https://github.com/googleapis/java-spanner-jdbc/commit/4df665fa3a0b65757fc1c00fcf7a0fbd6c0c5d2e)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.3.1 ([#1649](https://github.com/googleapis/java-spanner-jdbc/issues/1649)) 
([efe87a6](https://github.com/googleapis/java-spanner-jdbc/commit/efe87a6016b8d61c42f4a4ad5b7ead180545d24e)) + +## [2.19.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.19.2...v2.19.3) (2024-06-19) + + +### Dependencies + +* Update dependency org.springframework.data:spring-data-bom to v2024.0.1 ([#1645](https://github.com/googleapis/java-spanner-jdbc/issues/1645)) ([58122ba](https://github.com/googleapis/java-spanner-jdbc/commit/58122ba6a32e97038bc49aea35c947853b05f576)) + +## [2.19.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.19.1...v2.19.2) (2024-06-13) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.69.0 ([#1641](https://github.com/googleapis/java-spanner-jdbc/issues/1641)) ([55f86df](https://github.com/googleapis/java-spanner-jdbc/commit/55f86dfa217de2e2da280fbbd33d32aa7a0daa41)) + +## [2.19.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.19.0...v2.19.1) (2024-06-05) + + +### Bug Fixes + +* Cleanup unused methods ([#1635](https://github.com/googleapis/java-spanner-jdbc/issues/1635)) ([ad0a35c](https://github.com/googleapis/java-spanner-jdbc/commit/ad0a35c82fd880ce5b705f4cb749c35664ccc604)) + + +### Dependencies + +* Update dependency com.google.cloud:sdk-platform-java-config to v3.31.0 ([#1630](https://github.com/googleapis/java-spanner-jdbc/issues/1630)) ([98b530d](https://github.com/googleapis/java-spanner-jdbc/commit/98b530dce7a6131a562afdfca6b8b7c73fb83d7a)) +* Update dependency org.mybatis.dynamic-sql:mybatis-dynamic-sql to v1.5.2 ([#1633](https://github.com/googleapis/java-spanner-jdbc/issues/1633)) ([7c62ee3](https://github.com/googleapis/java-spanner-jdbc/commit/7c62ee30e81a29654934d1f77f36816e92b3470d)) + +
## [2.19.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.18.1...v2.19.0) (2024-05-31) + + +### Features + +* Add Proto Columns support in JDBC ([#1252](https://github.com/googleapis/java-spanner-jdbc/issues/1252)) ([3efa9ac](https://github.com/googleapis/java-spanner-jdbc/commit/3efa9ac7906bab35d72f2951b9945a873f48f013)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.68.0 ([#1620](https://github.com/googleapis/java-spanner-jdbc/issues/1620)) ([255eeef](https://github.com/googleapis/java-spanner-jdbc/commit/255eeefa1ce682a46a4d0467ce297acfb40b0d25)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.68.1 ([#1624](https://github.com/googleapis/java-spanner-jdbc/issues/1624)) ([f4a83ba](https://github.com/googleapis/java-spanner-jdbc/commit/f4a83ba0d4702caa433a8390ff75c955d7669b17)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.3.0 ([#1616](https://github.com/googleapis/java-spanner-jdbc/issues/1616)) ([6912711](https://github.com/googleapis/java-spanner-jdbc/commit/691271199bed294f5ba272285282be975e8bbead)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.3.0 ([#1617](https://github.com/googleapis/java-spanner-jdbc/issues/1617)) ([155d6c6](https://github.com/googleapis/java-spanner-jdbc/commit/155d6c65c4ae3c9306ce86864bb4a4b4c9569473)) + +## [2.18.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.18.0...v2.18.1) (2024-05-22) + + +### Dependencies + +* Remove open-telemetry dependency import ([#1608](https://github.com/googleapis/java-spanner-jdbc/issues/1608)) ([fcb32ef](https://github.com/googleapis/java-spanner-jdbc/commit/fcb32efb4945807e0933341874644f042b7f33af)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to 
v6.67.0 ([#1613](https://github.com/googleapis/java-spanner-jdbc/issues/1613)) ([12080e0](https://github.com/googleapis/java-spanner-jdbc/commit/12080e0579269fd9b893440d2f6ff3e784fb52ec)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.1 ([#1609](https://github.com/googleapis/java-spanner-jdbc/issues/1609)) ([46aed65](https://github.com/googleapis/java-spanner-jdbc/commit/46aed65c39cd9c6e60733fdf9cfd11f770e77019)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.38.0 ([#1606](https://github.com/googleapis/java-spanner-jdbc/issues/1606)) ([b45bed5](https://github.com/googleapis/java-spanner-jdbc/commit/b45bed575bc53898205aa183d7d8554b336a68f9)) +* Update dependency org.springframework.data:spring-data-bom to v2023.1.6 ([#1610](https://github.com/googleapis/java-spanner-jdbc/issues/1610)) ([aac170d](https://github.com/googleapis/java-spanner-jdbc/commit/aac170db26f8018b44f97d17f92faecb11117269)) +* Update dependency org.springframework.data:spring-data-bom to v2024 ([#1611](https://github.com/googleapis/java-spanner-jdbc/issues/1611)) ([6669c7a](https://github.com/googleapis/java-spanner-jdbc/commit/6669c7af373160fa9396a6dbcf2adff091d3c46a)) +* Update dependency org.testcontainers:testcontainers to v1.19.8 ([#1604](https://github.com/googleapis/java-spanner-jdbc/issues/1604)) ([e155a46](https://github.com/googleapis/java-spanner-jdbc/commit/e155a4624a021cb7959e72f7eb138ff8c685eaef)) + +## [2.18.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.17.1...v2.18.0) (2024-05-04) + + +### Features + +* Allow DDL with autocommit=false ([#1600](https://github.com/googleapis/java-spanner-jdbc/issues/1600)) ([a61c25d](https://github.com/googleapis/java-spanner-jdbc/commit/a61c25d8f90460ec507b383dbaee0ca686104ba8)) +* Support concurrent transactions on the emulator ([#1601](https://github.com/googleapis/java-spanner-jdbc/issues/1601)) 
([7123991](https://github.com/googleapis/java-spanner-jdbc/commit/71239912a8078569dcd985314810131e593c0ed7)) + + +### Bug Fixes + +* ClassCastException in Spring Data JDBC sample ([#1595](https://github.com/googleapis/java-spanner-jdbc/issues/1595)) ([e96a86a](https://github.com/googleapis/java-spanner-jdbc/commit/e96a86a4b82ac4b47bd1ce25e810f01299597339)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.66.0 ([#1599](https://github.com/googleapis/java-spanner-jdbc/issues/1599)) ([84ea11a](https://github.com/googleapis/java-spanner-jdbc/commit/84ea11ac27635dbe6fb101134767d14488dde8c2)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.30.0 ([#1597](https://github.com/googleapis/java-spanner-jdbc/issues/1597)) ([40a7212](https://github.com/googleapis/java-spanner-jdbc/commit/40a721237c79882e55d86d48402c64cc09782522)) +* Update dependency org.mybatis.dynamic-sql:mybatis-dynamic-sql to v1.5.1 ([#1596](https://github.com/googleapis/java-spanner-jdbc/issues/1596)) ([f54beb2](https://github.com/googleapis/java-spanner-jdbc/commit/f54beb20d6bbe3f4974385c4758ba77d31d25ce3)) + +## [2.17.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.17.0...v2.17.1) (2024-04-30) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.65.1 ([#1588](https://github.com/googleapis/java-spanner-jdbc/issues/1588)) ([1b1218a](https://github.com/googleapis/java-spanner-jdbc/commit/1b1218adaa25ecec3def66b7031c7f0e5e8c23b0)) + +## [2.17.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.16.3...v2.17.0) (2024-04-21) + + +### Features + +* Concurrent transactions on emulator ([#1578](https://github.com/googleapis/java-spanner-jdbc/issues/1578)) ([0234fb3](https://github.com/googleapis/java-spanner-jdbc/commit/0234fb3eb43695c657b845946e6d1122827dcae5)) +* Support endpoint connection URL property ([#1582](https://github.com/googleapis/java-spanner-jdbc/issues/1582)) 
([b589c96](https://github.com/googleapis/java-spanner-jdbc/commit/b589c96a3187390e9ffa576d1e0ee285e223c559)) +* Support max_commit_delay ([#1581](https://github.com/googleapis/java-spanner-jdbc/issues/1581)) ([06e43c0](https://github.com/googleapis/java-spanner-jdbc/commit/06e43c05a65f25da9a4dbff73e2e75c1b5ec155b)) +* Support statement tags in hints ([#1579](https://github.com/googleapis/java-spanner-jdbc/issues/1579)) ([0c3aec1](https://github.com/googleapis/java-spanner-jdbc/commit/0c3aec1a5eec212e9ecacc7bec7f2a8980c9af78)) + +## [2.16.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.16.2...v2.16.3) (2024-04-20) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.65.0 ([#1573](https://github.com/googleapis/java-spanner-jdbc/issues/1573)) ([ecc18f1](https://github.com/googleapis/java-spanner-jdbc/commit/ecc18f1307933bb0f4f576ce2df81af226f8a2cb)) + +## [2.16.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.16.1...v2.16.2) (2024-04-19) + + +### Bug Fixes + +* Release ResultSet on Statement#close() ([#1567](https://github.com/googleapis/java-spanner-jdbc/issues/1567)) ([2258ae3](https://github.com/googleapis/java-spanner-jdbc/commit/2258ae3331a7e89036a202f243b9284108301fc0)) + + +### Dependencies + +* Update actions/checkout action to v4 ([#1547](https://github.com/googleapis/java-spanner-jdbc/issues/1547)) ([736e3af](https://github.com/googleapis/java-spanner-jdbc/commit/736e3afa54149dd11803bd715569afd9ec8e87f2)) +* Update actions/checkout action to v4 ([#1561](https://github.com/googleapis/java-spanner-jdbc/issues/1561)) ([6053d79](https://github.com/googleapis/java-spanner-jdbc/commit/6053d79816546130eca7a7016dc9299c079e411f)) +* Update actions/checkout digest to b4ffde6 ([#1546](https://github.com/googleapis/java-spanner-jdbc/issues/1546)) ([18c5ad4](https://github.com/googleapis/java-spanner-jdbc/commit/18c5ad4d4124f095547d50c0d2e154bc06380642)) +* Update actions/github-script action to v7 
([#1548](https://github.com/googleapis/java-spanner-jdbc/issues/1548)) ([d1d422c](https://github.com/googleapis/java-spanner-jdbc/commit/d1d422cdf0a74231c468262662fdf5ce4d27b8ef)) +* Update actions/setup-java action to v4 ([#1549](https://github.com/googleapis/java-spanner-jdbc/issues/1549)) ([cb2b911](https://github.com/googleapis/java-spanner-jdbc/commit/cb2b911b0b332e97f85974ec880a5ab7a12a7578)) +* Update actions/setup-java action to v4 ([#1563](https://github.com/googleapis/java-spanner-jdbc/issues/1563)) ([01d4de1](https://github.com/googleapis/java-spanner-jdbc/commit/01d4de1df21144e9c3bcf0b4e5192b12cd19dc82)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.63.0 ([#1552](https://github.com/googleapis/java-spanner-jdbc/issues/1552)) ([ac75b9f](https://github.com/googleapis/java-spanner-jdbc/commit/ac75b9faf0eaeb499428ecefda1f3285b3d28e67)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.64.0 ([#1565](https://github.com/googleapis/java-spanner-jdbc/issues/1565)) ([b57662f](https://github.com/googleapis/java-spanner-jdbc/commit/b57662fb65b74b329103ef63265192d7026b2c2d)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.28.1 ([#1560](https://github.com/googleapis/java-spanner-jdbc/issues/1560)) ([afcbe5e](https://github.com/googleapis/java-spanner-jdbc/commit/afcbe5ea5701a799729543c9564759570f05feb8)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.29.0 ([#1572](https://github.com/googleapis/java-spanner-jdbc/issues/1572)) ([3d43707](https://github.com/googleapis/java-spanner-jdbc/commit/3d437076f2c699e261daf7dcb470085765dba14f)) +* Update dependency io.opentelemetry:opentelemetry-bom to v1.37.0 ([#1562](https://github.com/googleapis/java-spanner-jdbc/issues/1562)) ([22f766f](https://github.com/googleapis/java-spanner-jdbc/commit/22f766f098944c23084776c70dbd9dba21efa59c)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.2.5 
([#1569](https://github.com/googleapis/java-spanner-jdbc/issues/1569)) ([784ac1e](https://github.com/googleapis/java-spanner-jdbc/commit/784ac1e68ac29628fe55d7b9e772326f10ffeaec)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.2.5 ([#1570](https://github.com/googleapis/java-spanner-jdbc/issues/1570)) ([f54d4dd](https://github.com/googleapis/java-spanner-jdbc/commit/f54d4dd1211508785cb899e0a3c9b585c0908421)) +* Update dependency org.springframework.data:spring-data-bom to v2023.1.5 ([#1564](https://github.com/googleapis/java-spanner-jdbc/issues/1564)) ([dbbcca3](https://github.com/googleapis/java-spanner-jdbc/commit/dbbcca342a83476b1f942aab23f21469cf6c8304)) +* Update stcarolas/setup-maven action to v5 ([#1550](https://github.com/googleapis/java-spanner-jdbc/issues/1550)) ([121d08e](https://github.com/googleapis/java-spanner-jdbc/commit/121d08e16db0bbb1f6041a201d620829e7121f4d)) + + +### Documentation + +* Create samples for quickstart guide ([#1536](https://github.com/googleapis/java-spanner-jdbc/issues/1536)) ([194c820](https://github.com/googleapis/java-spanner-jdbc/commit/194c8205dee9cc4144b18e219df43027b9f15cf2)) + +## [2.16.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.16.0...v2.16.1) (2024-03-22) + + +### Dependencies + +* Bump Spanner client to 6.62.0 ([#1539](https://github.com/googleapis/java-spanner-jdbc/issues/1539)) ([ca274fb](https://github.com/googleapis/java-spanner-jdbc/commit/ca274fb22cbbb974fe71fffbe9f8c3a56f40628c)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.28.1 ([#1537](https://github.com/googleapis/java-spanner-jdbc/issues/1537)) ([4d1d38e](https://github.com/googleapis/java-spanner-jdbc/commit/4d1d38ea4312d637d1cad61b8230d12cdbf0ba51)) +* Update dependency org.postgresql:postgresql to v42.7.3 ([#1532](https://github.com/googleapis/java-spanner-jdbc/issues/1532)) ([b09da60](https://github.com/googleapis/java-spanner-jdbc/commit/b09da609950b94157564f4f9987a3ce60221568e)) 
+* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.2.4 ([#1540](https://github.com/googleapis/java-spanner-jdbc/issues/1540)) ([21faff8](https://github.com/googleapis/java-spanner-jdbc/commit/21faff8d72ad209835971162caadb694c602175d)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.2.4 ([#1541](https://github.com/googleapis/java-spanner-jdbc/issues/1541)) ([2c76488](https://github.com/googleapis/java-spanner-jdbc/commit/2c76488e5e80ecad2824311ed399142495bfc2e4)) +* Update dependency org.springframework.data:spring-data-bom to v2023.1.4 ([#1533](https://github.com/googleapis/java-spanner-jdbc/issues/1533)) ([ec7d3b0](https://github.com/googleapis/java-spanner-jdbc/commit/ec7d3b04fd0bf97ea17ebd1849d02fdfdb31ded3)) + +## [2.16.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.15.5...v2.16.0) (2024-03-07) + + +### Features + +* Support float32 ([#1518](https://github.com/googleapis/java-spanner-jdbc/issues/1518)) ([635ac41](https://github.com/googleapis/java-spanner-jdbc/commit/635ac41e054814cf3b58d37cbc42b01ac183b2a1)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.27.0 ([#1521](https://github.com/googleapis/java-spanner-jdbc/issues/1521)) ([a8eecfb](https://github.com/googleapis/java-spanner-jdbc/commit/a8eecfb3a731505ba309b4a359dea7b88990c88a)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.60.1 ([#1514](https://github.com/googleapis/java-spanner-jdbc/issues/1514)) ([cf8fe9e](https://github.com/googleapis/java-spanner-jdbc/commit/cf8fe9eea2423f64867c9ecd790916b81165e575)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.61.0 ([#1523](https://github.com/googleapis/java-spanner-jdbc/issues/1523)) ([0c1e281](https://github.com/googleapis/java-spanner-jdbc/commit/0c1e28177131f30405b09cf38fa7a78645a3508a)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.27.0 
([#1522](https://github.com/googleapis/java-spanner-jdbc/issues/1522)) ([28986f9](https://github.com/googleapis/java-spanner-jdbc/commit/28986f9dddabfe2d89b2e3155885434413c3941e)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.23 ([#1527](https://github.com/googleapis/java-spanner-jdbc/issues/1527)) ([1e7a4f7](https://github.com/googleapis/java-spanner-jdbc/commit/1e7a4f73339479134868206730c275fe752e8d0d)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.2.3 ([#1512](https://github.com/googleapis/java-spanner-jdbc/issues/1512)) ([e5825c9](https://github.com/googleapis/java-spanner-jdbc/commit/e5825c9a4aa9df68b1ca911430ef37cb6d3549c4)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.2.3 ([#1513](https://github.com/googleapis/java-spanner-jdbc/issues/1513)) ([8cfaa1a](https://github.com/googleapis/java-spanner-jdbc/commit/8cfaa1a523c32ebed307f747fd939bd900c28b34)) + +## [2.15.5](https://github.com/googleapis/java-spanner-jdbc/compare/v2.15.4...v2.15.5) (2024-02-21) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.60.0 ([#1508](https://github.com/googleapis/java-spanner-jdbc/issues/1508)) ([0f7e59c](https://github.com/googleapis/java-spanner-jdbc/commit/0f7e59c7aabdb9fe69fde23f35c5451d19332076)) +* Update dependency org.postgresql:postgresql to v42.7.2 ([#1503](https://github.com/googleapis/java-spanner-jdbc/issues/1503)) ([dd5142f](https://github.com/googleapis/java-spanner-jdbc/commit/dd5142faab667817b9fe6bff7f0388c5e43e7dee)) +* Update dependency org.postgresql:postgresql to v42.7.2 [security] ([#1507](https://github.com/googleapis/java-spanner-jdbc/issues/1507)) ([caacd05](https://github.com/googleapis/java-spanner-jdbc/commit/caacd056f245f1cf57119653627b0c5097730f41)) +* Update dependency org.springframework.data:spring-data-bom to v2023.1.3 ([#1499](https://github.com/googleapis/java-spanner-jdbc/issues/1499)) 
([34151b6](https://github.com/googleapis/java-spanner-jdbc/commit/34151b6cae097c97fc49839e2594f5adbd6393c3)) + +## [2.15.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.15.3...v2.15.4) (2024-02-15) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.25.0 ([#1492](https://github.com/googleapis/java-spanner-jdbc/issues/1492)) ([21988ab](https://github.com/googleapis/java-spanner-jdbc/commit/21988ab03ddff378caf536eccf94289bb745434c)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.58.0 ([#1487](https://github.com/googleapis/java-spanner-jdbc/issues/1487)) ([c4889fa](https://github.com/googleapis/java-spanner-jdbc/commit/c4889fa9ba8d9527cd2a54488a06cde7ef94266e)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.59.0 ([#1495](https://github.com/googleapis/java-spanner-jdbc/issues/1495)) ([091170b](https://github.com/googleapis/java-spanner-jdbc/commit/091170b80a4cd95f36eaa8ae7a9d2f6c33f9b862)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.25.0 ([#1493](https://github.com/googleapis/java-spanner-jdbc/issues/1493)) ([0e1694f](https://github.com/googleapis/java-spanner-jdbc/commit/0e1694f573268b6c1c67be2d683afcbbd7cd6c61)) + +## [2.15.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.15.2...v2.15.3) (2024-02-09) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.24.0 ([#1481](https://github.com/googleapis/java-spanner-jdbc/issues/1481)) ([e3f58d1](https://github.com/googleapis/java-spanner-jdbc/commit/e3f58d1cad8806e120dda99e16536c7bbf837fe4)) + +## [2.15.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.15.1...v2.15.2) (2024-01-29) + + +### Dependencies + +* Bump Spanner client to 6.57.0 ([#1474](https://github.com/googleapis/java-spanner-jdbc/issues/1474)) ([ce5180a](https://github.com/googleapis/java-spanner-jdbc/commit/ce5180a900a6b55bba2d32f401d957f6924f8ea8)) +* Update 
dependency com.google.cloud:google-cloud-shared-dependencies to v3.22.0 ([#1463](https://github.com/googleapis/java-spanner-jdbc/issues/1463)) ([d505321](https://github.com/googleapis/java-spanner-jdbc/commit/d505321ab58f3d597550d9891c11eb7a7f17c536)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.23.0 ([#1470](https://github.com/googleapis/java-spanner-jdbc/issues/1470)) ([58cf2ba](https://github.com/googleapis/java-spanner-jdbc/commit/58cf2baa1a8cecc4f84d60001d288c535e82b66d)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.22 ([#1460](https://github.com/googleapis/java-spanner-jdbc/issues/1460)) ([bbbff2f](https://github.com/googleapis/java-spanner-jdbc/commit/bbbff2ff98a0bc15ea3b5640b767e872f753452c)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.22.1 ([#1467](https://github.com/googleapis/java-spanner-jdbc/issues/1467)) ([449bae5](https://github.com/googleapis/java-spanner-jdbc/commit/449bae5079ce122be711490f8f1009f272082d13)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.2.2 ([#1424](https://github.com/googleapis/java-spanner-jdbc/issues/1424)) ([e14af90](https://github.com/googleapis/java-spanner-jdbc/commit/e14af9025a64a94cf431cc2d03bcace7eb753b32)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.2.2 ([#1425](https://github.com/googleapis/java-spanner-jdbc/issues/1425)) ([9ccb04a](https://github.com/googleapis/java-spanner-jdbc/commit/9ccb04afaaf355734e955f82716c94d229b958d3)) +* Update dependency org.springframework.data:spring-data-bom to v2023.1.2 ([#1421](https://github.com/googleapis/java-spanner-jdbc/issues/1421)) ([dc53309](https://github.com/googleapis/java-spanner-jdbc/commit/dc53309d7846e97ffb1ee2e5cdc0e6c13b648049)) + +## [2.15.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.15.0...v2.15.1) (2024-01-09) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.21.0 
([#1454](https://github.com/googleapis/java-spanner-jdbc/issues/1454)) ([d1c1d2c](https://github.com/googleapis/java-spanner-jdbc/commit/d1c1d2cf1ad86d987fc285eb97d9b9ca3c53f753)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.56.0 ([#1455](https://github.com/googleapis/java-spanner-jdbc/issues/1455)) ([90fad38](https://github.com/googleapis/java-spanner-jdbc/commit/90fad3870fb3b704d041d1a79f2e8fc6f32631d1)) + +## [2.15.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.14.6...v2.15.0) (2023-12-22) + + +### Features + +* Support PreparedStatement#getParameterMetaData() ([#1218](https://github.com/googleapis/java-spanner-jdbc/issues/1218)) ([721ff45](https://github.com/googleapis/java-spanner-jdbc/commit/721ff4552104efba47c19ef511282071c3b334c3)) + + +### Performance Improvements + +* Optimize isValid implementation ([#1444](https://github.com/googleapis/java-spanner-jdbc/issues/1444)) ([914e973](https://github.com/googleapis/java-spanner-jdbc/commit/914e973ad7fd638fabc3ec130b7618c51f01f401)), closes [#1443](https://github.com/googleapis/java-spanner-jdbc/issues/1443) + + +### Dependencies + +* Update dependency org.postgresql:postgresql to v42.7.1 ([#1441](https://github.com/googleapis/java-spanner-jdbc/issues/1441)) ([5997555](https://github.com/googleapis/java-spanner-jdbc/commit/59975553826360b86492e50b9d49c29aecc28bab)) + +## [2.14.6](https://github.com/googleapis/java-spanner-jdbc/compare/v2.14.5...v2.14.6) (2023-12-04) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.55.0 ([#1434](https://github.com/googleapis/java-spanner-jdbc/issues/1434)) ([4168611](https://github.com/googleapis/java-spanner-jdbc/commit/4168611a973f3cd35a4cb1ce56d0eefe5e1dd571)) + +## [2.14.5](https://github.com/googleapis/java-spanner-jdbc/compare/v2.14.4...v2.14.5) (2023-11-30) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.20.0 
([#1430](https://github.com/googleapis/java-spanner-jdbc/issues/1430)) ([5da2d71](https://github.com/googleapis/java-spanner-jdbc/commit/5da2d71c69036a8a4f4033b0bb00c39f98715fd1)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.54.0 ([#1420](https://github.com/googleapis/java-spanner-jdbc/issues/1420)) ([d3f5361](https://github.com/googleapis/java-spanner-jdbc/commit/d3f5361bbe03eca85c1bdba5af0a716dc923a231)) +* Update dependency org.mybatis.spring.boot:mybatis-spring-boot-starter to v3.0.3 ([#1426](https://github.com/googleapis/java-spanner-jdbc/issues/1426)) ([8667ee8](https://github.com/googleapis/java-spanner-jdbc/commit/8667ee8c16841b9a526ac7f1bd025f13f9149dc7)) +* Update dependency org.postgresql:postgresql to v42.7.0 ([#1422](https://github.com/googleapis/java-spanner-jdbc/issues/1422)) ([d107b25](https://github.com/googleapis/java-spanner-jdbc/commit/d107b25b2d6ad1dcf91a94118bd96d5f975be116)) + +## [2.14.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.14.3...v2.14.4) (2023-11-09) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.53.0 ([#1415](https://github.com/googleapis/java-spanner-jdbc/issues/1415)) ([f2b578d](https://github.com/googleapis/java-spanner-jdbc/commit/f2b578dc38bd9328022fd1ecd627d8e1d20c2dbc)) + +## [2.14.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.14.2...v2.14.3) (2023-11-01) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.19.0 ([#1411](https://github.com/googleapis/java-spanner-jdbc/issues/1411)) ([eb14ea0](https://github.com/googleapis/java-spanner-jdbc/commit/eb14ea005dbb810ee9e303971338edf0edfedb75)) + +## [2.14.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.14.1...v2.14.2) (2023-10-24) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.18.0 ([#1402](https://github.com/googleapis/java-spanner-jdbc/issues/1402)) 
([764c9d2](https://github.com/googleapis/java-spanner-jdbc/commit/764c9d244df16307fe3b13e647759b777bb145ab)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.52.1 ([#1401](https://github.com/googleapis/java-spanner-jdbc/issues/1401)) ([cf577ee](https://github.com/googleapis/java-spanner-jdbc/commit/cf577ee89faa84444b37a7b0b7fe931223461836)) +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.1.5 ([#1399](https://github.com/googleapis/java-spanner-jdbc/issues/1399)) ([ca4b183](https://github.com/googleapis/java-spanner-jdbc/commit/ca4b183c828b1d3cc4496e98ec3b609f7f80c2a8)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to v3.1.5 ([#1400](https://github.com/googleapis/java-spanner-jdbc/issues/1400)) ([9b139aa](https://github.com/googleapis/java-spanner-jdbc/commit/9b139aa200a9ddf436b089555d25274f5ef7ee4a)) + +## [2.14.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.14.0...v2.14.1) (2023-10-14) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.51.0 ([#1393](https://github.com/googleapis/java-spanner-jdbc/issues/1393)) ([74d106f](https://github.com/googleapis/java-spanner-jdbc/commit/74d106f2590abc592b501093b6a30d2493e49868)) +* Update dependency org.springframework.data:spring-data-bom to v2023.0.5 ([#1391](https://github.com/googleapis/java-spanner-jdbc/issues/1391)) ([c5a7ee4](https://github.com/googleapis/java-spanner-jdbc/commit/c5a7ee439f5d1f2242ffa2f3522b589cefb25744)) + +## [2.14.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.13.4...v2.14.0) (2023-10-12) + + +### Features + +* Support default schema and catalog for PostgreSQL databases ([#1375](https://github.com/googleapis/java-spanner-jdbc/issues/1375)) ([2737ece](https://github.com/googleapis/java-spanner-jdbc/commit/2737ecec00abd51b796e13375f2ebdfbf8e1b201)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.17.0 
([#1383](https://github.com/googleapis/java-spanner-jdbc/issues/1383)) ([f0209a7](https://github.com/googleapis/java-spanner-jdbc/commit/f0209a7be923465a30effc30ac23294299e0cd72)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.50.0 ([#1386](https://github.com/googleapis/java-spanner-jdbc/issues/1386)) ([8401ef8](https://github.com/googleapis/java-spanner-jdbc/commit/8401ef868493a83d2c8b8d68a33a118d2b94f769)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.50.1 ([#1388](https://github.com/googleapis/java-spanner-jdbc/issues/1388)) ([8ae3919](https://github.com/googleapis/java-spanner-jdbc/commit/8ae3919e352dbe02a03a41cfbc440619d299454c)) + +## [2.13.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.13.3...v2.13.4) (2023-09-28) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.49.0 ([#1376](https://github.com/googleapis/java-spanner-jdbc/issues/1376)) ([cc28b44](https://github.com/googleapis/java-spanner-jdbc/commit/cc28b44595d9a3cb112a35081fda98200d529ebf)) + +## [2.13.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.13.2...v2.13.3) (2023-09-27) + + +### Dependencies + +* Remove specific JDBC version from samples ([#1371](https://github.com/googleapis/java-spanner-jdbc/issues/1371)) ([b30e391](https://github.com/googleapis/java-spanner-jdbc/commit/b30e391792f2c2811038b35a065b35104bc614e7)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.48.0 ([#1370](https://github.com/googleapis/java-spanner-jdbc/issues/1370)) ([376e1c3](https://github.com/googleapis/java-spanner-jdbc/commit/376e1c3ccdd71351a5d6151ce19b9f88df163776)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.21.1 ([#1372](https://github.com/googleapis/java-spanner-jdbc/issues/1372)) ([bf64add](https://github.com/googleapis/java-spanner-jdbc/commit/bf64add3e9ce8148d2fc3ad010b8abd446208e4f)) +* Update dependency org.springframework.boot:spring-boot-starter-parent to 
v3.1.4 ([#1366](https://github.com/googleapis/java-spanner-jdbc/issues/1366)) ([749d2c3](https://github.com/googleapis/java-spanner-jdbc/commit/749d2c3698c900560b6f85247b0a41a85cd55ac8)) +* Update dependency org.springframework.data:spring-data-bom to v2023.0.4 ([#1367](https://github.com/googleapis/java-spanner-jdbc/issues/1367)) ([916ad4a](https://github.com/googleapis/java-spanner-jdbc/commit/916ad4a9e07b3afc15e53664f175db9e58f06376)) + + +### Documentation + +* Add sample for Spring Data MyBatis ([#1352](https://github.com/googleapis/java-spanner-jdbc/issues/1352)) ([ce52d07](https://github.com/googleapis/java-spanner-jdbc/commit/ce52d07c308bcde0ed1b0c9f4d3556db2590f722)) + +## [2.13.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.13.1...v2.13.2) (2023-09-26) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.16.0 ([#1358](https://github.com/googleapis/java-spanner-jdbc/issues/1358)) ([c4c4925](https://github.com/googleapis/java-spanner-jdbc/commit/c4c492576d3e6c192a1855e8d6b3474bb2ad0c22)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.16.1 ([#1363](https://github.com/googleapis/java-spanner-jdbc/issues/1363)) ([d574dbb](https://github.com/googleapis/java-spanner-jdbc/commit/d574dbb761fa7d0a7d1977844b48b8e4904f1bb0)) +* Update dependency com.spotify.fmt:fmt-maven-plugin to v2.21.1 ([#1359](https://github.com/googleapis/java-spanner-jdbc/issues/1359)) ([70af99e](https://github.com/googleapis/java-spanner-jdbc/commit/70af99e96451fb0158abb45580eaae09ad0b6210)) + +## [2.13.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.13.0...v2.13.1) (2023-09-21) + + +### Dependencies + +* Update dependency org.springframework.boot:spring-boot-starter-data-jdbc to v3.1.4 ([#1353](https://github.com/googleapis/java-spanner-jdbc/issues/1353)) ([88cd905](https://github.com/googleapis/java-spanner-jdbc/commit/88cd905bece9c8da7f26b637392e35ab2536edeb)) + +## 
[2.13.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.12.1...v2.13.0) (2023-09-15) + + +### Features + +* Support partitioned queries ([#1300](https://github.com/googleapis/java-spanner-jdbc/issues/1300)) ([c50da41](https://github.com/googleapis/java-spanner-jdbc/commit/c50da41e688ff48f8967c0f114f5bac8eaac49f9)) + + +### Bug Fixes + +* Comments should be sent to Spanner for PostgreSQL databases ([#1331](https://github.com/googleapis/java-spanner-jdbc/issues/1331)) ([7c9e781](https://github.com/googleapis/java-spanner-jdbc/commit/7c9e781bf45b112266e278e1df1586e56043698e)) + + +### Documentation + +* Create Spring Data JDBC sample ([#1334](https://github.com/googleapis/java-spanner-jdbc/issues/1334)) ([cefea55](https://github.com/googleapis/java-spanner-jdbc/commit/cefea55086eb191f71a1a493e046cb136f9f9f87)) + + +### Dependencies + +* Update actions/checkout action to v4 - abandoned ([#1333](https://github.com/googleapis/java-spanner-jdbc/issues/1333)) ([ce82b42](https://github.com/googleapis/java-spanner-jdbc/commit/ce82b42d3abb8de0f8b3ee2915c2008673775ea1)) +* Update dependency org.springframework.data:spring-data-bom to v2023.0.4 ([#1347](https://github.com/googleapis/java-spanner-jdbc/issues/1347)) ([893f61a](https://github.com/googleapis/java-spanner-jdbc/commit/893f61ab04e32c690f1ff9fc813bd2ba6ebca328)) + +## [2.12.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.12.0...v2.12.1) (2023-09-12) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.46.0 ([#1338](https://github.com/googleapis/java-spanner-jdbc/issues/1338)) ([0bcb5dc](https://github.com/googleapis/java-spanner-jdbc/commit/0bcb5dc9d0c6e4d3878ceb748c09e87c75d88675)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.47.0 ([#1341](https://github.com/googleapis/java-spanner-jdbc/issues/1341)) ([0010650](https://github.com/googleapis/java-spanner-jdbc/commit/00106505771ed75f83ceaf181f45f19e4251cd78)) + +## 
[2.12.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.9...v2.12.0) (2023-08-28) + + +### Features + +* Return generated keys ([#1310](https://github.com/googleapis/java-spanner-jdbc/issues/1310)) ([9b5ab37](https://github.com/googleapis/java-spanner-jdbc/commit/9b5ab377587de09004474cb1cf488919fc83d6cb)) + + +### Bug Fixes + +* Session leak for invalid update ([#1323](https://github.com/googleapis/java-spanner-jdbc/issues/1323)) ([a7d0fbb](https://github.com/googleapis/java-spanner-jdbc/commit/a7d0fbb529ff71b45d6ddbbad8fc3be43e7c966f)) + +## [2.11.9](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.8...v2.11.9) (2023-08-21) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.45.3 ([#1325](https://github.com/googleapis/java-spanner-jdbc/issues/1325)) ([d96d278](https://github.com/googleapis/java-spanner-jdbc/commit/d96d278f58942c38b30dc0a4e1f9f92aefdc760b)) + +## [2.11.8](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.7...v2.11.8) (2023-08-15) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.45.2 ([#1318](https://github.com/googleapis/java-spanner-jdbc/issues/1318)) ([e924178](https://github.com/googleapis/java-spanner-jdbc/commit/e9241787b94cb614f658f5e6c977ffc008fd3397)) + +## [2.11.7](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.6...v2.11.7) (2023-08-13) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.45.1 ([#1312](https://github.com/googleapis/java-spanner-jdbc/issues/1312)) ([2e99e35](https://github.com/googleapis/java-spanner-jdbc/commit/2e99e357c9688c89881433e77b3167924442abaa)) + +## [2.11.6](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.5...v2.11.6) (2023-08-05) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.14.0 ([#1303](https://github.com/googleapis/java-spanner-jdbc/issues/1303)) 
([8afb5ba](https://github.com/googleapis/java-spanner-jdbc/commit/8afb5ba0db1e21e317799db8e82f96820c01c6dd)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.45.0 ([#1305](https://github.com/googleapis/java-spanner-jdbc/issues/1305)) ([aac0d68](https://github.com/googleapis/java-spanner-jdbc/commit/aac0d68c11bada6d338483208be631cbb9ef8ed7)) + +## [2.11.5](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.4...v2.11.5) (2023-07-27) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.1 ([#1291](https://github.com/googleapis/java-spanner-jdbc/issues/1291)) ([aa40c60](https://github.com/googleapis/java-spanner-jdbc/commit/aa40c60df7dcf7fbf45c4c942be2dc31130a3d6b)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.44.0 ([#1295](https://github.com/googleapis/java-spanner-jdbc/issues/1295)) ([ce257f8](https://github.com/googleapis/java-spanner-jdbc/commit/ce257f8acad39581770a28a81d902251673db79f)) + +## [2.11.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.3...v2.11.4) (2023-07-12) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.43.2 ([#1284](https://github.com/googleapis/java-spanner-jdbc/issues/1284)) ([5d7c3a5](https://github.com/googleapis/java-spanner-jdbc/commit/5d7c3a55a3b37d7268222672fe21a75d019512fb)) + +## [2.11.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.2...v2.11.3) (2023-07-09) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.0 ([#1278](https://github.com/googleapis/java-spanner-jdbc/issues/1278)) ([acb2626](https://github.com/googleapis/java-spanner-jdbc/commit/acb2626a6d34f876da8e552cf98ef5a75c57b758)) + +## [2.11.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.1...v2.11.2) (2023-06-26) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.43.1 
([#1271](https://github.com/googleapis/java-spanner-jdbc/issues/1271)) ([c549901](https://github.com/googleapis/java-spanner-jdbc/commit/c5499012dff10c5999cf046f3e1076e17c973662)) + +## [2.11.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.11.0...v2.11.1) (2023-06-26) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.12.0 ([#1267](https://github.com/googleapis/java-spanner-jdbc/issues/1267)) ([bb23df0](https://github.com/googleapis/java-spanner-jdbc/commit/bb23df01bf401310c91c3fc2069a2ea16c70f5a4)) + +## [2.11.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.10.0...v2.11.0) (2023-06-12) + + +### Features + +* Support untyped NULL value parameters ([#1224](https://github.com/googleapis/java-spanner-jdbc/issues/1224)) ([80d2b9d](https://github.com/googleapis/java-spanner-jdbc/commit/80d2b9d3e4c3265522bbb20766bff1f164617711)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.11.0 ([#1254](https://github.com/googleapis/java-spanner-jdbc/issues/1254)) ([41f40fc](https://github.com/googleapis/java-spanner-jdbc/commit/41f40fce634cea205d5e5a9c1eb567ecb97ff655)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.42.3 ([#1248](https://github.com/googleapis/java-spanner-jdbc/issues/1248)) ([397d573](https://github.com/googleapis/java-spanner-jdbc/commit/397d5738a8126aaf090d533d0f20efb74a77a788)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.43.0 ([#1255](https://github.com/googleapis/java-spanner-jdbc/issues/1255)) ([ffe36b6](https://github.com/googleapis/java-spanner-jdbc/commit/ffe36b6b2087157c8d895fa348cff614435a4735)) + +## [2.10.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.16...v2.10.0) (2023-05-30) + + +### Features + +* Support Savepoint ([#1212](https://github.com/googleapis/java-spanner-jdbc/issues/1212)) 
([6833696](https://github.com/googleapis/java-spanner-jdbc/commit/683369633627367342b3a40e3abba4fa81069724)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.10.1 ([#1239](https://github.com/googleapis/java-spanner-jdbc/issues/1239)) ([8f7e7a7](https://github.com/googleapis/java-spanner-jdbc/commit/8f7e7a79be6d7326d7e6bdd6018bb76a695cb1b8)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.42.2 ([#1237](https://github.com/googleapis/java-spanner-jdbc/issues/1237)) ([97961b2](https://github.com/googleapis/java-spanner-jdbc/commit/97961b2c501d428575e283485386e04f4673d968)) + +## [2.9.16](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.15...v2.9.16) (2023-05-15) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.42.0 ([#1231](https://github.com/googleapis/java-spanner-jdbc/issues/1231)) ([011570f](https://github.com/googleapis/java-spanner-jdbc/commit/011570ffb1d18fafe0a322d5582c5f7206349e09)) + +## [2.9.15](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.14...v2.9.15) (2023-05-15) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.9.0 ([#1227](https://github.com/googleapis/java-spanner-jdbc/issues/1227)) ([329f258](https://github.com/googleapis/java-spanner-jdbc/commit/329f25862a0f7118cc0419568eca3a72a053a055)) + +## [2.9.14](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.13...v2.9.14) (2023-05-02) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.41.0 ([#1216](https://github.com/googleapis/java-spanner-jdbc/issues/1216)) ([9d0be37](https://github.com/googleapis/java-spanner-jdbc/commit/9d0be37fcea0ab90f8408b433295f7640f059f0a)) + +## [2.9.13](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.12...v2.9.13) (2023-04-27) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to 
v3.8.0 ([#1211](https://github.com/googleapis/java-spanner-jdbc/issues/1211)) ([a7e762d](https://github.com/googleapis/java-spanner-jdbc/commit/a7e762dddc2e0165bbf8e3e722df3b46c2b9a089)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.40.1 ([#1207](https://github.com/googleapis/java-spanner-jdbc/issues/1207)) ([44ea7f8](https://github.com/googleapis/java-spanner-jdbc/commit/44ea7f8520230c7a9c632e42b7b668f179d4aa95)) + +## [2.9.12](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.11...v2.9.12) (2023-04-19) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.7.0 ([#1204](https://github.com/googleapis/java-spanner-jdbc/issues/1204)) ([370e237](https://github.com/googleapis/java-spanner-jdbc/commit/370e237da3cd04a0ad50ba306cc2bcb7e3a8ec22)) + +## [2.9.11](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.10...v2.9.11) (2023-04-03) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.38.1 ([#1162](https://github.com/googleapis/java-spanner-jdbc/issues/1162)) ([e114284](https://github.com/googleapis/java-spanner-jdbc/commit/e114284185fba67b983b0839b787affcfd741bb4)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.38.2 ([#1165](https://github.com/googleapis/java-spanner-jdbc/issues/1165)) ([8cbf519](https://github.com/googleapis/java-spanner-jdbc/commit/8cbf519905456b69e223d1f3d3e7d29b83ffd477)) + +## [2.9.10](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.9...v2.9.10) (2023-03-29) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.6.0 ([#1160](https://github.com/googleapis/java-spanner-jdbc/issues/1160)) ([392819e](https://github.com/googleapis/java-spanner-jdbc/commit/392819e7705cd6a2b978b69c09fdb79f534a1215)) + +## [2.9.9](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.8...v2.9.9) (2023-03-20) + + +### Dependencies + +* Update dependency 
com.google.cloud:google-cloud-shared-dependencies to v3.5.0 ([#1153](https://github.com/googleapis/java-spanner-jdbc/issues/1153)) ([81969b5](https://github.com/googleapis/java-spanner-jdbc/commit/81969b512072443dac418dc82180cb49ee4113c1)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.38.0 ([#1154](https://github.com/googleapis/java-spanner-jdbc/issues/1154)) ([fe81de9](https://github.com/googleapis/java-spanner-jdbc/commit/fe81de965c170315554f438488ac8b1ef4667999)) + +## [2.9.8](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.7...v2.9.8) (2023-03-04) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.4.0 ([#1145](https://github.com/googleapis/java-spanner-jdbc/issues/1145)) ([34864e7](https://github.com/googleapis/java-spanner-jdbc/commit/34864e73fc78399d08be7972c220e4794ac16dfb)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.37.0 ([#1146](https://github.com/googleapis/java-spanner-jdbc/issues/1146)) ([541a1f0](https://github.com/googleapis/java-spanner-jdbc/commit/541a1f0323c5d7d72cb33acd696660dd94224858)) + +## [2.9.7](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.6...v2.9.7) (2023-02-21) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.36.1 ([#1138](https://github.com/googleapis/java-spanner-jdbc/issues/1138)) ([b7b9916](https://github.com/googleapis/java-spanner-jdbc/commit/b7b99166febdb833e7af17a6c3f38ac7e3f8f767)) + +## [2.9.6](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.5...v2.9.6) (2023-02-20) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.3.0 ([#1133](https://github.com/googleapis/java-spanner-jdbc/issues/1133)) ([57d8c6d](https://github.com/googleapis/java-spanner-jdbc/commit/57d8c6d93d681644fac1e18cc9ba1ab80595feb4)) + +## [2.9.5](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.4...v2.9.5) (2023-02-09) + + 
+### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.36.0 ([#1121](https://github.com/googleapis/java-spanner-jdbc/issues/1121)) ([dcb5826](https://github.com/googleapis/java-spanner-jdbc/commit/dcb5826b17b30303b8d6115bf55a4ede0e13ef84)) + +## [2.9.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.3...v2.9.4) (2023-02-08) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.2.0 ([#1118](https://github.com/googleapis/java-spanner-jdbc/issues/1118)) ([56ed82b](https://github.com/googleapis/java-spanner-jdbc/commit/56ed82b26e40ccfa88e046eb39f8b146bd647e16)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.35.2 ([#1114](https://github.com/googleapis/java-spanner-jdbc/issues/1114)) ([6575d0c](https://github.com/googleapis/java-spanner-jdbc/commit/6575d0c980fa894a5d50b978f8cc93974e804eb2)) +* Use perfmark-api version via shared dependencies BOM ([#1111](https://github.com/googleapis/java-spanner-jdbc/issues/1111)) ([beb5298](https://github.com/googleapis/java-spanner-jdbc/commit/beb52985676b0e981072ee7df60a4cf9f6139624)) + +## [2.9.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.2...v2.9.3) (2023-01-25) + + +### Bug Fixes + +* **java:** Skip fixing poms for special modules ([#1744](https://github.com/googleapis/java-spanner-jdbc/issues/1744)) ([#1108](https://github.com/googleapis/java-spanner-jdbc/issues/1108)) ([2915d76](https://github.com/googleapis/java-spanner-jdbc/commit/2915d766bf6578f1921f8ffe38d6b9be61c63813)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.2 ([#1109](https://github.com/googleapis/java-spanner-jdbc/issues/1109)) ([3614824](https://github.com/googleapis/java-spanner-jdbc/commit/3614824692899165d38ffd12de8cf3543560ea5c)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.35.1 
([#1106](https://github.com/googleapis/java-spanner-jdbc/issues/1106)) ([3bd7d6b](https://github.com/googleapis/java-spanner-jdbc/commit/3bd7d6b72ee87b3b83a50e2459f2d4e3b0fd87a2)) + +## [2.9.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.1...v2.9.2) (2023-01-14) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.35.0 ([#1097](https://github.com/googleapis/java-spanner-jdbc/issues/1097)) ([d8d11c3](https://github.com/googleapis/java-spanner-jdbc/commit/d8d11c3d46197680ffa28c85261736651a64eb63)) + +## [2.9.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.9.0...v2.9.1) (2023-01-12) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.1 ([#1090](https://github.com/googleapis/java-spanner-jdbc/issues/1090)) ([ef9ccf8](https://github.com/googleapis/java-spanner-jdbc/commit/ef9ccf84b78057edc338def2cea69b85ec6a75bf)) + +## [2.9.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.8.0...v2.9.0) (2022-12-14) + + +### Features + +* Add tests for DML with Returning clause ([#936](https://github.com/googleapis/java-spanner-jdbc/issues/936)) ([8a86467](https://github.com/googleapis/java-spanner-jdbc/commit/8a86467c6db7a4e99fdf23cdbce2d78382f8cde9)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.1.0 ([#1069](https://github.com/googleapis/java-spanner-jdbc/issues/1069)) ([c2ff33a](https://github.com/googleapis/java-spanner-jdbc/commit/c2ff33a8f6a7051e1639fee235e1b6bba5916c3a)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.34.1 ([#1072](https://github.com/googleapis/java-spanner-jdbc/issues/1072)) ([0045a5e](https://github.com/googleapis/java-spanner-jdbc/commit/0045a5e51c5a40523f70ec22c9b75bc8707dccb7)) + +## [2.8.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.13...v2.8.0) (2022-11-17) + + +### Features + +* Jsonb data type support 
([#926](https://github.com/googleapis/java-spanner-jdbc/issues/926)) ([cefc290](https://github.com/googleapis/java-spanner-jdbc/commit/cefc290d343a2a973e1efbeee33c349fbf98060c)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.6 ([#1044](https://github.com/googleapis/java-spanner-jdbc/issues/1044)) ([3b00962](https://github.com/googleapis/java-spanner-jdbc/commit/3b0096215b63df415a44df1e2f7cb765f9022630)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.33.0 ([#1051](https://github.com/googleapis/java-spanner-jdbc/issues/1051)) ([e728ac1](https://github.com/googleapis/java-spanner-jdbc/commit/e728ac1eee0987a59ce57bc4c2f76e0c42b840a9)) +* Update dependency io.perfmark:perfmark-api to v0.26.0 ([#1045](https://github.com/googleapis/java-spanner-jdbc/issues/1045)) ([87d578c](https://github.com/googleapis/java-spanner-jdbc/commit/87d578c7408586118f1941b976330357ca658d15)) + +## [2.7.13](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.12...v2.7.13) (2022-10-31) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.32.0 ([#1033](https://github.com/googleapis/java-spanner-jdbc/issues/1033)) ([bbbd2c6](https://github.com/googleapis/java-spanner-jdbc/commit/bbbd2c65d75a5805965fa7f9b9dc820e62f34ad3)) + +## [2.7.12](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.11...v2.7.12) (2022-10-27) + + +### Bug Fixes + +* **java:** Initialize classes at build-time to address native image 22.2.0 issues ([#1026](https://github.com/googleapis/java-spanner-jdbc/issues/1026)) ([8010da5](https://github.com/googleapis/java-spanner-jdbc/commit/8010da5ef539d509eb7f96d9b89edf35b2e809ad)) + +## [2.7.11](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.10...v2.7.11) (2022-10-20) + + +### Dependencies + +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.16 ([#1017](https://github.com/googleapis/java-spanner-jdbc/issues/1017)) 
([ee7888c](https://github.com/googleapis/java-spanner-jdbc/commit/ee7888c1720aa84ca2d4278a9e52f111c298ea9d)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.16 ([#1018](https://github.com/googleapis/java-spanner-jdbc/issues/1018)) ([3d5b100](https://github.com/googleapis/java-spanner-jdbc/commit/3d5b1004eb3d4e61f954b523ef2c45d59f0fbfe0)) + +## [2.7.10](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.9...v2.7.10) (2022-10-18) + + +### Dependencies + +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.15 ([#1009](https://github.com/googleapis/java-spanner-jdbc/issues/1009)) ([8a6da6c](https://github.com/googleapis/java-spanner-jdbc/commit/8a6da6cbd95a23f6601775fb7147567ca1017119)) +* Update dependency org.graalvm.buildtools:native-maven-plugin to v0.9.15 ([#1010](https://github.com/googleapis/java-spanner-jdbc/issues/1010)) ([b686d20](https://github.com/googleapis/java-spanner-jdbc/commit/b686d20f86f2408d714e8e0335332635f10971cb)) + +## [2.7.9](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.8...v2.7.9) (2022-10-06) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.31.2 ([#1002](https://github.com/googleapis/java-spanner-jdbc/issues/1002)) ([f2ac8e3](https://github.com/googleapis/java-spanner-jdbc/commit/f2ac8e392e972d326bc5702855385471febea87c)) + +## [2.7.8](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.7...v2.7.8) (2022-10-03) + + +### Bug Fixes + +* Upgrade native image plugin to 0.9.14 to unblock graalvm 22.2 update ([#998](https://github.com/googleapis/java-spanner-jdbc/issues/998)) ([e5ed330](https://github.com/googleapis/java-spanner-jdbc/commit/e5ed3300780eec0a32c0f135415063f65fb5ca82)) + + +### Dependencies + +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.4 ([#1001](https://github.com/googleapis/java-spanner-jdbc/issues/1001)) 
([feafe10](https://github.com/googleapis/java-spanner-jdbc/commit/feafe1076c7e272bfe69fe055f742072b5aac763)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.30.1 ([#973](https://github.com/googleapis/java-spanner-jdbc/issues/973)) ([205f312](https://github.com/googleapis/java-spanner-jdbc/commit/205f312ab3466e9efb3fc3c79ddc9644adcca527)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.30.2 ([#976](https://github.com/googleapis/java-spanner-jdbc/issues/976)) ([037a33a](https://github.com/googleapis/java-spanner-jdbc/commit/037a33a932d2a50412bc53cc05cf512a9d2a4548)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.31.0 ([#1000](https://github.com/googleapis/java-spanner-jdbc/issues/1000)) ([59d69fb](https://github.com/googleapis/java-spanner-jdbc/commit/59d69fb6548d7e1d23fc8285faa975befd00323a)) +* Update dependency org.graalvm.buildtools:junit-platform-native to v0.9.14 ([#977](https://github.com/googleapis/java-spanner-jdbc/issues/977)) ([32c881e](https://github.com/googleapis/java-spanner-jdbc/commit/32c881ea6530d23385f0c001e99d767146544210)) +* Update dependency org.junit.vintage:junit-vintage-engine to v5.9.1 ([#974](https://github.com/googleapis/java-spanner-jdbc/issues/974)) ([fab57d9](https://github.com/googleapis/java-spanner-jdbc/commit/fab57d9adea37ca4c3aa9aeb8d11e086ba86e538)) + +## [2.7.7](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.6...v2.7.7) (2022-09-16) + + +### Bug Fixes + +* Types.BOOLEAN and Types.BIT should have identical behaviour for nullability ([#920](https://github.com/googleapis/java-spanner-jdbc/issues/920)) ([42e5903](https://github.com/googleapis/java-spanner-jdbc/commit/42e590343ccbe294301a7b9933bd5db1830c8877)) + + +### Dependencies + +* Google-cloud-spanner-bom 6.30.0 ([#967](https://github.com/googleapis/java-spanner-jdbc/issues/967)) ([9385a5d](https://github.com/googleapis/java-spanner-jdbc/commit/9385a5dc52704de6fb1fe1c31f2c4ba2ad84d547)) +* Update 
dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.2 ([#963](https://github.com/googleapis/java-spanner-jdbc/issues/963)) ([811d96b](https://github.com/googleapis/java-spanner-jdbc/commit/811d96b0a7f90df7ec4d680aad085eec9d331a5e)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.0.3 ([#966](https://github.com/googleapis/java-spanner-jdbc/issues/966)) ([f8b88cd](https://github.com/googleapis/java-spanner-jdbc/commit/f8b88cd95c2bf02ce178a387ad82b149a9f4dbca)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.29.0 ([#943](https://github.com/googleapis/java-spanner-jdbc/issues/943)) ([9754023](https://github.com/googleapis/java-spanner-jdbc/commit/9754023b4d16aa78361d5be465d24b0481b84293)) +* Update dependency com.google.cloud:google-cloud-spanner-bom to v6.29.1 ([#961](https://github.com/googleapis/java-spanner-jdbc/issues/961)) ([a3e1fc6](https://github.com/googleapis/java-spanner-jdbc/commit/a3e1fc6b627e8f4da0191787e432d52b8174067a)) + +## [2.7.6](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.5...v2.7.6) (2022-08-11) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v3 ([#921](https://github.com/googleapis/java-spanner-jdbc/issues/921)) ([2aa0a40](https://github.com/googleapis/java-spanner-jdbc/commit/2aa0a40619e9f743877cff410db17968c374dc52)) + +## [2.7.5](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.4...v2.7.5) (2022-08-05) + + +### Bug Fixes + +* enable longpaths support for windows test ([#1485](https://github.com/googleapis/java-spanner-jdbc/issues/1485)) ([#908](https://github.com/googleapis/java-spanner-jdbc/issues/908)) ([2e53ade](https://github.com/googleapis/java-spanner-jdbc/commit/2e53adef54a6336bbdafcdb2dd5ee2011c07dc6f)) +* pr to troubleshoot native image tests ([#912](https://github.com/googleapis/java-spanner-jdbc/issues/912)) 
([4e78071](https://github.com/googleapis/java-spanner-jdbc/commit/4e78071c7451b194439b7b0b300488ec50c9cd1e)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.27.0 ([#898](https://github.com/googleapis/java-spanner-jdbc/issues/898)) ([c536dd6](https://github.com/googleapis/java-spanner-jdbc/commit/c536dd64f61f7a1f74d3c876156ffb2d99172ef1)) +* update dependency org.graalvm.buildtools:junit-platform-native to v0.9.13 ([#922](https://github.com/googleapis/java-spanner-jdbc/issues/922)) ([125a972](https://github.com/googleapis/java-spanner-jdbc/commit/125a972f902dab45833baeee1538bc773a69d4b6)) +* update dependency org.junit.vintage:junit-vintage-engine to v5.9.0 ([#924](https://github.com/googleapis/java-spanner-jdbc/issues/924)) ([9d6d313](https://github.com/googleapis/java-spanner-jdbc/commit/9d6d3135a3e2fbc3e377f5459924ed8869498c36)) + +## [2.7.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.3...v2.7.4) (2022-06-30) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.13.0 ([#899](https://github.com/googleapis/java-spanner-jdbc/issues/899)) ([0286068](https://github.com/googleapis/java-spanner-jdbc/commit/02860683f00678804c58ae2c0b213bc934441397)) + +## [2.7.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.2...v2.7.3) (2022-05-31) + + +### Bug Fixes + +* **java:** adding resource and reflection configurations for native image testing ([#809](https://github.com/googleapis/java-spanner-jdbc/issues/809)) ([6126d4f](https://github.com/googleapis/java-spanner-jdbc/commit/6126d4f55dde76c8b945999008bbee78203a1b75)) + +## [2.7.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.1...v2.7.2) (2022-05-31) + + +### Dependencies + +* google-cloud-spanner-bom 6.25.5 ([#887](https://github.com/googleapis/java-spanner-jdbc/issues/887)) ([2ec08bf](https://github.com/googleapis/java-spanner-jdbc/commit/2ec08bf722d68013b72d648c7facf80a7342dafc)) + +### 
[2.7.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.7.0...v2.7.1) (2022-05-27) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.25.2 ([#881](https://github.com/googleapis/java-spanner-jdbc/issues/881)) ([b1980b6](https://github.com/googleapis/java-spanner-jdbc/commit/b1980b68ec73bcf137546d167679bb7ae063cebf)) + +## [2.7.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.6.4...v2.7.0) (2022-05-24) + + +### Features + +* add build scripts for native image testing in Java 17 ([#1440](https://github.com/googleapis/java-spanner-jdbc/issues/1440)) ([#875](https://github.com/googleapis/java-spanner-jdbc/issues/875)) ([600e401](https://github.com/googleapis/java-spanner-jdbc/commit/600e4017e0b2e52e7a2f42ffca88b1326be03a31)) + + +### Dependencies + +* bump Spanner to 6.23.3 ([#862](https://github.com/googleapis/java-spanner-jdbc/issues/862)) ([b7b8efa](https://github.com/googleapis/java-spanner-jdbc/commit/b7b8efa80cdef9a827c85d469176463001c14b94)), closes [#788](https://github.com/googleapis/java-spanner-jdbc/issues/788) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.12.0 ([#874](https://github.com/googleapis/java-spanner-jdbc/issues/874)) ([d403f54](https://github.com/googleapis/java-spanner-jdbc/commit/d403f5414cf593e239140f5bbf89b06608167fbf)) +* update opencensus.version to v0.31.1 ([#865](https://github.com/googleapis/java-spanner-jdbc/issues/865)) ([61ba9be](https://github.com/googleapis/java-spanner-jdbc/commit/61ba9be15018c198a00d8f2e69121470c3da2ce0)) + +### [2.6.4](https://github.com/googleapis/java-spanner-jdbc/compare/v2.6.3...v2.6.4) (2022-04-21) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.10.0 ([#798](https://github.com/googleapis/java-spanner-jdbc/issues/798)) ([a77024c](https://github.com/googleapis/java-spanner-jdbc/commit/a77024cd0611e69c501d466c6759329f69cb1953)) + +### 
[2.6.3](https://github.com/googleapis/java-spanner-jdbc/compare/v2.6.2...v2.6.3) (2022-03-29) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.9.0 ([#789](https://github.com/googleapis/java-spanner-jdbc/issues/789)) ([5fd7287](https://github.com/googleapis/java-spanner-jdbc/commit/5fd7287861029acc1179cac8b604e435aa8b0666)) + +### [2.6.2](https://github.com/googleapis/java-spanner-jdbc/compare/v2.6.1...v2.6.2) (2022-03-14) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.21.2 ([#783](https://github.com/googleapis/java-spanner-jdbc/issues/783)) ([1625ad0](https://github.com/googleapis/java-spanner-jdbc/commit/1625ad0e5a827e6cbf68bf6a3bd18eb4b02fc62b)) + +### [2.6.1](https://github.com/googleapis/java-spanner-jdbc/compare/v2.6.0...v2.6.1) (2022-03-02) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.8.0 ([#770](https://github.com/googleapis/java-spanner-jdbc/issues/770)) ([4f32b2d](https://github.com/googleapis/java-spanner-jdbc/commit/4f32b2d6031870c386b37f574d2107f446ed7db7)) + +## [2.6.0](https://github.com/googleapis/java-spanner-jdbc/compare/v2.5.11...v2.6.0) (2022-02-24) + + +### Features + +* add support for PostgreSQL dialect ([#739](https://github.com/googleapis/java-spanner-jdbc/issues/739)) ([f9daa19](https://github.com/googleapis/java-spanner-jdbc/commit/f9daa19453b33252bf61160ff9cde1c37284ca2b)) + + +### Bug Fixes + +* create specific metadata queries for PG ([#759](https://github.com/googleapis/java-spanner-jdbc/issues/759)) ([caffda0](https://github.com/googleapis/java-spanner-jdbc/commit/caffda03e528da6a3c2c17b7058eb5d29f5086f9)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.20.0 ([#758](https://github.com/googleapis/java-spanner-jdbc/issues/758)) ([311d1ca](https://github.com/googleapis/java-spanner-jdbc/commit/311d1cabff7e7e2f5cf2cdcdda90ba536eadfa68)) + +### 
[2.5.11](https://github.com/googleapis/java-spanner-jdbc/compare/v2.5.10...v2.5.11) (2022-02-11) + + +### Dependencies + +* update actions/github-script action to v6 ([#745](https://github.com/googleapis/java-spanner-jdbc/issues/745)) ([2ccd5b8](https://github.com/googleapis/java-spanner-jdbc/commit/2ccd5b8ac878c81535c14e404aeaf67e6e41a464)) + +### [2.5.10](https://github.com/googleapis/java-spanner-jdbc/compare/v2.5.9...v2.5.10) (2022-02-09) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.18.0 ([#734](https://github.com/googleapis/java-spanner-jdbc/issues/734)) ([52f407a](https://github.com/googleapis/java-spanner-jdbc/commit/52f407a5e73d13fdeb9b5438d6e5cbd026cb3942)) + +### [2.5.9](https://github.com/googleapis/java-spanner-jdbc/compare/v2.5.8...v2.5.9) (2022-02-03) + + +### Bug Fixes + +* **java:** replace excludedGroup with exclude ([#720](https://github.com/googleapis/java-spanner-jdbc/issues/720)) ([7f13c88](https://github.com/googleapis/java-spanner-jdbc/commit/7f13c88f8c9e509de8c82cb788ab9b4964806381)) + + +### Dependencies + +* **java:** update actions/github-script action to v5 ([#1339](https://github.com/googleapis/java-spanner-jdbc/issues/1339)) ([#725](https://github.com/googleapis/java-spanner-jdbc/issues/725)) ([4f96ec1](https://github.com/googleapis/java-spanner-jdbc/commit/4f96ec1c864b176564ac3200565b5ea524d8adfb)) +* update actions/github-script action to v5 ([#724](https://github.com/googleapis/java-spanner-jdbc/issues/724)) ([5c1d6ff](https://github.com/googleapis/java-spanner-jdbc/commit/5c1d6ff72ba81dac101904f1ebd63e4a09b47c64)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.7.0 ([#728](https://github.com/googleapis/java-spanner-jdbc/issues/728)) ([b0a32d8](https://github.com/googleapis/java-spanner-jdbc/commit/b0a32d807cdf2458b60437ade38fa46511254701)) +* update opencensus.version to v0.31.0 ([#727](https://github.com/googleapis/java-spanner-jdbc/issues/727)) 
([fce0770](https://github.com/googleapis/java-spanner-jdbc/commit/fce077056fbf55395a736dc1f58f8ecbc89eb10d)) + +### [2.5.8](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.7...v2.5.8) (2022-01-07) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.17.4 ([#709](https://www.github.com/googleapis/java-spanner-jdbc/issues/709)) ([bd12d7c](https://www.github.com/googleapis/java-spanner-jdbc/commit/bd12d7c33b18ceb1df417df8e275ffa745b195b2)) + +### [2.5.7](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.6...v2.5.7) (2022-01-07) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.6.0 ([#704](https://www.github.com/googleapis/java-spanner-jdbc/issues/704)) ([bae659c](https://www.github.com/googleapis/java-spanner-jdbc/commit/bae659cac5a010c17767cdf4b3569e654efb605c)) + +### [2.5.6](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.5...v2.5.6) (2021-12-17) + + +### Bug Fixes + +* **java:** add -ntp flag to native image testing command ([#1299](https://www.github.com/googleapis/java-spanner-jdbc/issues/1299)) ([#688](https://www.github.com/googleapis/java-spanner-jdbc/issues/688)) ([4438aca](https://www.github.com/googleapis/java-spanner-jdbc/commit/4438aca73b9c8b33fa1edd23f823d87a093a6d59)) + + +### Dependencies + +* update OpenCensus API to 0.30.0 ([#694](https://www.github.com/googleapis/java-spanner-jdbc/issues/694)) ([345f136](https://www.github.com/googleapis/java-spanner-jdbc/commit/345f1366a7dd96f3b28afd353c5c23ebeff60c6b)) + +### [2.5.5](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.4...v2.5.5) (2021-12-03) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.5.1 ([#684](https://www.github.com/googleapis/java-spanner-jdbc/issues/684)) ([a2582d3](https://www.github.com/googleapis/java-spanner-jdbc/commit/a2582d3fbd3f0ea093477914e3a09af235e76595)) + +### 
[2.5.4](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.3...v2.5.4) (2021-11-17) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.16.0 ([#673](https://www.github.com/googleapis/java-spanner-jdbc/issues/673)) ([b4cc056](https://www.github.com/googleapis/java-spanner-jdbc/commit/b4cc0568e440b6a377cb4d8224c46057cd3ce1ee)) + +### [2.5.3](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.2...v2.5.3) (2021-11-15) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.5.0 ([#668](https://www.github.com/googleapis/java-spanner-jdbc/issues/668)) ([d453234](https://www.github.com/googleapis/java-spanner-jdbc/commit/d45323445d3e4a0753bed6cfe858fa891bca468e)) + +### [2.5.2](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.1...v2.5.2) (2021-11-11) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.15.2 ([#664](https://www.github.com/googleapis/java-spanner-jdbc/issues/664)) ([9f22c33](https://www.github.com/googleapis/java-spanner-jdbc/commit/9f22c331ee4c7340ed6f1b9f91a44ce1e4c5b792)) + +### [2.5.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.5.0...v2.5.1) (2021-10-27) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.15.1 ([#652](https://www.github.com/googleapis/java-spanner-jdbc/issues/652)) ([37d42d9](https://www.github.com/googleapis/java-spanner-jdbc/commit/37d42d91e49da9d30ca0d06b6a01bbe918fc3ab6)) + +## [2.5.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.4.5...v2.5.0) (2021-10-25) + + +### Features + +* support VIEW in metadata queries ([#633](https://www.github.com/googleapis/java-spanner-jdbc/issues/633)) ([b929191](https://www.github.com/googleapis/java-spanner-jdbc/commit/b929191a7b6699f9daf9a7c06097e9794c79ff8d)), closes [#632](https://www.github.com/googleapis/java-spanner-jdbc/issues/632) + + +### Bug Fixes 
+ +* **java:** java 17 dependency arguments ([#1266](https://www.github.com/googleapis/java-spanner-jdbc/issues/1266)) ([#645](https://www.github.com/googleapis/java-spanner-jdbc/issues/645)) ([0474502](https://www.github.com/googleapis/java-spanner-jdbc/commit/0474502936ff1a43244fcb830fecfc5f42895899)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.4.0 ([#641](https://www.github.com/googleapis/java-spanner-jdbc/issues/641)) ([ab26010](https://www.github.com/googleapis/java-spanner-jdbc/commit/ab26010ba107d4ba9591d661743ad542ae3b227f)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.13.0 ([#637](https://www.github.com/googleapis/java-spanner-jdbc/issues/637)) ([d981c8c](https://www.github.com/googleapis/java-spanner-jdbc/commit/d981c8c744829aa039b16df0b150caf49a99f1cc)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.14.0 ([#647](https://www.github.com/googleapis/java-spanner-jdbc/issues/647)) ([3cda837](https://www.github.com/googleapis/java-spanner-jdbc/commit/3cda83737c25c8878d19bee1727de84e086065b6)) +* upgrade Mockito to support Java17 ([#635](https://www.github.com/googleapis/java-spanner-jdbc/issues/635)) ([d78792f](https://www.github.com/googleapis/java-spanner-jdbc/commit/d78792f7acdd2fdac8a655fba06789ba50457679)) + +### [2.4.5](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.4.4...v2.4.5) (2021-09-29) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.12.5 ([#622](https://www.github.com/googleapis/java-spanner-jdbc/issues/622)) ([b255c54](https://www.github.com/googleapis/java-spanner-jdbc/commit/b255c5434141a900dca30c1e5dbe465b10b88718)) + +### [2.4.4](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.4.3...v2.4.4) (2021-09-23) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.3.0 
([#614](https://www.github.com/googleapis/java-spanner-jdbc/issues/614)) ([259e395](https://www.github.com/googleapis/java-spanner-jdbc/commit/259e395a2bd1db967a5686b321e752fcab92b500)) + +### [2.4.3](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.4.2...v2.4.3) (2021-09-14) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.1 ([#606](https://www.github.com/googleapis/java-spanner-jdbc/issues/606)) ([36c1791](https://www.github.com/googleapis/java-spanner-jdbc/commit/36c17916e2891d6c13ea6437a328dae8e16ffc13)) + +### [2.4.2](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.4.1...v2.4.2) (2021-09-02) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.12.2 ([#591](https://www.github.com/googleapis/java-spanner-jdbc/issues/591)) ([0f39b23](https://www.github.com/googleapis/java-spanner-jdbc/commit/0f39b23d66c8d3fb9314affbb67a47c394db3b46)) + +### [2.4.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.4.0...v2.4.1) (2021-08-31) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.2.0 ([#586](https://www.github.com/googleapis/java-spanner-jdbc/issues/586)) ([5c681bf](https://www.github.com/googleapis/java-spanner-jdbc/commit/5c681bf4a1c46535759cf6a0820798141051c1d0)) + +## [2.4.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.3.5...v2.4.0) (2021-08-27) + + +### Features + +* support JSON data type ([#447](https://www.github.com/googleapis/java-spanner-jdbc/issues/447)) ([ca1c906](https://www.github.com/googleapis/java-spanner-jdbc/commit/ca1c906e1ed3cad6444068ab9c8465401d6d3074)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.12.1 ([#577](https://www.github.com/googleapis/java-spanner-jdbc/issues/577)) ([a78b177](https://www.github.com/googleapis/java-spanner-jdbc/commit/a78b177f97c298a5b43fcadbca125e957e9f781a)) + +### 
[2.3.5](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.3.4...v2.3.5) (2021-08-24) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.12.0 ([#568](https://www.github.com/googleapis/java-spanner-jdbc/issues/568)) ([c032204](https://www.github.com/googleapis/java-spanner-jdbc/commit/c032204445d61c60385232216cdb52c217a85725)) + +### [2.3.4](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.3.3...v2.3.4) (2021-08-23) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2.1.0 ([#563](https://www.github.com/googleapis/java-spanner-jdbc/issues/563)) ([b0959a4](https://www.github.com/googleapis/java-spanner-jdbc/commit/b0959a412bae1a8024de92d5f9699d49863fb088)) + +### [2.3.3](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.3.2...v2.3.3) (2021-08-19) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.11.1 ([#556](https://www.github.com/googleapis/java-spanner-jdbc/issues/556)) ([36f0d32](https://www.github.com/googleapis/java-spanner-jdbc/commit/36f0d32aec65f098f7091d44a0e4acc98104aeb9)) + +### [2.3.2](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.3.1...v2.3.2) (2021-08-12) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.11.0 ([#549](https://www.github.com/googleapis/java-spanner-jdbc/issues/549)) ([2639e40](https://www.github.com/googleapis/java-spanner-jdbc/commit/2639e40ebbde19966653f06f8b664106568f6bac)) + +### [2.3.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.3.0...v2.3.1) (2021-08-12) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.10.1 ([#538](https://www.github.com/googleapis/java-spanner-jdbc/issues/538)) ([75507c4](https://www.github.com/googleapis/java-spanner-jdbc/commit/75507c4a42c3b051e8a14c1233b3b2526c0d3ccc)) + +## 
[2.3.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.2.6...v2.3.0) (2021-08-11) + + +### Features + +* add support for tagging to JDBC connection ([#270](https://www.github.com/googleapis/java-spanner-jdbc/issues/270)) ([a4bd82c](https://www.github.com/googleapis/java-spanner-jdbc/commit/a4bd82c8e4ce8b7179b943ac06b049598276f1b4)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v2 ([#544](https://www.github.com/googleapis/java-spanner-jdbc/issues/544)) ([366430d](https://www.github.com/googleapis/java-spanner-jdbc/commit/366430dc270edd09de1a0749ba360f312897b1aa)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.10.0 ([#537](https://www.github.com/googleapis/java-spanner-jdbc/issues/537)) ([8655ae5](https://www.github.com/googleapis/java-spanner-jdbc/commit/8655ae5955f5385a9d6445e13264427d73c4d37e)) + +### [2.2.6](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.2.5...v2.2.6) (2021-07-06) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.9.1 ([#525](https://www.github.com/googleapis/java-spanner-jdbc/issues/525)) ([37023b8](https://www.github.com/googleapis/java-spanner-jdbc/commit/37023b8295c97304aa0a55a28f71905fcbf5b93f)) + +### [2.2.5](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.2.4...v2.2.5) (2021-07-06) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.9.0 ([#521](https://www.github.com/googleapis/java-spanner-jdbc/issues/521)) ([8d840ac](https://www.github.com/googleapis/java-spanner-jdbc/commit/8d840ac855f4466c1d53a3b38d964e213708e5e5)) + +### [2.2.4](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.2.3...v2.2.4) (2021-07-02) + + +### Bug Fixes + +* Add `shopt -s nullglob` to dependencies script ([#514](https://www.github.com/googleapis/java-spanner-jdbc/issues/514)) 
([ae51b24](https://www.github.com/googleapis/java-spanner-jdbc/commit/ae51b241148606ffddeb0a703b853de67710e48b)) +* prevent relocating urls that start with com like /computeMetadata/ ([#511](https://www.github.com/googleapis/java-spanner-jdbc/issues/511)) ([1178a1d](https://www.github.com/googleapis/java-spanner-jdbc/commit/1178a1d35b4b0032acf71b3dbf862d4f9fb9399c)) +* Update dependencies.sh to not break on mac ([#506](https://www.github.com/googleapis/java-spanner-jdbc/issues/506)) ([e205c0c](https://www.github.com/googleapis/java-spanner-jdbc/commit/e205c0c8eba6ac23d747c433b42d8e2365528bd8)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.4.0 ([#518](https://www.github.com/googleapis/java-spanner-jdbc/issues/518)) ([045b858](https://www.github.com/googleapis/java-spanner-jdbc/commit/045b8586a7ca7b0e2bd341b27ca3e8a3530c992a)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.7.0 ([#513](https://www.github.com/googleapis/java-spanner-jdbc/issues/513)) ([e1affe3](https://www.github.com/googleapis/java-spanner-jdbc/commit/e1affe358a812a45b9d2c0c9ccd0b00e3aa3791e)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.8.0 ([#517](https://www.github.com/googleapis/java-spanner-jdbc/issues/517)) ([c9013ff](https://www.github.com/googleapis/java-spanner-jdbc/commit/c9013ff48269b158121e4c65c545be30752c31fb)) + +### [2.2.3](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.2.2...v2.2.3) (2021-06-15) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.6.1 ([#502](https://www.github.com/googleapis/java-spanner-jdbc/issues/502)) ([41a9cd4](https://www.github.com/googleapis/java-spanner-jdbc/commit/41a9cd49fed468f410ad226555f7b9ba46d857b3)) + +### [2.2.2](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.2.1...v2.2.2) (2021-06-10) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.6.0 
([#498](https://www.github.com/googleapis/java-spanner-jdbc/issues/498)) ([5849a97](https://www.github.com/googleapis/java-spanner-jdbc/commit/5849a970087d3fa1d1b42092b4568602563a1dbd)) + +### [2.2.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.2.0...v2.2.1) (2021-06-04) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.3.0 ([#490](https://www.github.com/googleapis/java-spanner-jdbc/issues/490)) ([bf0c9d6](https://www.github.com/googleapis/java-spanner-jdbc/commit/bf0c9d6bf612b50a59ea2d530430ccace79aaf35)) + +## [2.2.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.1.0...v2.2.0) (2021-05-26) + + +### Features + +* add `gcf-owl-bot[bot]` to `ignoreAuthors` ([#474](https://www.github.com/googleapis/java-spanner-jdbc/issues/474)) ([c14f17b](https://www.github.com/googleapis/java-spanner-jdbc/commit/c14f17b411b15e778a68ce998de04732b159d7ac)) + + +### Documentation + +* document connection properties in README ([#478](https://www.github.com/googleapis/java-spanner-jdbc/issues/478)) ([3ccc543](https://www.github.com/googleapis/java-spanner-jdbc/commit/3ccc5433bec261b18d2536b04590e7645e47ed9b)), closes [#456](https://www.github.com/googleapis/java-spanner-jdbc/issues/456) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.2.0 ([#473](https://www.github.com/googleapis/java-spanner-jdbc/issues/473)) ([a6cc069](https://www.github.com/googleapis/java-spanner-jdbc/commit/a6cc0697ed5916c665f007a1bf16660b8b91f9f9)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.5.0 ([#483](https://www.github.com/googleapis/java-spanner-jdbc/issues/483)) ([e7fec30](https://www.github.com/googleapis/java-spanner-jdbc/commit/e7fec30f2f2c5518821d5348d448f102301d65c3)) + +## [2.1.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.0.2...v2.1.0) (2021-05-18) + + +### Features + +* allow get/set Spanner Value instances 
([#454](https://www.github.com/googleapis/java-spanner-jdbc/issues/454)) ([d6935b8](https://www.github.com/googleapis/java-spanner-jdbc/commit/d6935b863349c58cfdd44d6ce20dba6f5dbc1472)), closes [#452](https://www.github.com/googleapis/java-spanner-jdbc/issues/452) + + +### Bug Fixes + +* NPE was thrown when getting an array of structs from a ResultSet ([#445](https://www.github.com/googleapis/java-spanner-jdbc/issues/445)) ([1dfb37b](https://www.github.com/googleapis/java-spanner-jdbc/commit/1dfb37b27ee661718fe80be0bf260c40f4b15582)), closes [#444](https://www.github.com/googleapis/java-spanner-jdbc/issues/444) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1.1.0 ([#463](https://www.github.com/googleapis/java-spanner-jdbc/issues/463)) ([f148c71](https://www.github.com/googleapis/java-spanner-jdbc/commit/f148c71bef2b762d7b4475ba7f28443c7938c394)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.4.0 ([#453](https://www.github.com/googleapis/java-spanner-jdbc/issues/453)) ([7dac8b3](https://www.github.com/googleapis/java-spanner-jdbc/commit/7dac8b3e43625aa28be214bd735fc3386770de04)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.4.4 ([#464](https://www.github.com/googleapis/java-spanner-jdbc/issues/464)) ([eeb31c0](https://www.github.com/googleapis/java-spanner-jdbc/commit/eeb31c050fda116203d9da5c4a80c7f1c6a6cac4)) + +### [2.0.2](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.0.1...v2.0.2) (2021-04-26) + + +### Bug Fixes + +* release scripts from issuing overlapping phases ([#434](https://www.github.com/googleapis/java-spanner-jdbc/issues/434)) ([b2eec0f](https://www.github.com/googleapis/java-spanner-jdbc/commit/b2eec0f079e64f5c21b89bbc0b02e3e981d6469a)) +* typo ([#431](https://www.github.com/googleapis/java-spanner-jdbc/issues/431)) ([a0b158b](https://www.github.com/googleapis/java-spanner-jdbc/commit/a0b158bf9931d610779dec51ca61107078e9398e)) + + +### 
Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.1 ([#438](https://www.github.com/googleapis/java-spanner-jdbc/issues/438)) ([aa56b5c](https://www.github.com/googleapis/java-spanner-jdbc/commit/aa56b5c1d5e3b1ccdaa0d5b877deccbda5aa0061)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v1 ([#441](https://www.github.com/googleapis/java-spanner-jdbc/issues/441)) ([df7f0e7](https://www.github.com/googleapis/java-spanner-jdbc/commit/df7f0e796c03f9607e57b4b6ba999c92ea14c58d)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.2.1 ([#430](https://www.github.com/googleapis/java-spanner-jdbc/issues/430)) ([212d9d0](https://www.github.com/googleapis/java-spanner-jdbc/commit/212d9d05c4f28ade71ab5484792188b11a5bcd8b)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.3.3 ([#439](https://www.github.com/googleapis/java-spanner-jdbc/issues/439)) ([a128c4c](https://www.github.com/googleapis/java-spanner-jdbc/commit/a128c4cbe0e6b66f9276b71f7733a46645186e88)) + +### [2.0.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v2.0.0...v2.0.1) (2021-04-13) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.21.0 ([#423](https://www.github.com/googleapis/java-spanner-jdbc/issues/423)) ([e0cf14a](https://www.github.com/googleapis/java-spanner-jdbc/commit/e0cf14a4dd087532924f49bb8e0431e1d681c7e8)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6.2.0 ([#420](https://www.github.com/googleapis/java-spanner-jdbc/issues/420)) ([fdd8809](https://www.github.com/googleapis/java-spanner-jdbc/commit/fdd880943394e4760c26eadc3a87d5a298591eb1)) + +## [2.0.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.21.0...v2.0.0) (2021-03-24) + + +### ⚠ BREAKING CHANGES + +* upgrade to Java 8 and JDBC 4.2 (#397) + +### Features + +* upgrade to Java 8 and JDBC 4.2 
([#397](https://www.github.com/googleapis/java-spanner-jdbc/issues/397)) ([7eedfbc](https://www.github.com/googleapis/java-spanner-jdbc/commit/7eedfbc78dad15e598d4b678027094ce1467e7f1)) + + +### Performance Improvements + +* use PLAN mode to get result metadata ([#388](https://www.github.com/googleapis/java-spanner-jdbc/issues/388)) ([8c7b665](https://www.github.com/googleapis/java-spanner-jdbc/commit/8c7b665c0c16dbec65da5040da038a320efa0a4a)) + + +### Documentation + +* add reference to jar-with-dependencies in readme ([#404](https://www.github.com/googleapis/java-spanner-jdbc/issues/404)) ([12c3235](https://www.github.com/googleapis/java-spanner-jdbc/commit/12c3235f4799cd2e74d523d1149c55573437c7ad)), closes [#399](https://www.github.com/googleapis/java-spanner-jdbc/issues/399) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-spanner-bom to v5.1.0 ([#393](https://www.github.com/googleapis/java-spanner-jdbc/issues/393)) ([9b36a54](https://www.github.com/googleapis/java-spanner-jdbc/commit/9b36a546963b044fc9eaf60667ab013afca6bc54)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v5.2.0 ([#398](https://www.github.com/googleapis/java-spanner-jdbc/issues/398)) ([8482652](https://www.github.com/googleapis/java-spanner-jdbc/commit/8482652e6d8933903ab8ccaece8bbe3224d080b5)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v6 ([#403](https://www.github.com/googleapis/java-spanner-jdbc/issues/403)) ([3e0fbd1](https://www.github.com/googleapis/java-spanner-jdbc/commit/3e0fbd1706fc269cd7bfcd2258181487cc40cece)) + +## [1.21.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.20.1...v1.21.0) (2021-03-10) + + +### Features + +* add support for CommitStats ([#261](https://www.github.com/googleapis/java-spanner-jdbc/issues/261)) ([b32e7ae](https://www.github.com/googleapis/java-spanner-jdbc/commit/b32e7aebd4c8d24d052e4616b5dd7735878e01c3)) +* allow using UUID in PreparedStatement 
([#365](https://www.github.com/googleapis/java-spanner-jdbc/issues/365)) ([4cbee6d](https://www.github.com/googleapis/java-spanner-jdbc/commit/4cbee6dcc2dfea2515437b55b0ecfc956205d739)), closes [#364](https://www.github.com/googleapis/java-spanner-jdbc/issues/364) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.1 ([#384](https://www.github.com/googleapis/java-spanner-jdbc/issues/384)) ([f0cdf11](https://www.github.com/googleapis/java-spanner-jdbc/commit/f0cdf117e20325601f1d1c13641267add5b39955)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v5 ([#386](https://www.github.com/googleapis/java-spanner-jdbc/issues/386)) ([910c50c](https://www.github.com/googleapis/java-spanner-jdbc/commit/910c50c611cb30b96a2d5d0472afd2d8e3687013)) + +### [1.20.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.20.0...v1.20.1) (2021-02-26) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.20.0 ([#374](https://www.github.com/googleapis/java-spanner-jdbc/issues/374)) ([398d886](https://www.github.com/googleapis/java-spanner-jdbc/commit/398d8864a72029a62b0a3adfaeafaeae76af3e1a)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v4.0.2 ([#369](https://www.github.com/googleapis/java-spanner-jdbc/issues/369)) ([3a984c2](https://www.github.com/googleapis/java-spanner-jdbc/commit/3a984c26076f187a710b59c8487e330636319f7c)) + +## [1.20.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.19.0...v1.20.0) (2021-02-23) + + +### Features + +* allow setting min/max sessions ([#335](https://www.github.com/googleapis/java-spanner-jdbc/issues/335)) ([a5862a5](https://www.github.com/googleapis/java-spanner-jdbc/commit/a5862a5572721fc898cf9b5f4ab8b99631848110)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.19.0 
([fa3721b](https://www.github.com/googleapis/java-spanner-jdbc/commit/fa3721b5d42e0f71247937366f1fa9e3a454beef)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v4 ([#359](https://www.github.com/googleapis/java-spanner-jdbc/issues/359)) ([49aa337](https://www.github.com/googleapis/java-spanner-jdbc/commit/49aa337dae08c4c39a7c32c14f92a7c858ad3dc8)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v4.0.1 ([#366](https://www.github.com/googleapis/java-spanner-jdbc/issues/366)) ([fa3721b](https://www.github.com/googleapis/java-spanner-jdbc/commit/fa3721b5d42e0f71247937366f1fa9e3a454beef)) + +## [1.19.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.18.3...v1.19.0) (2021-02-15) + + +### Features + +* allow unknown properties in connection url with lenient mode ([#284](https://www.github.com/googleapis/java-spanner-jdbc/issues/284)) ([0e557ef](https://www.github.com/googleapis/java-spanner-jdbc/commit/0e557ef7657cae04d263daa6717ee34290338b7a)) +* Support Array conversion to ResultSet ([#326](https://www.github.com/googleapis/java-spanner-jdbc/issues/326)) ([6ea0a26](https://www.github.com/googleapis/java-spanner-jdbc/commit/6ea0a26ca82565858d8049cc5403a4475edcce33)) +* support creating shaded jars ([#333](https://www.github.com/googleapis/java-spanner-jdbc/issues/333)) ([8b4e50d](https://www.github.com/googleapis/java-spanner-jdbc/commit/8b4e50d10a9121334be3d8b5ed0d8fc9ff63c182)), closes [#316](https://www.github.com/googleapis/java-spanner-jdbc/issues/316) +* support default ClientInfo properties ([#324](https://www.github.com/googleapis/java-spanner-jdbc/issues/324)) ([250c4c1](https://www.github.com/googleapis/java-spanner-jdbc/commit/250c4c127f75cc4979e511e2459813f22fec67de)) + + +### Bug Fixes + +* getting resultset metadata twice could skip row ([#323](https://www.github.com/googleapis/java-spanner-jdbc/issues/323)) 
([f8149af](https://www.github.com/googleapis/java-spanner-jdbc/commit/f8149afb63b9a66e89119290c594b50e599f351a)) +* Return entire stack trace for deadline exceeded error ([#347](https://www.github.com/googleapis/java-spanner-jdbc/issues/347)) ([2f94976](https://www.github.com/googleapis/java-spanner-jdbc/commit/2f94976514bfd08afaacc25e802ef1c9717aa75a)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.18.0 ([#320](https://www.github.com/googleapis/java-spanner-jdbc/issues/320)) ([e1cd90d](https://www.github.com/googleapis/java-spanner-jdbc/commit/e1cd90d8afbfa725a92186b85bd446413c8ed4bc)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v3.3.1 ([#319](https://www.github.com/googleapis/java-spanner-jdbc/issues/319)) ([7cd990b](https://www.github.com/googleapis/java-spanner-jdbc/commit/7cd990b5ba49f05fba4b1a1ce49f8de133b04868)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v3.3.2 ([#325](https://www.github.com/googleapis/java-spanner-jdbc/issues/325)) ([9d65dab](https://www.github.com/googleapis/java-spanner-jdbc/commit/9d65dab248efb5c8e8c5ad56775731891e225b3e)) + + +### Documentation + +* fix javadoc formatting ([#343](https://www.github.com/googleapis/java-spanner-jdbc/issues/343)) ([2ac1964](https://www.github.com/googleapis/java-spanner-jdbc/commit/2ac19641d9496eca33f57a034367a4f17bc14f1c)) + +### [1.18.3](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.18.2...v1.18.3) (2020-12-16) + + +### Dependencies + +* update spanner to 3.1.2 ([#306](https://www.github.com/googleapis/java-spanner-jdbc/issues/306)) ([596e8ed](https://www.github.com/googleapis/java-spanner-jdbc/commit/596e8ed01dc8ffc01c37b233f688d163b8693f85)) + +### [1.18.2](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.18.1...v1.18.2) (2020-12-16) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.17.0 
([#302](https://www.github.com/googleapis/java-spanner-jdbc/issues/302)) ([9a2efa1](https://www.github.com/googleapis/java-spanner-jdbc/commit/9a2efa14ad402130ca542d5b8b9f9bbb58587404)) + +### [1.18.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.18.0...v1.18.1) (2020-12-14) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.16.1 ([5d5e5fc](https://www.github.com/googleapis/java-spanner-jdbc/commit/5d5e5fccef229e4edd9d34a93553a85a1e97b14f)) + +## [1.18.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.17.3...v1.18.0) (2020-12-10) + + +### Features + +* expose more methods from Connection in JDBC ([#255](https://www.github.com/googleapis/java-spanner-jdbc/issues/255)) ([697837c](https://www.github.com/googleapis/java-spanner-jdbc/commit/697837ce0ce646a9ca45a0afc3a2e1e368c712f7)), closes [#253](https://www.github.com/googleapis/java-spanner-jdbc/issues/253) +* report whether column is generated in JDBC metadata ([#291](https://www.github.com/googleapis/java-spanner-jdbc/issues/291)) ([9aa9a1f](https://www.github.com/googleapis/java-spanner-jdbc/commit/9aa9a1f8f673554ae71e78937007166f220dd255)), closes [#290](https://www.github.com/googleapis/java-spanner-jdbc/issues/290) + + +### Documentation + +* add connection example to readme ([#281](https://www.github.com/googleapis/java-spanner-jdbc/issues/281)) ([00314e6](https://www.github.com/googleapis/java-spanner-jdbc/commit/00314e643ee6570ed6025630616ad0df70789447)) +* fix product docs link ([#282](https://www.github.com/googleapis/java-spanner-jdbc/issues/282)) ([0065a9b](https://www.github.com/googleapis/java-spanner-jdbc/commit/0065a9b319b09e71bf285f85c33514442a163dea)) + + +### Dependencies + +* do not re-declare grpc dependencies as test dependencies ([#278](https://www.github.com/googleapis/java-spanner-jdbc/issues/278)) ([4bc59f8](https://www.github.com/googleapis/java-spanner-jdbc/commit/4bc59f8d7f27cee0bbc54b91271e2aadd7cb31da)) 
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.16.0 ([#286](https://www.github.com/googleapis/java-spanner-jdbc/issues/286)) ([2d804f5](https://www.github.com/googleapis/java-spanner-jdbc/commit/2d804f5b52271356598588764c77d1a13f3b7183)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v3 ([#260](https://www.github.com/googleapis/java-spanner-jdbc/issues/260)) ([40cdbc0](https://www.github.com/googleapis/java-spanner-jdbc/commit/40cdbc01c91c153c8c3fd36cf7bf91d80b187f03)) +* update dependency com.google.cloud:google-cloud-spanner-bom to v3.0.5 ([#287](https://www.github.com/googleapis/java-spanner-jdbc/issues/287)) ([9cef4d5](https://www.github.com/googleapis/java-spanner-jdbc/commit/9cef4d57f6b63caba71ba77160677f73569a8fea)) + +### [1.17.3](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.17.2...v1.17.3) (2020-11-17) + + +### Documentation + +* add package-info to jdbc ([#264](https://www.github.com/googleapis/java-spanner-jdbc/issues/264)) ([a2d26a1](https://www.github.com/googleapis/java-spanner-jdbc/commit/a2d26a1a9d1595c5a4d766419d3f46619d8d6c71)) +* add simple connection sample to readme ([#263](https://www.github.com/googleapis/java-spanner-jdbc/issues/263)) ([3a305ba](https://www.github.com/googleapis/java-spanner-jdbc/commit/3a305ba00b9739ceb17c879f82319b0a6b2a3f9f)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.15.0 ([#237](https://www.github.com/googleapis/java-spanner-jdbc/issues/237)) ([6487a24](https://www.github.com/googleapis/java-spanner-jdbc/commit/6487a24def74e630ee43ec058a267f8d0889c336)) +* use google-cloud-spanner-bom ([#258](https://www.github.com/googleapis/java-spanner-jdbc/issues/258)) ([c9906c9](https://www.github.com/googleapis/java-spanner-jdbc/commit/c9906c9440d1574ae74d735aad8b3c255704d59d)) + +### [1.17.2](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.17.1...v1.17.2) (2020-10-29) + + +### Dependencies + 
+* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.10.0 ([#219](https://www.github.com/googleapis/java-spanner-jdbc/issues/219)) ([0ab5c5b](https://www.github.com/googleapis/java-spanner-jdbc/commit/0ab5c5b5bee3324cb641f0505068ff99bf3d204d)) +* update dependency junit:junit to v4.13.1 ([#232](https://www.github.com/googleapis/java-spanner-jdbc/issues/232)) ([a6c09d7](https://www.github.com/googleapis/java-spanner-jdbc/commit/a6c09d73d707bece320f59dcab98dfde6802a5b3)) + +### [1.17.1](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.17.0...v1.17.1) (2020-09-21) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.9.0 ([#199](https://www.github.com/googleapis/java-spanner-jdbc/issues/199)) ([59a7d07](https://www.github.com/googleapis/java-spanner-jdbc/commit/59a7d07c284210033bd1d587b09c44d9c271a52e)) + +## [1.17.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.16.0...v1.17.0) (2020-09-04) + + +### Features + +* add support for NUMERIC type ([#185](https://www.github.com/googleapis/java-spanner-jdbc/issues/185)) ([4579249](https://www.github.com/googleapis/java-spanner-jdbc/commit/457924980ab0f10fcbb61a0cf1442069f4d0b8b4)) + + +### Dependencies + +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.3 ([#180](https://www.github.com/googleapis/java-spanner-jdbc/issues/180)) ([b446d48](https://www.github.com/googleapis/java-spanner-jdbc/commit/b446d48e40973ef03ec1d3c470a338c371b967a1)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.4 ([#187](https://www.github.com/googleapis/java-spanner-jdbc/issues/187)) ([ddb96f2](https://www.github.com/googleapis/java-spanner-jdbc/commit/ddb96f2424c11d0cde3a4b702a1e3599c0489e96)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.6 ([#189](https://www.github.com/googleapis/java-spanner-jdbc/issues/189)) 
([2259332](https://www.github.com/googleapis/java-spanner-jdbc/commit/2259332c7657cd160aef889f88649713dd2fe61e)) + +## [1.16.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.15.0...v1.16.0) (2020-07-08) + + +### Features + +* publish shaded jar on maven central ([#83](https://www.github.com/googleapis/java-spanner-jdbc/issues/83)) ([2a7c53e](https://www.github.com/googleapis/java-spanner-jdbc/commit/2a7c53e5d503eefc42e7927e58430f8d24fe5b48)) +* **deps:** adopt flatten plugin and google-cloud-shared-dependencies ([#162](https://www.github.com/googleapis/java-spanner-jdbc/issues/162)) ([6715a8b](https://www.github.com/googleapis/java-spanner-jdbc/commit/6715a8b24066595036c6228df18cca084a4bb1ad)) + + +### Bug Fixes + +* add missing documentation for connection properties ([#155](https://www.github.com/googleapis/java-spanner-jdbc/issues/155)) ([9b53df4](https://www.github.com/googleapis/java-spanner-jdbc/commit/9b53df4ea33926d9fa0955a4445c2ea6790ac3dc)), closes [#152](https://www.github.com/googleapis/java-spanner-jdbc/issues/152) +* ResultSet#get(...) 
methods should auto convert values ([#143](https://www.github.com/googleapis/java-spanner-jdbc/issues/143)) ([bc7d5bd](https://www.github.com/googleapis/java-spanner-jdbc/commit/bc7d5bd6205b23c99d01d2895ffb5c48ba423ea3)) +* return empty catalog name ([#174](https://www.github.com/googleapis/java-spanner-jdbc/issues/174)) ([cedd167](https://www.github.com/googleapis/java-spanner-jdbc/commit/cedd167c5973fe50e0205ae641f6580ebd627884)) +* test allowed a too old staleness ([#131](https://www.github.com/googleapis/java-spanner-jdbc/issues/131)) ([8a5e443](https://www.github.com/googleapis/java-spanner-jdbc/commit/8a5e44321b6587e1f719f4189dfe2af3482e47cc)) + + +### Dependencies + +* update core dependencies ([#105](https://www.github.com/googleapis/java-spanner-jdbc/issues/105)) ([d7c7095](https://www.github.com/googleapis/java-spanner-jdbc/commit/d7c7095e0f22cd477f56419e8300d67d48eb8484)) +* update core dependencies to v1.29.0 ([#121](https://www.github.com/googleapis/java-spanner-jdbc/issues/121)) ([1324769](https://www.github.com/googleapis/java-spanner-jdbc/commit/13247691db249a6bdd56ac1f5b03837ebfb0624f)) +* update core dependencies to v1.93.4 ([#111](https://www.github.com/googleapis/java-spanner-jdbc/issues/111)) ([a44b498](https://www.github.com/googleapis/java-spanner-jdbc/commit/a44b498be79189aa3ae2f9a32c4105a41d81922b)) +* update core dependencies to v29 (major) ([#114](https://www.github.com/googleapis/java-spanner-jdbc/issues/114)) ([143e6b6](https://www.github.com/googleapis/java-spanner-jdbc/commit/143e6b645fd09b91e9b3d8d1db2e522a04103c1e)) +* update dependency com.google.api:api-common to v1.9.0 ([#100](https://www.github.com/googleapis/java-spanner-jdbc/issues/100)) ([dc0793c](https://www.github.com/googleapis/java-spanner-jdbc/commit/dc0793ce0ea3ec1faaebcf59989e0b0977deffcf)) +* update dependency com.google.api.grpc:proto-google-common-protos to v1.18.0 ([#128](https://www.github.com/googleapis/java-spanner-jdbc/issues/128)) 
([3f00adb](https://www.github.com/googleapis/java-spanner-jdbc/commit/3f00adbe0d1d317dfefe5ec3ee5a0be9fe0f5923)) +* update dependency com.google.cloud:google-cloud-shared-dependencies to v0.8.2 ([#175](https://www.github.com/googleapis/java-spanner-jdbc/issues/175)) ([a553f79](https://www.github.com/googleapis/java-spanner-jdbc/commit/a553f7919314152583b6aae9a98a450d3c50a8fc)) +* update dependency com.google.cloud.samples:shared-configuration to v1.0.13 ([#99](https://www.github.com/googleapis/java-spanner-jdbc/issues/99)) ([63717c2](https://www.github.com/googleapis/java-spanner-jdbc/commit/63717c2fc9cc2b3a43a6b0412fefca7bbfd7e12d)) +* update dependency org.threeten:threetenbp to v1.4.3 ([#94](https://www.github.com/googleapis/java-spanner-jdbc/issues/94)) ([cb7229f](https://www.github.com/googleapis/java-spanner-jdbc/commit/cb7229fbbf3e1d71b8a4331eb5ab889af5d4fd31)) +* update dependency org.threeten:threetenbp to v1.4.4 ([#124](https://www.github.com/googleapis/java-spanner-jdbc/issues/124)) ([4d3daa4](https://www.github.com/googleapis/java-spanner-jdbc/commit/4d3daa484394f790e07557175aa7311b248da6f7)) +* upgrade to latest bom and remove dependency exclusions ([#168](https://www.github.com/googleapis/java-spanner-jdbc/issues/168)) ([291189c](https://www.github.com/googleapis/java-spanner-jdbc/commit/291189cec8e9d166fb8df9d26a6381be9cbded9d)) + +## [1.15.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.14.0...v1.15.0) (2020-03-24) + + +### Features + +* add support for QueryOptions ([#76](https://www.github.com/googleapis/java-spanner-jdbc/issues/76)) ([b3f4cf7](https://www.github.com/googleapis/java-spanner-jdbc/commit/b3f4cf7852a2fd5f22660cc3f25a6253b9a118ab)) + + +### Dependencies + +* update spanner.version to v1.52.0 ([#95](https://www.github.com/googleapis/java-spanner-jdbc/issues/95)) ([cdf9d30](https://www.github.com/googleapis/java-spanner-jdbc/commit/cdf9d30e8ca387d87a6ffe00fa09818d135547f4)) + +## 
[1.14.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/v1.13.0...v1.14.0) (2020-03-18) + + +### Features + +* add support for foreign keys ([#78](https://www.github.com/googleapis/java-spanner-jdbc/issues/78)) ([9e770f2](https://www.github.com/googleapis/java-spanner-jdbc/commit/9e770f281c03a1e9c034e5ff3ddee44fa20a7b30)), closes [#77](https://www.github.com/googleapis/java-spanner-jdbc/issues/77) + + +### Bug Fixes + +* add missing netty-shaded lib for über-jar ([#80](https://www.github.com/googleapis/java-spanner-jdbc/issues/80)) ([3d6f356](https://www.github.com/googleapis/java-spanner-jdbc/commit/3d6f35669671194e6772fe327ce48f27e5bf4643)) +* fix deprecation warnings in JDBC (test) files ([#81](https://www.github.com/googleapis/java-spanner-jdbc/issues/81)) ([a5e031d](https://www.github.com/googleapis/java-spanner-jdbc/commit/a5e031d3183f8fe88a621500f235ca2b0242f50b)) +* include Spanner gRPC test dependencies ([#63](https://www.github.com/googleapis/java-spanner-jdbc/issues/63)) ([a34bfc0](https://www.github.com/googleapis/java-spanner-jdbc/commit/a34bfc0ff1c2ddeef077dbfae4c56bdd53febcb2)) + + +### Dependencies + +* update core dependencies ([1ae098e](https://www.github.com/googleapis/java-spanner-jdbc/commit/1ae098e924c2a488cfddd0a3aee9511274b7a515)) +* update core dependencies ([#40](https://www.github.com/googleapis/java-spanner-jdbc/issues/40)) ([18c3a1b](https://www.github.com/googleapis/java-spanner-jdbc/commit/18c3a1b069cb507a91d0320e64a8bf8ae8efe394)) +* update core dependencies ([#73](https://www.github.com/googleapis/java-spanner-jdbc/issues/73)) ([cfa1539](https://www.github.com/googleapis/java-spanner-jdbc/commit/cfa153997599c36f1243e87f1ea0760694657dfe)) +* update core dependencies to v1.27.1 ([#61](https://www.github.com/googleapis/java-spanner-jdbc/issues/61)) ([181991b](https://www.github.com/googleapis/java-spanner-jdbc/commit/181991bda1f66de707d27dad9658b9177626595a)) +* update core dependencies to v1.27.2 
([#71](https://www.github.com/googleapis/java-spanner-jdbc/issues/71)) ([12425fc](https://www.github.com/googleapis/java-spanner-jdbc/commit/12425fcb4382449e4a7a0edad4c812b7ce15aa71)) +* update core dependencies to v1.54.0 ([#72](https://www.github.com/googleapis/java-spanner-jdbc/issues/72)) ([5676021](https://www.github.com/googleapis/java-spanner-jdbc/commit/567602177e05fa198eaa011fbca05cfe4b72fb13)) +* update core dependencies to v1.92.5 ([#53](https://www.github.com/googleapis/java-spanner-jdbc/issues/53)) ([604ee2b](https://www.github.com/googleapis/java-spanner-jdbc/commit/604ee2b75204ad52eaf724c3fb71e8c13540af7c)) +* update core transport dependencies to v1.34.1 ([#43](https://www.github.com/googleapis/java-spanner-jdbc/issues/43)) ([2b6f04d](https://www.github.com/googleapis/java-spanner-jdbc/commit/2b6f04da3aeebac778fb664c4564fb8b58bf3be4)) +* update core transport dependencies to v1.34.2 ([#62](https://www.github.com/googleapis/java-spanner-jdbc/issues/62)) ([8739015](https://www.github.com/googleapis/java-spanner-jdbc/commit/8739015f62289adb92fd55b19a5bff8762da20a9)) +* update dependency com.google.api-client:google-api-client-bom to v1.30.8 ([#46](https://www.github.com/googleapis/java-spanner-jdbc/issues/46)) ([ef891b0](https://www.github.com/googleapis/java-spanner-jdbc/commit/ef891b000045d1f39f91b6a0ed3abaab19c5f05e)) +* update dependency com.google.api-client:google-api-client-bom to v1.30.9 ([#74](https://www.github.com/googleapis/java-spanner-jdbc/issues/74)) ([3b62299](https://www.github.com/googleapis/java-spanner-jdbc/commit/3b622999b9f9645a6086e5efd3206f4d7b0806bc)) +* update dependency com.google.truth:truth to v1.0.1 ([#32](https://www.github.com/googleapis/java-spanner-jdbc/issues/32)) ([5205863](https://www.github.com/googleapis/java-spanner-jdbc/commit/52058636e10951e883523204f0f161db8a972d62)) +* update protobuf.version to v3.11.3 ([#48](https://www.github.com/googleapis/java-spanner-jdbc/issues/48)) 
([0779fcb](https://www.github.com/googleapis/java-spanner-jdbc/commit/0779fcb0bfe935c3c302fa8442f733c7e3629761)) +* update protobuf.version to v3.11.4 ([#64](https://www.github.com/googleapis/java-spanner-jdbc/issues/64)) ([f485cff](https://www.github.com/googleapis/java-spanner-jdbc/commit/f485cfffa0de27ce35f5d16c689c31c6ea22138e)) +* update spanner.version to v1.51.0 ([#75](https://www.github.com/googleapis/java-spanner-jdbc/issues/75)) ([4fff168](https://www.github.com/googleapis/java-spanner-jdbc/commit/4fff168eae61fb55933cf3afd67f24ca65dfde54)) + +## [1.13.0](https://www.github.com/googleapis/java-spanner-jdbc/compare/1.12.0...v1.13.0) (2020-01-28) + + +### Features + +* allow using existing OAuth token for JDBC connection ([#37](https://www.github.com/googleapis/java-spanner-jdbc/issues/37)) ([b368b84](https://www.github.com/googleapis/java-spanner-jdbc/commit/b368b8407b3e2884458d956a9207cb4e12e37848)), closes [#29](https://www.github.com/googleapis/java-spanner-jdbc/issues/29) + + +### Bug Fixes + +* add support for WITH clauses ([#42](https://www.github.com/googleapis/java-spanner-jdbc/issues/42)) ([7f4bea4](https://www.github.com/googleapis/java-spanner-jdbc/commit/7f4bea43c42df68258f776944ea744069a1a218e)) +* allow dots and colons in project id ([#36](https://www.github.com/googleapis/java-spanner-jdbc/issues/36)) ([5957008](https://www.github.com/googleapis/java-spanner-jdbc/commit/59570085403fa7002616dd535df4666a384c3438)), closes [#33](https://www.github.com/googleapis/java-spanner-jdbc/issues/33) + + +### Dependencies + +* update core dependencies ([#25](https://www.github.com/googleapis/java-spanner-jdbc/issues/25)) ([9f4f4ad](https://www.github.com/googleapis/java-spanner-jdbc/commit/9f4f4ad1b076bd3131296c9f7f6558f2bc885d42)) +* update dependency org.threeten:threetenbp to v1.4.1 ([7cc951b](https://www.github.com/googleapis/java-spanner-jdbc/commit/7cc951b340b072e7853df868aaf7c17f854a69f5)) diff --git a/java-spanner-jdbc/README.md 
b/java-spanner-jdbc/README.md new file mode 100644 index 000000000000..874179672ab0 --- /dev/null +++ b/java-spanner-jdbc/README.md @@ -0,0 +1,273 @@ +# Google Cloud Spanner JDBC Client for Java + +Java idiomatic client for [Google Cloud Spanner JDBC][product-docs]. + +[![Maven][maven-version-image]][maven-version-link] +![Stability][stability-image] + +- [Product Documentation][product-docs] +- [Client Library Documentation][javadocs] + + +## Quickstart + + +If you are using Maven, add this to your pom.xml file: + + + +```xml + + com.google.cloud + google-cloud-spanner-jdbc + 2.35.4 + +``` + + + +If you are using Gradle without BOM, add this to your dependencies + + +```Groovy +implementation 'com.google.cloud:google-cloud-spanner-jdbc:2.35.4' +``` + + +If you are using SBT, add this to your dependencies + + +```Scala +libraryDependencies += "com.google.cloud" % "google-cloud-spanner-jdbc" % "2.35.4" +``` + + +## Authentication + +See the [Authentication][authentication] section in the base directory's README. + +## Authorization + +The client application making API calls must be granted [authorization scopes][auth-scopes] required for the desired Google Cloud Spanner JDBC APIs, and the authenticated principal must have the [IAM role(s)][predefined-iam-roles] required to access GCP resources using the Google Cloud Spanner JDBC API calls. + +## Getting Started + +### Prerequisites + +You will need a [Google Cloud Platform Console][developer-console] project with the Google Cloud Spanner JDBC [API enabled][enable-api]. + +[Follow these instructions][create-project] to get your project set up. You will also need to set up the local development environment by +[installing the Google Cloud SDK][cloud-sdk] and running the following commands in command line: +`gcloud auth login` and `gcloud config set project [YOUR PROJECT ID]`. + +### Installation and setup + +You'll need to obtain the `google-cloud-spanner-jdbc` library. 
See the [Quickstart](#quickstart) section +to add `google-cloud-spanner-jdbc` as a dependency in your code. + +## About Google Cloud Spanner JDBC + + +[Google Cloud Spanner JDBC][product-docs] + +See the [Google Cloud Spanner JDBC client library docs][javadocs] to learn how to +use this Google Cloud Spanner JDBC Client Library. + + +### Creating a JDBC Connection + +The following example shows how to create a JDBC connection to Cloud Spanner and execute a simple query. + +```java +String projectId = "my-project"; +String instanceId = "my-instance"; +String databaseId = "my-database"; + +try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + projectId, instanceId, databaseId))) { + try (Statement statement = connection.createStatement()) { + try (ResultSet rs = statement.executeQuery("SELECT CURRENT_TIMESTAMP()")) { + while (rs.next()) { + System.out.printf( + "Connected to Cloud Spanner at [%s]%n", rs.getTimestamp(1).toString()); + } + } + } +} +``` + +### Connection URL Properties + +The Cloud Spanner JDBC driver supports the following connection URL properties. Note that all of +these can also be supplied in a Properties instance that is passed to the +`DriverManager#getConnection(String url, Properties properties)` method. + +See [Supported Connection Properties](documentation/connection_properties.md) for a full list of all +supported connection properties. + +#### Commonly Used Properties +- default_isolation_level (String): Spanner supports isolation levels REPEATABLE_READ or SERIALIZABLE. SERIALIZABLE is the default. Using isolation level REPEATABLE_READ improves performance by reducing the amount of locks that are taken by transactions that execute a large number of queries in read/write transactions. See https://cloud.google.com/spanner/docs/isolation-levels for more information on the supported isolation levels in Spanner. 
+- credentials (String): URL for the credentials file to use for the connection. If you do not specify any credentials at all, the default credentials of the environment as returned by `GoogleCredentials#getApplicationDefault()` is used. Example: `jdbc:cloudspanner:/projects/my-project/instances/my-instance/databases/my-db;credentials=/path/to/credentials.json` +- autocommit (boolean): Sets the initial autocommit mode for the connection. Default is true. +- readonly (boolean): Sets the initial readonly mode for the connection. Default is false. +- autoConfigEmulator (boolean): Automatically configure the connection to try to connect to the Cloud Spanner emulator. You do not need to specify any host or port in the connection string as long as the emulator is running on the default host/port (localhost:9010). The instance and database in the connection string will automatically be created if these do not yet exist on the emulator. This means that you do not need to execute any `gcloud` commands on the emulator to create the instance and database before you can connect to it. Example: `jdbc:cloudspanner:/projects/test-project/instances/test-instance/databases/test-db;autoConfigEmulator=true` +- usePlainText (boolean): Sets whether the JDBC connection should establish an unencrypted connection to a (local) server. This option can only be used when connecting to a local emulator that does not require an encrypted connection, and that does not require authentication. Example: `jdbc:cloudspanner://localhost:9010/projects/test-project/instances/test-instance/databases/test-db;usePlainText=true` +- optimizerVersion (String): Sets the default query optimizer version to use for this connection. See also https://cloud.google.com/spanner/docs/query-optimizer/query-optimizer-versions. + +#### Advanced Properties +- DEPRECATED minSessions (int): Sets the minimum number of sessions in the backing session pool. Defaults to 100. 
This configuration option is no longer needed, as the JDBC driver by default uses a [single multiplexed session for all operations](https://cloud.google.com/spanner/docs/sessions#multiplexed_sessions). +- DEPRECATED maxSessions (int): Sets the maximum number of sessions in the backing session pool. Defaults to 400. This configuration option is no longer needed, as the JDBC driver by default uses a [single multiplexed session for all operations](https://cloud.google.com/spanner/docs/sessions#multiplexed_sessions). +- numChannels (int): Sets the number of gRPC channels to use. Defaults to 4. +- retryAbortsInternally (boolean): The JDBC driver will by default automatically retry aborted transactions internally. This is done by keeping track of all statements and the results of these during a transaction, and if the transaction is aborted by Cloud Spanner, it will replay the statements on a new transaction and compare the results with the initial attempt. Disable this option if you want to handle aborted transactions in your own application. +- autocommit_dml_mode (string): Determines the transaction type that is used to execute + [DML statements](https://cloud.google.com/spanner/docs/dml-tasks#using-dml) when the connection is + in auto-commit mode. The following values are supported: + - TRANSACTIONAL (default): Uses atomic read/write transactions. + - PARTITIONED_NON_ATOMIC: Use Partitioned DML for DML statements in auto-commit mode. Use this mode + to execute DML statements that exceed the transaction mutation limit in Spanner. + - TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC: Execute DML statements using atomic read/write + transactions. If this fails because the mutation limit on Spanner has been exceeded, the DML statement + is retried using a Partitioned DML transaction. +- auto_batch_dml (boolean): Automatically buffer DML statements and execute them as one batch, + instead of executing them on Spanner directly. 
The buffered DML statements are executed on Spanner + in one batch when a query is executed, or when the transaction is committed. This option can for + example be used in combination with Hibernate to automatically group more (small) DML statements + into one batch. +- oauthToken (string): A valid pre-existing OAuth token to use for authentication for this connection. Setting this property will take precedence over any value set for a credentials file. +- lenient (boolean): Enable this to force the JDBC driver to ignore unknown properties in the connection URL. Some applications automatically add additional properties to the URL that are not recognized by the JDBC driver. Normally, the JDBC driver will reject this, unless `lenient` mode is enabled. +- enableDirectAccess (boolean): Sets whether the JDBC connection should establish a connection using DirectPath. Setting this property will enable the client to establish a connection directly to Spanner if the client is running in a GCP VM. Otherwise, it will fall back to the standard network path. + +For a full list of supported connection properties, see +[Supported Connection Properties](documentation/connection_properties.md). + +### Jar with Dependencies +A single jar with all dependencies can be downloaded from https://repo1.maven.org/maven2/com/google/cloud/google-cloud-spanner-jdbc/latest +or be built with the command `mvn package` (select the jar that is named `google-cloud-spanner-jdbc--single-jar-with-dependencies.jar`). + +### Creating a Shaded Jar + +A jar with all dependencies included is automatically generated when you execute `mvn package`. +The dependencies in this jar are not shaded. To create a jar with shaded dependencies you must +activate the `shade` profile like this: + + ``` + mvn package -Pshade + ``` + +## Samples + +See the [samples](/samples) directory for various examples for using the Spanner JDBC driver. 
+ +- [snippets](/samples/snippets): Contains small code snippets for commonly used JDBC and Spanner + features. Refer to these snippets for examples on how to execute DDL and DML batches, use various + data types with the JDBC driver, execute various types of transactions (read/write, read-only, + Partitioned DML), use request and transaction tags, etc. +- [spring-data-jdbc](/samples/spring-data-jdbc): Contains a sample application that uses Spring Data + JDBC in combination with a Spanner PostgreSQL database. +- [spring-data-mybatis](/samples/spring-data-mybatis): Contains a sample application that uses + Spring Data MyBatis in combination with a Spanner PostgreSQL database. +- [quickperf](/samples/quickperf): Contains a simple benchmarking application. + + +## Troubleshooting + +To get help, follow the instructions in the [shared Troubleshooting document][troubleshooting]. + +## Supported Java Versions + +Java 8 or above is required for using this client. + +Google's Java client libraries, +[Google Cloud Client Libraries][cloudlibs] +and +[Google Cloud API Libraries][apilibs], +follow the +[Oracle Java SE support roadmap][oracle] +(see the Oracle Java SE Product Releases section). + +### For new development + +In general, new feature development occurs with support for the lowest Java +LTS version covered by Oracle's Premier Support (which typically lasts 5 years +from initial General Availability). If the minimum required JVM for a given +library is changed, it is accompanied by a [semver][semver] major release. + +Java 11 and (in September 2021) Java 17 are the best choices for new +development. + +### Keeping production systems current + +Google tests its client libraries with all current LTS versions covered by +Oracle's Extended Support (which typically lasts 8 years from initial +General Availability). 
+ +#### Legacy support + +Google's client libraries support legacy versions of Java runtimes with long +term stable libraries that don't receive feature updates on a best efforts basis +as it may not be possible to backport all patches. + +Google provides updates on a best efforts basis to apps that continue to use +Java 7, though apps might need to upgrade to current versions of the library +that supports their JVM. + +#### Where to find specific information + +The latest versions and the supported Java versions are identified on +the individual GitHub repository `github.com/GoogleAPIs/java-SERVICENAME` +and on [google-cloud-java][g-c-j]. + +## Versioning + + +This library follows [Semantic Versioning](http://semver.org/). + + + +## Contributing + + +Contributions to this library are always welcome and highly encouraged. + +See [CONTRIBUTING][contributing] for more information how to get started. + +Please note that this project is released with a Contributor Code of Conduct. By participating in +this project you agree to abide by its terms. See [Code of Conduct][code-of-conduct] for more +information. + + +## License + +Apache 2.0 - See [LICENSE][license] for more information. + +Java is a registered trademark of Oracle and/or its affiliates. 
+ +[product-docs]: https://cloud.google.com/spanner/docs/use-oss-jdbc +[javadocs]: https://googleapis.dev/java/google-cloud-spanner-jdbc/latest/index.html +[stability-image]: https://img.shields.io/badge/stability-ga-green +[maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-spanner-jdbc.svg +[maven-version-link]: https://search.maven.org/search?q=g:com.google.cloud%20AND%20a:google-cloud-spanner-jdbc&core=gav +[authentication]: https://github.com/googleapis/google-cloud-java#authentication +[auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes +[predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles +[iam-policy]: https://cloud.google.com/iam/docs/overview#cloud-iam-policy +[developer-console]: https://console.developers.google.com/ +[create-project]: https://cloud.google.com/resource-manager/docs/creating-managing-projects +[cloud-sdk]: https://cloud.google.com/sdk/ +[troubleshooting]: https://github.com/googleapis/google-cloud-java/blob/main/TROUBLESHOOTING.md +[contributing]: https://github.com/googleapis/java-spanner-jdbc/blob/main/CONTRIBUTING.md +[code-of-conduct]: https://github.com/googleapis/java-spanner-jdbc/blob/main/CODE_OF_CONDUCT.md#contributor-code-of-conduct +[license]: https://github.com/googleapis/java-spanner-jdbc/blob/main/LICENSE + + +[libraries-bom]: https://github.com/GoogleCloudPlatform/cloud-opensource-java/wiki/The-Google-Cloud-Platform-Libraries-BOM +[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png + +[semver]: https://semver.org/ +[cloudlibs]: https://cloud.google.com/apis/docs/client-libraries-explained +[apilibs]: https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries +[oracle]: https://www.oracle.com/java/technologies/java-se-support-roadmap.html +[g-c-j]: http://github.com/googleapis/google-cloud-java diff --git a/java-spanner-jdbc/clirr-ignored-differences.xml 
b/java-spanner-jdbc/clirr-ignored-differences.xml new file mode 100644 index 000000000000..f8e168c5f0b7 --- /dev/null +++ b/java-spanner-jdbc/clirr-ignored-differences.xml @@ -0,0 +1,130 @@ + + + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + com.google.cloud.spanner.Dialect getDialect() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + com.google.cloud.spanner.connection.SavepointSupport getSavepointSupport() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setSavepointSupport(com.google.cloud.spanner.connection.SavepointSupport) + + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + int getMaxPartitionedParallelism() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + int getMaxPartitions() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + boolean isAutoPartitionMode() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + boolean isDataBoostEnabled() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setAutoPartitionMode(boolean) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setDataBoostEnabled(boolean) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setMaxPartitionedParallelism(int) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setMaxPartitions(int) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + byte[] getProtoDescriptors() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setProtoDescriptors(byte[]) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setProtoDescriptors(java.io.InputStream) + + + 8001 + com/google/cloud/spanner/connection/ConnectionHelper + + + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + boolean isAutoBatchDml() + + + 7012 + 
com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setAutoBatchDml(boolean) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + long getAutoBatchDmlUpdateCount() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setAutoBatchDmlUpdateCount(long) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + boolean isAutoBatchDmlUpdateCountVerification() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + void setAutoBatchDmlUpdateCountVerification(boolean) + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + com.google.cloud.spanner.DatabaseClient getDatabaseClient() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + com.google.cloud.spanner.Spanner getSpanner() + + + 7012 + com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection + com.google.cloud.spanner.DatabaseId getDatabaseId() + + + 8001 + com/google/cloud/spanner/connection/ConnectionPropertiesHelper + + diff --git a/java-spanner-jdbc/documentation/connection_properties.md b/java-spanner-jdbc/documentation/connection_properties.md new file mode 100644 index 000000000000..99e3e1d169c4 --- /dev/null +++ b/java-spanner-jdbc/documentation/connection_properties.md @@ -0,0 +1,73 @@ +# Supported Connection Properties + +This file contains all supported connection properties for the Spanner JDBC driver. These properties can be specified both in the connection URL and in the Properties map that is used to create a connection. + +The 'Context' value indicates whether the property can only be set when a connection is created (STARTUP), or whether the value can also be changed after a connection has been created. 
+ +| Name | Description | Default | Enum values | Context | +|------|-------------|---------|-------------|---------| +| auto_batch_dml | Automatically buffer DML statements that are executed on this connection and execute them as one batch when a non-DML statement is executed, or when the current transaction is committed. The update count that is returned for DML statements that are buffered is by default 1. This default can be changed by setting the connection variable auto_batch_dml_update_count to a value other than 1. This setting is only used in read/write transactions. DML statements in auto-commit mode are executed directly. | false | true, false | USER | +| auto_batch_dml_update_count | DML statements that are executed when auto_batch_dml is set to true, are not directly sent to Spanner, but are buffered in the client until the batch is flushed. This property determines the update count that is returned for these DML statements. The default is 1, as that is the update count that is expected by most ORMs (e.g. Hibernate). | 1 | | USER | +| auto_batch_dml_update_count_verification | The update count that is returned for DML statements that are buffered during an automatic DML batch is by default 1. This value can be changed by setting the connection variable auto_batch_dml_update_count. The update counts that are returned by Spanner when the DML statements are actually executed are verified against the update counts that were returned when they were buffered. If these do not match, a com.google.cloud.spanner.DmlBatchUpdateCountVerificationFailedException will be thrown. You can disable this verification by setting auto_batch_dml_update_count_verification to false. | true | true, false | USER | +| autocommit | Should the connection start in autocommit (true/false) | true | true, false | USER | +| autocommit_dml_mode | Determines the transaction type that is used to execute DML statements when the connection is in auto-commit mode. 
| TRANSACTIONAL | TRANSACTIONAL, PARTITIONED_NON_ATOMIC, TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC, null | USER | +| autoconfigemulator | Automatically configure the connection to try to connect to the Cloud Spanner emulator (true/false). The instance and database in the connection string will automatically be created if these do not yet exist on the emulator. Add dialect=postgresql to the connection string to make sure that the database that is created uses the PostgreSQL dialect. | false | true, false | STARTUP | +| autopartitionmode | Execute all queries on this connection as partitioned queries. Executing a query that cannot be partitioned will fail. Executing a query in a read/write transaction will also fail. | false | true, false | USER | +| batch_dml_update_count | The update count that is returned for DML statements that are executed in an explicit DML batch. The default is -1 | -1 | | USER | +| channelprovider | The name of the channel provider class. The name must reference an implementation of ExternalChannelProvider. If this property is not set, the connection will use the default grpc channel provider. | | | STARTUP | +| clientcertificate | Specifies the file path to the client certificate required for establishing an mTLS connection. | | | STARTUP | +| clientkey | Specifies the file path to the client private key required for establishing an mTLS connection. | | | STARTUP | +| connection_state_type | The type of connection state to use for this connection. Can only be set at start up. If no value is set, then the database dialect default will be used, which is NON_TRANSACTIONAL for GoogleSQL and TRANSACTIONAL for PostgreSQL. | | TRANSACTIONAL, NON_TRANSACTIONAL | STARTUP | +| credentials | The location of the credentials file to use for this connection. If neither this property or encoded credentials are set, the connection will use the default Google Cloud credentials for the runtime environment. 
WARNING: Using this property without proper validation can expose the application to security risks. It is intended for use with credentials from a trusted source only, as it could otherwise allow end-users to supply arbitrary credentials. For more information, see https://cloud.google.com/docs/authentication/client-libraries#external-credentials | | | STARTUP | +| credentialsprovider | The class name of the com.google.api.gax.core.CredentialsProvider implementation that should be used to obtain credentials for connections. | | | STARTUP | +| databaserole | Sets the database role to use for this connection. The default is the privileges assigned to the IAM role | | | STARTUP | +| databoostenabled | Enable data boost for all partitioned queries that are executed by this connection. This setting is only used for partitioned queries and is ignored by all other statements. | false | true, false | USER | +| dcpinitialchannels | The initial number of channels in the dynamic channel pool. Only used when enableDynamicChannelPool is true. The default is SpannerOptions.DEFAULT_DYNAMIC_POOL_INITIAL_SIZE (4). | | | STARTUP | +| dcpmaxchannels | The maximum number of channels in the dynamic channel pool. Only used when enableDynamicChannelPool is true. The default is SpannerOptions.DEFAULT_DYNAMIC_POOL_MAX_CHANNELS (10). | | | STARTUP | +| dcpminchannels | The minimum number of channels in the dynamic channel pool. Only used when enableDynamicChannelPool is true. The default is SpannerOptions.DEFAULT_DYNAMIC_POOL_MIN_CHANNELS (2). | | | STARTUP | +| ddlintransactionmode | Determines how the connection should handle DDL statements in a read/write transaction. | ALLOW_IN_EMPTY_TRANSACTION | FAIL, ALLOW_IN_EMPTY_TRANSACTION, AUTO_COMMIT_TRANSACTION | USER | +| default_isolation_level | The transaction isolation level that is used by default for read/write transactions. 
The default is isolation_level_unspecified, which means that the connection will use the default isolation level of the database that it is connected to. | ISOLATION_LEVEL_UNSPECIFIED | ISOLATION_LEVEL_UNSPECIFIED, SERIALIZABLE, REPEATABLE_READ | USER | +| defaultsequencekind | The default sequence kind that should be used for the database. This property is only used when a DDL statement that requires a default sequence kind is executed on this connection. | | | USER | +| delaytransactionstartuntilfirstwrite | Enabling this option will delay the actual start of a read/write transaction until the first write operation is seen in that transaction. All reads that happen before the first write in a transaction will instead be executed as if the connection was in auto-commit mode. Enabling this option will make read/write transactions lose their SERIALIZABLE isolation level. Read operations that are executed after the first write operation in a read/write transaction will be executed using the read/write transaction. Enabling this mode can reduce locking and improve performance for applications that can handle the lower transaction isolation semantics. | false | true, false | USER | +| dialect | Sets the dialect to use for new databases that are created by this connection. | GOOGLE_STANDARD_SQL | GOOGLE_STANDARD_SQL, POSTGRESQL | STARTUP | +| directed_read | The directed read options to apply to read-only transactions. | | | USER | +| enableapitracing | Add OpenTelemetry traces for each individual RPC call. Enable this to get a detailed view of each RPC that is being executed by your application, or if you want to debug potential latency problems caused by RPCs that are being retried. | | true, false | STARTUP | +| enabledirectaccess | Configure the connection to try to connect to Spanner using DirectPath (true/false). The client will try to connect to Spanner using a direct Google network connection. 
DirectPath will work only if the client is trying to establish a connection from a Google Cloud VM. Otherwise it will automatically fallback to the standard network path. NOTE: The default for this property is currently false, but this could be changed in the future. | | true, false | STARTUP | +| enabledynamicchannelpool | Enable dynamic channel pooling for automatic gRPC channel scaling. When enabled, the client will automatically scale the number of channels based on load. Setting numChannels will disable dynamic channel pooling even if this is set to true. The default is currently false (disabled), but this may change to true in a future version. Set this property explicitly to ensure consistent behavior. | | true, false | STARTUP | +| enableendtoendtracing | Enable end-to-end tracing (true/false) to generate traces for both the time that is spent in the client, as well as time that is spent in the Spanner server. Server side traces can only go to Google Cloud Trace, so to see end to end traces, the application should configure an exporter that exports the traces to Google Cloud Trace. | false | true, false | STARTUP | +| enableextendedtracing | Include the SQL string in the OpenTelemetry traces that are generated by this connection. The SQL string is added as the standard OpenTelemetry attribute 'db.statement'. | | true, false | STARTUP | +| encodedcredentials | Base64-encoded credentials to use for this connection. If neither this property or a credentials location are set, the connection will use the default Google Cloud credentials for the runtime environment. WARNING: Enabling this property without proper validation can expose the application to security risks. It is intended for use with credentials from a trusted source only, as it could otherwise allow end-users to supply arbitrary credentials. 
For more information, see https://cloud.google.com/docs/authentication/client-libraries#external-credentials | | | STARTUP | +| endpoint | The endpoint that the JDBC driver should connect to. The default is the default Spanner production endpoint when autoConfigEmulator=false, and the default Spanner emulator endpoint (localhost:9010) when autoConfigEmulator=true. This property takes precedence over any host name at the start of the connection URL. | | | STARTUP | +| grpc_interceptor_provider | The class name of a com.google.api.gax.grpc.GrpcInterceptorProvider implementation that should be used to provide interceptors for the underlying Spanner client. This is a guarded property that can only be set if the Java System Property ENABLE_GRPC_INTERCEPTOR_PROVIDER has been set to true. This property should only be set to true on systems where an untrusted user cannot modify the connection URL, as using this property will dynamically invoke the constructor of the class specified. This means that any user that can modify the connection URL, can also dynamically invoke code on the host where the application is running. | | | STARTUP | +| isexperimentalhost | Set this value to true for communication with an Experimental Host. | false | true, false | STARTUP | +| keeptransactionalive | Enabling this option will trigger the connection to keep read/write transactions alive by executing a SELECT 1 query once every 10 seconds if no other statements are being executed. This option should be used with caution, as it can keep transactions alive and hold on to locks longer than intended. This option should typically be used for CLI-type applications that might wait for user input for a longer period of time. | false | true, false | USER | +| lenient | Silently ignore unknown properties in the connection string/properties (true/false) | false | true, false | STARTUP | +| maxcommitdelay | The max delay that Spanner may apply to commit requests to improve throughput. 
| | | USER | +| maxpartitionedparallelism | The maximum number of partitions that will be executed in parallel for partitioned queries. Use 0 to dynamically use the number of available processor cores. | 1 | | USER | +| maxpartitions | The max partitions hint value to use for partitioned queries. Use 0 if you do not want to specify a hint. | 0 | | USER | +| maxsessions | The maximum number of sessions in the backing session pool. The default is 400. | | | STARTUP | +| minsessions | The minimum number of sessions in the backing session pool. The default is 100. | | | STARTUP | +| numchannels | The number of gRPC channels to use to communicate with Cloud Spanner. The default is 4. | | | STARTUP | +| oauthtoken | A valid pre-existing OAuth token to use for authentication for this connection. Setting this property will take precedence over any value set for a credentials file. | | | STARTUP | +| optimizerstatisticspackage | Sets the query optimizer statistics package to use for this connection. | | | USER | +| optimizerversion | Sets the default query optimizer version to use for this connection. | | | USER | +| read_lock_mode | This option controls the locking behavior for read operations and queries within a read/write transaction. It works in conjunction with the transaction's isolation level. PESSIMISTIC: Read locks are acquired immediately on read. This mode only applies to SERIALIZABLE isolation. This mode prevents concurrent modifications by locking data throughout the transaction. This reduces commit-time aborts due to conflicts, but can increase how long transactions wait for locks and the overall contention. OPTIMISTIC: Locks for reads within the transaction are not acquired on read. Instead, the locks are acquired on commit to validate that read/queried data has not changed since the transaction started. If a conflict is detected, the transaction will fail. This mode only applies to SERIALIZABLE isolation. This mode defers locking until commit, which can reduce contention and improve throughput. 
However, be aware that this increases the risk of transaction aborts if there's significant write competition on the same data. READ_LOCK_MODE_UNSPECIFIED: This is the default if no mode is set. The locking behavior depends on the isolation level: REPEATABLE_READ: Locking semantics default to OPTIMISTIC. However, validation checks at commit are only performed for queries using SELECT FOR UPDATE, statements with {@code LOCK_SCANNED_RANGES} hints, and DML statements. For all other isolation levels: If the read lock mode is not set, it defaults to PESSIMISTIC locking. | READ_LOCK_MODE_UNSPECIFIED | READ_LOCK_MODE_UNSPECIFIED, PESSIMISTIC, OPTIMISTIC | USER | +| read_only_staleness | The read-only staleness to use for read-only transactions and single-use queries. | strong | | USER | +| readonly | Should the connection start in read-only mode (true/false) | false | true, false | USER | +| retryabortsinternally | Should the connection automatically retry Aborted errors (true/false) | true | true, false | USER | +| returncommitstats | Request that Spanner returns commit statistics for read/write transactions (true/false) | false | true, false | USER | +| routetoleader | Should read/write transactions and partitioned DML be routed to leader region (true/false) | true | true, false | STARTUP | +| rpcpriority | Sets the priority for all RPC invocations from this connection (HIGH/MEDIUM/LOW). The default is HIGH. | | LOW, MEDIUM, HIGH, UNSPECIFIED, null | USER | +| savepoint_support | Determines the behavior of the connection when savepoints are used. | FAIL_AFTER_ROLLBACK | ENABLED, FAIL_AFTER_ROLLBACK, DISABLED | USER | +| statement_timeout | Adds a timeout to all statements executed on this connection. This property is only used when a statement timeout is specified. | | | USER | +| tracing_prefix | The prefix that will be prepended to all OpenTelemetry traces that are generated by a Connection. 
| CloudSpanner | | STARTUP | +| trackconnectionleaks | Capture the call stack of the thread that created a connection. This will pre-create a LeakedConnectionException already when a connection is created. This can be disabled, for example if a monitoring system logs the pre-created exception. If disabled, the LeakedConnectionException will only be created when an actual connection leak is detected. The stack trace of the exception will in that case not contain the call stack of when the connection was created. | true | true, false | STARTUP | +| tracksessionleaks | Capture the call stack of the thread that checked out a session of the session pool. This will pre-create a LeakedSessionException already when a session is checked out. This can be disabled, for example if a monitoring system logs the pre-created exception. If disabled, the LeakedSessionException will only be created when an actual session leak is detected. The stack trace of the exception will in that case not contain the call stack of when the session was checked out. | true | true, false | STARTUP | +| transaction_timeout | Timeout for read/write transactions. | | | USER | +| universedomain | Configure the connection to try to connect to Spanner using a different partner Google Universe than GDU (googleapis.com). | googleapis.com | | STARTUP | +| unknownlength | Spanner does not return the length of the selected columns in query results. When returning meta-data about these columns through functions like ResultSetMetaData.getColumnDisplaySize and ResultSetMetaData.getPrecision, we must provide a value. Various client tools and applications have different ideas about what they would like to see. This property specifies the length to return for types of unknown length. | 50 | | USER | +| useautosavepointsforemulator | Automatically creates savepoints for each statement in a read/write transaction when using the Emulator. This is no longer needed when using Emulator version 1.5.23 or higher. 
| false | true, false | STARTUP | +| useplaintext | Use a plain text communication channel (i.e. non-TLS) for communicating with the server (true/false). Set this value to true for communication with the Cloud Spanner emulator. | false | true, false | STARTUP | +| useragent | The custom user-agent property name to use when communicating with Cloud Spanner. This property is intended for internal library usage, and should not be set by applications. | | | STARTUP | +| usevirtualgrpctransportthreads | Use a virtual thread instead of a platform thread for the gRPC executor (true/false). This option only has any effect if the application is running on Java 21 or higher. In all other cases, the option is ignored. | false | true, false | STARTUP | +| usevirtualthreads | Use a virtual thread instead of a platform thread for each connection (true/false). This option only has any effect if the application is running on Java 21 or higher. In all other cases, the option is ignored. | false | true, false | STARTUP | diff --git a/java-spanner-jdbc/documentation/img/example-aborted-transaction.png b/java-spanner-jdbc/documentation/img/example-aborted-transaction.png new file mode 100644 index 000000000000..92a5c002f0b9 Binary files /dev/null and b/java-spanner-jdbc/documentation/img/example-aborted-transaction.png differ diff --git a/java-spanner-jdbc/documentation/img/example-api-tracing.png b/java-spanner-jdbc/documentation/img/example-api-tracing.png new file mode 100644 index 000000000000..251ed702cf87 Binary files /dev/null and b/java-spanner-jdbc/documentation/img/example-api-tracing.png differ diff --git a/java-spanner-jdbc/documentation/img/example-gfe-latency.png b/java-spanner-jdbc/documentation/img/example-gfe-latency.png new file mode 100644 index 000000000000..62f24b48567a Binary files /dev/null and b/java-spanner-jdbc/documentation/img/example-gfe-latency.png differ diff --git a/java-spanner-jdbc/documentation/img/example-search-for-aborted-transactions.png 
b/java-spanner-jdbc/documentation/img/example-search-for-aborted-transactions.png new file mode 100644 index 000000000000..af9c304d7b1f Binary files /dev/null and b/java-spanner-jdbc/documentation/img/example-search-for-aborted-transactions.png differ diff --git a/java-spanner-jdbc/documentation/img/example-search-for-tag.png b/java-spanner-jdbc/documentation/img/example-search-for-tag.png new file mode 100644 index 000000000000..237fedc2d6b8 Binary files /dev/null and b/java-spanner-jdbc/documentation/img/example-search-for-tag.png differ diff --git a/java-spanner-jdbc/documentation/img/example-tracing.png b/java-spanner-jdbc/documentation/img/example-tracing.png new file mode 100644 index 000000000000..0291f5cd7c3a Binary files /dev/null and b/java-spanner-jdbc/documentation/img/example-tracing.png differ diff --git a/java-spanner-jdbc/documentation/latency-debugging-guide.md b/java-spanner-jdbc/documentation/latency-debugging-guide.md new file mode 100644 index 000000000000..0d2084c3f110 --- /dev/null +++ b/java-spanner-jdbc/documentation/latency-debugging-guide.md @@ -0,0 +1,295 @@ +# Spanner JDBC Driver - Latency Debugging Guide + +The Spanner JDBC driver supports OpenTelemetry tracing. Tracing can be used to investigate slow +queries and transactions and to determine whether transactions or requests are being retried. In +addition, all metrics described in [Latency points in a Spanner request](https://cloud.google.com/spanner/docs/latency-points) +are also collected by the JDBC driver and can be used for debugging. + +## Isolation Level + +A common reason for high latency in read/write transactions is lock contention. Spanner by default +uses isolation level `SERIALIZABLE`. This causes all queries in read/write transactions to take +locks for all rows that are scanned by a query. 
Using isolation level `REPEATABLE_READ` reduces the +number of locks that are taken during a read/write transaction, and can significantly improve +performance for applications that execute many and/or large queries in read/write transactions. + +Enable isolation level `REPEATABLE_READ` by default for all transactions that are executed by the +JDBC driver by setting the `default_isolation_level` connection property like this in the connection +URL: + +```java +String projectId = "my-project"; +String instanceId = "my-instance"; +String databaseId = "my-database"; +String isolationLevel = "REPEATABLE_READ"; + +try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s?default_isolation_level=%s", + projectId, instanceId, databaseId, isolationLevel))) { + try (Statement statement = connection.createStatement()) { + try (ResultSet rs = statement.executeQuery("SELECT CURRENT_TIMESTAMP()")) { + while (rs.next()) { + System.out.printf( + "Connected to Cloud Spanner at [%s]%n", rs.getTimestamp(1).toString()); + } + } + } +} +``` + +See https://cloud.google.com/spanner/docs/isolation-levels for more information on the supported +isolation levels in Spanner. + +## Configuration + +You can configure the OpenTelemetry instance that should be used in two ways: +1. Register a global OpenTelemetry instance. This instance will automatically be picked up by the Spanner JDBC driver. +2. Add an OpenTelemetry instance with the key `openTelemetry` to the `java.util.Properties` instance that is used to create the JDBC connection. + +The OpenTelemetry instance that you use for the JDBC driver can be the same as an instance that you +use for the application. The traces that are created by the JDBC driver will then become child spans +of the traces that your application produces. + +By default, the traces that are generated by the Spanner JDBC driver do not include the SQL +statement. 
You can include the SQL statement with the traces by adding the property `enableExtendedTracing=true` +to the JDBC connection URL. + +Tip: You can also enable extended tracing by setting the environment variable `SPANNER_ENABLE_EXTENDED_TRACING=true` + +### Api Tracing + +The traces that are generated by the JDBC driver by default stop at the level of a statement that is +executed by the JDBC driver. One statement is normally translated to one RPC call. However, there +can be additional RPC calls being executed in the following cases: +1. The JDBC driver inlines the `BeginTransaction` call with the first statement in a transaction. + This is not possible in specific cases, such as if a transaction only contains mutations, or if + the first statement in a transaction returns an error. In those cases, the JDBC driver executes + an additional `BeginTransaction` RPC. +2. The JDBC driver retries RPCs that return `UNAVAILABLE` errors. These retries are transparent to + the caller, and the observed behavior from the outside is that the statement execution took a + longer time than expected. Enable API tracing to get insights into whether an RPC was retried or + not. + +You can enable API tracing by adding the property `enableApiTracing=true` to the JDBC connection URL. + +Tip: You can also enable API tracing by setting the environment variable `SPANNER_ENABLE_API_TRACING=true` + +### Example Using Global OpenTelemetry +Create and register a global OpenTelemetry object before creating a JDBC connection. +See also the [Spring Data JDBC Sample](../samples/spring-data-jdbc) for an example of how to +configure OpenTelemetry in combination with Spring Data. 
+ +```java +TraceConfiguration traceConfiguration = TraceConfiguration.builder().setProjectId("my-project").build(); +SpanExporter traceExporter = TraceExporter.createWithConfiguration(traceConfiguration); +OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder() + .setSampler(Sampler.traceIdRatioBased(0.05)) + .setResource( + Resource.builder() + .put("service.name", "my-unique-service-name") + .build()) + .addSpanProcessor(BatchSpanProcessor.builder(traceExporter).build()) + .build()) + .buildAndRegisterGlobal(); +String projectId = "my-project"; +String instanceId = "my-instance"; +String databaseId = "my-database"; +// Setting this to true instructs the JDBC driver to include the SQL statement with the traces. +boolean enableExtendedTracing = true; + +try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s?enableExtendedTracing=%s", + projectId, instanceId, databaseId, enableExtendedTracing))) { + try (Statement statement = connection.createStatement()) { + try (ResultSet rs = statement.executeQuery("SELECT CURRENT_TIMESTAMP()")) { + while (rs.next()) { + System.out.printf( + "Connected to Cloud Spanner at [%s]%n", rs.getTimestamp(1).toString()); + } + } + } +} +``` + +### Example Using an OpenTelemetry instance in Properties +Create an OpenTelemetry object and supply it as part of the properties for a JDBC connection. 
+ +```java +TraceConfiguration traceConfiguration = TraceConfiguration.builder().setProjectId("my-project").build(); +SpanExporter traceExporter = TraceExporter.createWithConfiguration(traceConfiguration); +OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder() + .setSampler(Sampler.traceIdRatioBased(0.05)) + .setResource( + Resource.builder() + .put("service.name", "my-unique-service-name") + .build()) + .addSpanProcessor(BatchSpanProcessor.builder(traceExporter).build()) + .build()) + .build(); +String projectId = "my-project"; +String instanceId = "my-instance"; +String databaseId = "my-database"; +// Setting this to true instructs the JDBC driver to include the SQL statement with the traces. +boolean enableExtendedTracing = true; + +Properties properties = new Properties(); +properties.put(JdbcDriver.OPEN_TELEMETRY_PROPERTY_KEY, openTelemetry); +properties.put("enableExtendedTracing", String.valueOf(enableExtendedTracing)); + +try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + projectId, instanceId, databaseId), properties)) { + try (Statement statement = connection.createStatement()) { + try (ResultSet rs = statement.executeQuery("SELECT CURRENT_TIMESTAMP()")) { + while (rs.next()) { + System.out.printf( + "Connected to Cloud Spanner at [%s]%n", rs.getTimestamp(1).toString()); + } + } + } +} +``` + +## Traces + +The Spanner JDBC driver produces traces for each statement that is executed on Spanner. The +hierarchy of those traces are as follows: +1. The transaction that was created by the JDBC driver. All of these transactions contain the + attribute `connection_id`, which is a uniquely generated identifier for each JDBC connection. + This identifier can be used to search for all transactions that have been executed by a specific + connection. All JDBC transaction traces are prefixed with `JDBC.`. 
The transaction type is one of: + 1. `JDBC.SingleUseTransaction`: These transactions are created by the JDBC driver when a + statement is executed in auto-commit mode. + 2. `JDBC.ReadWriteTransaction`: Read/write transactions that can contain multiple queries and + DML statements. + 3. `JDBC.ReadOnlyTransaction`: Read-only transactions that can contain multiple queries. + 4. `JDBC.DdlBatch`: A batch of DDL statements. + + The JDBC transaction trace contains the following attributes: + 1. `connection_id`: A randomly generated unique identifier for the JDBC connection that executed + the transaction. + 2. `transaction.retried`: This attribute is added to the transaction trace if the transaction was + aborted by Spanner and retried by the Spanner JDBC driver. It is not added to transactions + that are not retried. +2. The transaction that was created by the Spanner client library. These are named + `CloudSpanner.ReadWriteTransaction` and `CloudSpanner.ReadOnlyTransaction`. This is normally a + one-to-one mapping with the transaction that the Spanner JDBC driver created, except if the JDBC + transaction created a `Savepoint` and rolled back to that `Savepoint`. A transaction trace + includes the following attributes: + 1. `transaction.tag`: Any transaction tag that was set for the transaction. +3. The statements that were executed as part of the transaction. Statement traces include the + following attributes: + 1. `statement.tag`: Any statement tag that was set for that statement. + 2. `db.statement`: The SQL string of the statement. This attribute is only added if + `enableExtendedTracing=true` has been added to the JDBC connection URL. The attribute contains + an array of SQL strings if the statement was a batch of DML statements. Note that Google Cloud + Tracing limits attribute values to at most 256 bytes. SQL strings that are longer than this + limit are truncated. + 3. 
`gfe_latency`: The execution time of the statement in milliseconds as measured by the Google + Front End. This value can be seen as the 'server-side latency' of the statement. A large + difference between this value and the execution time measured client side could be an + indication that the network connection between the application and Spanner is slow. + +The following screenshot shows an example of a traced read/write transaction that was executed by +the Spanner JDBC driver. + +![JDBC tracing example](img/example-tracing.png) + +### API Tracing + +API tracing adds traces for each RPC that is executed by the JDBC driver. This gives you insights +into RPC retries. + +Enable API tracing by adding the property `enableApiTracing=true` to the JDBC connection URL or set +the environment variable `SPANNER_ENABLE_API_TRACING=true`. + +The API traces contain the following attributes: + +1. `attempt.count`: The RPC attempt. This is 1 for the initial attempt. A value higher than 1 + indicates that the RPC was retried. +2. `total_response_count`: This attribute is only added to streaming RPCs and indicates the number of + streaming results the RPC returned. This property can for example be used to inspect the number + of `PartialResultSet` instances that were returned by an `ExecuteStreamingSql` call. + +This screenshot shows the same transaction as above with API tracing enabled. + +![JDBC API tracing example](img/example-api-tracing.png) + +## Aborted Transactions + +The Spanner JDBC driver automatically retries read/write transactions that are aborted by Spanner. +These retries are transparent to the application, and the only indication that the application gets +that a transaction might have been retried, is that the transaction execution time is higher than +when it is not retried. The OpenTelemetry traces can be used to investigate transactions that were +retried. These transactions have a `transaction.retried:true` attribute. 
This can be used to search +for traces of transactions that were aborted. + +This screenshot shows an example of a transaction that was aborted and retried. In this case, API +tracing was also enabled. + +![JDBC aborted transaction tracing example](img/example-aborted-transaction.png) + +## Searching + +The attributes that are added to traces can be used to search for traces of specific transactions, +transactions that show problematic behavior, and transactions that are retried. + +The examples in this guide assume that the OpenTelemetry traces are exported to Google Cloud Trace. +The traces can also be exported to any other tracing solution that has an OpenTelemetry exporter. +Consult the documentation of the tracing solution of your choice for more information on how to +search for traces with specific attributes. + +### Search for transaction or statement tag + +See [this sample](https://cloud.google.com/spanner/docs/getting-started/jdbc#transaction_tags_and_request_tags) +for how to set transaction tags and statement tags with the Spanner JDBC driver. + +The JDBC driver adds the attributes `transaction.tag` and `statement.tag` to respectively the +transactions and statements that it executes. These attributes can be used to search for all traces +of transactions and statements with specific tags. The Google Cloud Trace UI allows you to search +for such traces by entering the following as a trace filter: + +``` +transaction.tag:my_transaction_tag +``` + +![JDBC search for transaction tag](img/example-search-for-tag.png) + +### Search for aborted transactions + +The `transaction.retried` attribute is added to transactions that are aborted by Spanner and retried +by the JDBC driver. You can search for these transactions with the filter `HasLabel:transaction.retried`. 
+ +![JDBC search for aborted transactions](img/example-search-for-aborted-transactions.png) + +## Backend Latency vs Client Latency + +The traces that are generated by the Spanner JDBC driver show the end-to-end latency for executing +a statement. This includes the network latency between the client application and Spanner. You can +use the `gfe_latency` attribute of the `CloudSpannerOperation.*` traces to inspect the difference +between the two. This can be used to determine whether the latency of a statement is mainly caused +by network transport time or server execution time. + +Click on a `CloudSpannerOperation.*` trace to open the attributes of that trace. The `gfe_latency` +attribute contains the total execution time of the statement as measured by the Spanner front end. +If this value is significantly lower than the total trace time, then that could be an indication of +a slow network connection between your application and Spanner. + +In the example screenshot below the `gfe_latency` is 44ms, while the trace time is 58.411ms. +The 14ms difference between the two indicates that the network connection between the client +application and Spanner is slow and/or that the client application is located geographically far +away from the Spanner instance. + +![gfe_latency vs trace time](img/example-gfe-latency.png) diff --git a/java-spanner-jdbc/pom.xml b/java-spanner-jdbc/pom.xml new file mode 100644 index 000000000000..7469a5e16faa --- /dev/null +++ b/java-spanner-jdbc/pom.xml @@ -0,0 +1,458 @@ + + + 4.0.0 + google-cloud-spanner-jdbc + 2.35.5-SNAPSHOT + jar + Google Cloud Spanner JDBC + https://github.com/googleapis/google-cloud-java + + JDBC driver for Google Cloud Spanner. 
+ + + com.google.cloud + google-cloud-jar-parent + 1.83.0-SNAPSHOT + ../google-cloud-jar-parent/pom.xml + + + + chingor + Jeff Ching + chingor@google.com + Google + + Developer + + + + + Google LLC + + + scm:git:git@github.com:googleapis/google-cloud-java.git + scm:git:git@github.com:googleapis/google-cloud-java.git + https://github.com/googleapis/google-cloud-java + HEAD + + + https://github.com/googleapis/google-cloud-java/issues + GitHub Issues + + + + + Apache-2.0 + https://www.apache.org/licenses/LICENSE-2.0.txt + + + + google-cloud-spanner-jdbc + 4.13.2 + 3.0.2 + 1.4.5 + 4.11.0 + 0.31.1 + + + + + + io.opencensus + opencensus-api + ${opencensus.version} + + + io.opencensus + opencensus-contrib-grpc-util + ${opencensus.version} + + + + + + com.google.http-client + google-http-client + + + io.grpc + grpc-api + + + com.google.cloud + google-cloud-core-grpc + + + com.google.api.grpc + proto-google-common-protos + + + com.google.api + gax + runtime + + + com.google.cloud + google-cloud-spanner + 6.112.1-SNAPSHOT + + + io.grpc + grpc-alts + + + com.google.protobuf + protobuf-java + + + com.google.guava + guava + + + io.grpc + grpc-netty-shaded + + + com.google.api + api-common + + + com.google.code.findbugs + jsr305 + + + com.google.auth + google-auth-library-oauth2-http + + + com.google.auth + google-auth-library-credentials + + + com.google.cloud + google-cloud-core + + + com.google.api.grpc + proto-google-cloud-spanner-v1 + 6.112.1-SNAPSHOT + + + io.opentelemetry + opentelemetry-api + + + + com.google.cloud + google-cloud-spanner + 6.112.1-SNAPSHOT + test-jar + test + + + org.testcontainers + testcontainers + 2.0.3 + test + + + com.google.truth + truth + ${truth.version} + test + + + org.checkerframework + checker-qual + + + + + org.mockito + mockito-core + ${mockito.version} + test + + + junit + junit + ${junit.version} + test + + + com.google.api + gax-grpc + testlib + test + + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + test + + + 
io.opentelemetry + opentelemetry-api-incubator + 1.45.0-alpha + test + + + com.google.cloud + google-cloud-trace + 2.89.0-SNAPSHOT + test + + + com.google.api.grpc + proto-google-cloud-trace-v1 + 2.89.0-SNAPSHOT + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + UTF-8 + -Xlint:unchecked + -Xlint:deprecation + true + + + + org.apache.maven.plugins + maven-surefire-plugin + + + com.google.cloud.spanner.jdbc.it.** + + sponge_log + + + com.google.cloud.spanner.GceTestEnvConfig + + + projects/gcloud-devel/instances/spanner-testing-east1 + + logging.properties + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + + + com.google.cloud.spanner.GceTestEnvConfig + + + projects/gcloud-devel/instances/spanner-testing-east1 + + logging.properties + + 2400 + 4 + true + + + + org.apache.maven.plugins + maven-shade-plugin + 3.6.1 + + + + shade + + + true + true + true + single-jar-with-dependencies + false + + + *:* + + + java:* + junit:* + + + + + + META-INF/services + java.sql.Driver + + + com.google.cloud.spanner.jdbc + ClientSideStatements.json + + + com.google.cloud.spanner.jdbc + *.sql + + + + META-INF/SIGNINGC.RSA + META-INF/SIGNINGC.SF + META-INF/DEPENDENCIES + META-INF/LICENSE + META-INF/LICENSE.txt + META-INF/NOTICE + META-INF/NOTICE.txt + + + + + + + + + + org.codehaus.mojo + flatten-maven-plugin + + + + org.codehaus.mojo + exec-maven-plugin + + + generate_connection_properties_documentation + test + + java + + + com.google.cloud.spanner.jdbc.ConnectionPropertiesFileGenerator + + ${project.basedir}/documentation/connection_properties.md + + test + false + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + org.graalvm.sdk:graal-sdk + com.google.api.grpc:grpc-google-cloud-spanner-v1 + com.google.api.grpc:proto-google-cloud-spanner-admin-instance-v1 + com.google.api.grpc:proto-google-cloud-spanner-admin-database-v1 + io.opentelemetry:* + + + + io.grpc:grpc-alts + com.google.api:gax-grpc + 
com.google.cloud:google-cloud-core-grpc + com.google.api.grpc:grpc-google-cloud-spanner-v1 + com.google.api.grpc:grpc-google-cloud-spanner-admin-instance-v1 + com.google.api.grpc:grpc-google-cloud-spanner-admin-database-v1 + io.opentelemetry:* + javax.annotation:javax.annotation-api + + + io.opentelemetry:* + com.google.cloud:google-cloud-trace + com.google.cloud.opentelemetry:exporter-trace + + + + + + + + + + + shade + + + + org.apache.maven.plugins + maven-shade-plugin + + + + com. + com.google.cloud.spanner.jdbc.shaded.com. + + com.google.cloud.spanner.** + + + + android. + com.google.cloud.spanner.jdbc.shaded.android. + + + io. + com.google.cloud.spanner.jdbc.shaded.io. + + io.grpc.netty.shaded.** + + + + org. + com.google.cloud.spanner.jdbc.shaded.org. + + org.conscrypt.** + + + + + + + + + + + alt_build_dir + + + alt.build.dir + + + + ${alt.build.dir} + + + org.apache.maven.plugins + maven-shade-plugin + 3.6.1 + + + + ${alt.build.dir}/single.jar + + + + + + + + + + diff --git a/java-spanner-jdbc/samples/install-without-bom/pom.xml b/java-spanner-jdbc/samples/install-without-bom/pom.xml new file mode 100644 index 000000000000..80876b00da84 --- /dev/null +++ b/java-spanner-jdbc/samples/install-without-bom/pom.xml @@ -0,0 +1,84 @@ + + + 4.0.0 + com.google.cloud + spanner-jdbc-install-without-bom + jar + Google Google Cloud Spanner JDBC Install Without Bom + https://github.com/googleapis/java-spanner-jdbc + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + 1.8 + 1.8 + UTF-8 + + + + + + + com.google.cloud + google-cloud-spanner-jdbc + 2.35.3 + + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + + diff --git a/java-spanner-jdbc/samples/pom.xml 
b/java-spanner-jdbc/samples/pom.xml new file mode 100644 index 000000000000..c36572ade921 --- /dev/null +++ b/java-spanner-jdbc/samples/pom.xml @@ -0,0 +1,57 @@ + + + 4.0.0 + com.google.cloud + google-cloud-spanner-jdbc-samples + 0.0.1-SNAPSHOT + pom + Google Google Cloud Spanner JDBC Samples Parent + https://github.com/googleapis/java-spanner-jdbc + + Java idiomatic client for Google Cloud Platform services. + + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + + + + install-without-bom + snapshot + snippets + + + + + + org.apache.maven.plugins + maven-deploy-plugin + 3.1.4 + + true + + + + org.sonatype.plugins + nexus-staging-maven-plugin + 1.7.0 + + true + + + + + diff --git a/java-spanner-jdbc/samples/quickperf/.gitignore b/java-spanner-jdbc/samples/quickperf/.gitignore new file mode 100644 index 000000000000..1df5b5e0cfce --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/.gitignore @@ -0,0 +1,3 @@ +target +.vscode +.DS_Store \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/exampleconfigs/config.json b/java-spanner-jdbc/samples/quickperf/exampleconfigs/config.json new file mode 100644 index 000000000000..b43c32e1c9cb --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/exampleconfigs/config.json @@ -0,0 +1,9 @@ +{ + "project": "xxxx", + "instance": "xxx", + "database": "xxx", + "threads": 1, + "iterations": 100, + "query": "SELECT 1", + "writeMetricToFile": false +} \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/groupmgt_config.json b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/groupmgt_config.json new file mode 100644 index 000000000000..8bc094513273 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/groupmgt_config.json @@ -0,0 +1,13 @@ +{ + "project": "xxx", + "instance": "xxx", + "database": "users", + "threads": 4, + "iterations": 250, + "query": "INSERT INTO GroupMgmt (group_id, grpname) 
VALUES(?,?)", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#i"}, + {"order": 2, "value": "#s"} + ] +} \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/loadtestusers.json b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/loadtestusers.json new file mode 100644 index 000000000000..4a4a3aee87c0 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/loadtestusers.json @@ -0,0 +1,13 @@ +{ + "project": "xxx", + "instance": "xxx", + "database": "users", + "threads": 1, + "iterations": 10, + "query": "SELECT users.user_id, membership.enrolled, GroupMgmt.grpname FROM users, GroupMgmt, membership WHERE users.user_id = ? AND users.user_id = membership.user_id AND GroupMgmt.group_id = membership.group_id", + "samplingQuery": "SELECT user_id FROM Users TABLESAMPLE RESERVOIR (100000 ROWS)", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#pi"} + ] +} \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/membership_config.json b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/membership_config.json new file mode 100644 index 000000000000..d3c56d101a6c --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/membership_config.json @@ -0,0 +1,9 @@ +{ + "project": "xxx", + "instance": "xxx", + "database": "users", + "threads": 1, + "iterations": 100, + "query": "INSERT INTO membership(user_id, group_id, enrolled) VALUES((SELECT user_id FROM Users TABLESAMPLE RESERVOIR (1 ROWS)), (SELECT group_id FROM GroupMgmt TABLESAMPLE RESERVOIR (1 ROWS)), CURRENT_TIMESTAMP())", + "writeMetricToFile": false +} \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/run.sh b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/run.sh new file mode 100755 index 000000000000..ac82643f8cd1 --- /dev/null +++ 
b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Generate Data +cd ../.. + +mvn -q compile + +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/users_config.json" + +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/groupmgt_config.json" + +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/membership_config.json" + +# load test random users +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/loadtestusers.json" diff --git a/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/users.ddl b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/users.ddl new file mode 100644 index 000000000000..6498bb591a00 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/users.ddl @@ -0,0 +1,17 @@ +CREATE TABLE GroupMgmt ( + group_id INT64, + grpname STRING(MAX), +) PRIMARY KEY(group_id); + +CREATE TABLE Users ( + user_id INT64, + name STRING(MAX), +) PRIMARY KEY(user_id); + +CREATE TABLE membership ( + user_id INT64, + group_id INT64, + enrolled TIMESTAMP NOT NULL OPTIONS ( + allow_commit_timestamp = true + ), +) PRIMARY KEY(user_id, group_id); \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/users_config.json b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/users_config.json new file mode 100644 index 000000000000..6cdbbedc5cc9 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/exampleconfigs/users/users_config.json @@ -0,0 +1,13 @@ +{ + "project": "xxx", + "instance": "xxx", + "database": "users", + "threads": 1, + "iterations": 1000, + "query": "INSERT INTO Users (user_id, name) VALUES(?,?)", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#i"}, + {"order": 2, "value": "#s"} + ] +} \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/java.header b/java-spanner-jdbc/samples/quickperf/java.header new file mode 100644 index 000000000000..d0970ba7d375 --- /dev/null +++ 
b/java-spanner-jdbc/samples/quickperf/java.header @@ -0,0 +1,15 @@ +^/\*$ +^ \* Copyright \d\d\d\d,? Google (Inc\.|LLC)$ +^ \*$ +^ \* Licensed under the Apache License, Version 2\.0 \(the "License"\);$ +^ \* you may not use this file except in compliance with the License\.$ +^ \* You may obtain a copy of the License at$ +^ \*$ +^ \*[ ]+https?://www.apache.org/licenses/LICENSE-2\.0$ +^ \*$ +^ \* Unless required by applicable law or agreed to in writing, software$ +^ \* distributed under the License is distributed on an "AS IS" BASIS,$ +^ \* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied\.$ +^ \* See the License for the specific language governing permissions and$ +^ \* limitations under the License\.$ +^ \*/$ diff --git a/java-spanner-jdbc/samples/quickperf/license-checks.xml b/java-spanner-jdbc/samples/quickperf/license-checks.xml new file mode 100644 index 000000000000..a7a61194058d --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/license-checks.xml @@ -0,0 +1,10 @@ + + + + + + + + diff --git a/java-spanner-jdbc/samples/quickperf/pom.xml b/java-spanner-jdbc/samples/quickperf/pom.xml new file mode 100644 index 000000000000..99c400000bd3 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/pom.xml @@ -0,0 +1,106 @@ + + + + 4.0.0 + + com.google.cloud.jdbc.quickperf + jdbc-quickperf + 1.0.0 + jdbc-quickperf + + com.google.cloud + sdk-platform-java-config + 3.57.0 + + + + + UTF-8 + 1.8 + 1.8 + + + + + + com.google.cloud + libraries-bom + 26.76.0 + pom + import + + + + + + + net.datafaker + datafaker + 1.7.0 + + + com.google.cloud + google-cloud-spanner + + + commons-cli + commons-cli + 1.11.0 + + + com.google.cloud + google-cloud-spanner-jdbc + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + + + org.apache.commons + commons-lang3 + 3.20.0 + + + com.fasterxml.jackson.core + jackson-databind + 2.21.1 + + + + org.testcontainers + testcontainers + 2.0.3 + test + + + org.springframework.boot + spring-boot + 4.0.3 + 
test + + + junit + junit + 4.13.2 + test + + + + + + + org.codehaus.mojo + exec-maven-plugin + 3.6.3 + + com.google.cloud.jdbc.quickperf.QuickPerf + + + + + + \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/readme.md b/java-spanner-jdbc/samples/quickperf/readme.md new file mode 100644 index 000000000000..675155b94043 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/readme.md @@ -0,0 +1,302 @@ +# Introduction + +QuickPerf is a simple utility that uses JDBC to perform load testing on individual statements (such as queries and DML) against Spanner. It provides a rapid assessment of expected end-to-end latencies for specific statements, aiding in the performance tuning of schemas, indexes, and more. The tool includes random data generators to quickly fill a given schema with dummy data, respecting foreign-key relationships and interleaved tables. + +QuickPerf is not designed to replace comprehensive test suites like JMeter. Instead, it serves as a quick alternative for gaining performance insights or populating schemas. 
+ +**Key Features**: +* Multi-threading to simulate concurrency +* Query parameterization with random value generators (String, Integer, Timestamp) +* Sampling of records for seeding foreign-key relationships or testing against a specific subset of data +* Batch mode support +* Automatic statement and transaction tagging + + +# Installation on Ubuntu +``` +sudo apt-get install openjdk-8-jdk +sudo apt install maven +``` + +## Authentification +It is recommended to use a service account, otherwise larger scale tests will run into quota limitations + +Set active auth to service account: +``` +gcloud auth list +gcloud config set account xxx-compute@developer.gserviceaccount.com +``` + +# Configuration + +## Parameters +``` +{ + "project": "Project ID", + "instance": "Spanner Instance", + "database": "Spanner Database", + "threads": Number of concurrent threads, + "iterations": Number of how often a statement should be executed in a thread, + "query": "Statement (e.g. query)", + "samplingQuery": "OPTIONAL - Sampling query", + "writeMetricToFile": Will write latency metrics to a file (true/false), + "batchSize": If testing batching - determines how large a batch size would be, + "queryParams": [ + {"order": 1, "value": "query paramters with value generator"} + ] +} +``` + +## Example Config +``` +{ + "project": "xxxx", + "instance": "xxxx", + "database": "users", + "threads": 1, + "iterations": 10, + "query": "SELECT users.user_id, membership.enrolled, GroupMgmt.grpname FROM users, GroupMgmt, membership WHERE users.user_id = ? 
AND users.user_id = membership.user_id AND GroupMgmt.group_id = membership.group_id", + "samplingQuery": "SELECT user_id FROM Users TABLESAMPLE RESERVOIR (100000 ROWS)", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#pi"} + ] +} +``` + +# Hello World Example + +The folder `exampleconfigs/config.json` contains a simple setup that runs SELECT 1 against the database + +Configure the right Spanner `project` and `instance` and run the app. + +**config.json** +``` +{ +"project": "xxxx", +"instance": "xxx", +"database": "xxx", +"threads": 1, +"iterations": 100, +"query": "SELECT 1", +"writeMetricToFile": false, +"batchSize": 0 +} +``` + +**Run:** +``` +mvn -q exec:java -Dexec.args="-c exampleconfigs/config.json" +``` + + + +# End-to-End Example + +Generates three tables with n:m relationships and performs a load test. + +All in one runner generating test data and executing load test: +``` +exampleconfigs/users/run.sh +``` + +What needs to be done to run it: +* Create spanner instance +* Create database named `users` +* Set `project` and `instance` in each of the config JSON files located under `exampleconfigs/users/users_config.json` + * `exampleconfigs/users/users_config.json` + * `exampleconfigs/users/groupmgt_config.json` + * `exampleconfigs/users/membership_config.json` + * `exampleconfigs/users/loadtestusers.json` + + +**Generate users table:** + +``` +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/users_config.json" +``` + +users_config.json: +``` +{ + "project": "xxxx", + "instance": "xxxx", + "database": "users", + "threads": 4, + "iterations": 1000, + "query": "INSERT INTO Users (user_id, name) VALUES(?,?)", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#i"}, + {"order": 2, "value": "#s"} + ] +} +``` + +**Generate GroupMgmt table:** + +``` +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/groupmgt_config.json" +``` + +groupmgt_config.json +``` +{ + "project": "xxxx", + "instance": "xxxx", + 
"database": "users", + "threads": 4, + "iterations": 1000, + "query": "INSERT INTO GroupMgmt (group_id, grpname) VALUES(?,?)", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#i"}, + {"order": 2, "value": "#s"} + ] +} +``` + +**Generate Membership table:** + +Run: +``` +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/membership_config.json" +``` + +``` +{ + "project": "xxxx", + "instance": "xxxx", + "database": "users", + "threads": 1, + "iterations": 100, + "query": "INSERT INTO membership(user_id, group_id, enrolled) VALUES((SELECT user_id FROM Users TABLESAMPLE RESERVOIR (1 ROWS)), (SELECT group_id FROM GroupMgmt TABLESAMPLE RESERVOIR (1 ROWS)), CURRENT_TIMESTAMP())", + "writeMetricToFile": false +} +``` + + +Load test random users +``` +mvn -q exec:java -Dexec.args="-c exampleconfigs/users/loadtestusers.json" +``` + +# Randomization examples + +## String +Will generate a different random String value for each #s +``` +INSERT INTO transactions (id, name, ts) VALUES (#s, #s, CURRENT_TIMESTAMP()) +``` +``` +{ + "project": "xxxx", + "instance": "xxxx", + "database": "xxxx", + "threads": 4, + "iterations": 1000, + "query": "INSERT INTO transactions (id, name, ts) VALUES (?, ?, CURRENT_TIMESTAMP())", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#s"}, + {"order": 2, "value": "#s"} + ] +} +``` + +``` +SELECT * FROM transactions WHERE id=#s +``` +``` +{ + "project": "xxxx", + "instance": "xxxx", + "database": "xxxx", + "threads": 4, + "iterations": 1000, + "query": "SELECT * FROM transactions WHERE id=?", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#s"}, + ] +} +``` + + +## Integer +Will generate a different random value for each #i +``` +UPDATE accounts SET cid=#i WHERE aaId=4 +``` + +``` +{ + "project": "xxxx", + "instance": "xxxx", + "database": "xxxx", + "threads": 4, + "iterations": 1000, + "query": "UPDATE accounts SET ? 
WHERE aaId=4", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#i"} + ] +} +``` + +## Integer min max +Generates #i(1,10) integer values between 1 and 10 +``` +INSERT INTO test (id, groupid, amount) VALUES (#s, #i(1,2)#, #i) +``` + +## Timestamp +A workaround for not being able to randomize timestamps is to use CURRENT_TIMESTAMP() +``` +INSERT INTO transactions (id, name, ts) VALUES (#s, #s, CURRENT_TIMESTAMP()) +``` +``` +{ + "project": "xxxx", + "instance": "xxxx", + "database": "xxxx", + "threads": 4, + "iterations": 1000, + "query": "INSERT INTO transactions (id, name, ts) VALUES (?, ?, CURRENT_TIMESTAMP())", + "writeMetricToFile": false, + "queryParams": [ + {"order": 1, "value": "#s"}, + {"order": 2, "value": "#s"} + ] +} +``` + +## Sampling IDs +Sometimes it might be required to sample existing IDs that are then used in the query that is executed. +The parameter ```-s``` allows pulling a sampled dataset, but only one column is allowed in the result set. +This query is executed only once, before the beginning of the run, and the dataset is reused across the threads.
+ +* #ps will add quotes - such as for Strings +* #pi will **not** add quotes such as when integers are used + +In this case the #ps is the placeholder for samples that are pulled from the -s parameter +``` +-q 'SELECT * FROM test WHERE id = #ps'' +-s 'SELECT id FROM test TABLESAMPLE RESERVOIR (100 ROWS)' +``` + +## Many-to-Many Relationship Example +Insert Users +``` +INSERT INTO Users (user_id, name) VALUES(#i,#s) +``` + +Insert Groups +``` +INSERT INTO GroupMgmt (group_id, grpname) VALUES(#i,#s) +``` + +Insert relationships with sampling: +``` +INSERT INTO membership(user_id, group_id, enrolled) VALUES((SELECT user_id FROM Users TABLESAMPLE RESERVOIR (1 ROWS)), (SELECT group_id FROM GroupMgmt TABLESAMPLE RESERVOIR (1 ROWS)), CURRENT_TIMESTAMP()) +``` \ No newline at end of file diff --git a/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/ProgressTracker.java b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/ProgressTracker.java new file mode 100644 index 000000000000..90b241b1dd5f --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/ProgressTracker.java @@ -0,0 +1,79 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.jdbc.quickperf;

import java.util.List;

/**
 * Background thread that renders a console progress bar while the {@link QuickPerfRunner}
 * worker threads execute their iterations.
 *
 * <p>The tracker polls each worker's {@code getProgress()} counter and redraws the bar until
 * the combined progress reaches the expected total, or the tracker is interrupted.
 */
public class ProgressTracker extends Thread {
  /** Initial delay (ms) before polling starts, giving workers time to open connections. */
  private static final int SLEEP_TIME_INIT = 2000;

  /** Delay (ms) between successive progress polls. */
  private static final int SLEEP_TIME_POLL = 200;

  /** Worker threads whose progress is aggregated. */
  private final List<QuickPerfRunner> threadList;

  /** Total number of iterations expected across all worker threads. */
  private final int maxIt;

  /** Sum of iterations completed so far, recomputed on every poll. */
  private int currentIt = 0;

  public ProgressTracker(List<QuickPerfRunner> threadList, int maxIt) {
    this.threadList = threadList;
    this.maxIt = maxIt;
  }

  @Override
  public void run() {
    sleep(SLEEP_TIME_INIT);
    while (currentIt < maxIt) {
      // Recompute the total from scratch each poll; getProgress() only grows, so a
      // snapshot sum is safe even while the workers keep running.
      currentIt = 0;
      for (QuickPerfRunner thread : threadList) {
        currentIt = currentIt + thread.getProgress();
      }

      // Draw once per poll with the full total. (The original redrew inside the
      // per-thread loop, briefly showing misleading partial percentages.)
      int percent = (int) Math.ceil(((double) currentIt / maxIt) * 100.0);
      print_progress(percent);

      if (sleep(SLEEP_TIME_POLL)) {
        break; // interrupted: stop tracking
      }
    }
    print_progress(100);
  }

  /** Renders a 50-character wide progress bar for the given percentage on the current line. */
  public void print_progress(int percent) {
    StringBuilder bar = new StringBuilder("Progress: [");

    for (int i = 0; i < 50; i++) {
      if (i < (percent / 2)) {
        bar.append("=");
      } else if (i == (percent / 2)) {
        bar.append(">");
      } else {
        bar.append(" ");
      }
    }

    bar.append("] ").append(percent).append("% ");
    System.out.print("\r" + bar);
  }

  /**
   * Sleeps for the given number of milliseconds.
   *
   * @return {@code true} if the sleep was interrupted (caller should stop polling)
   */
  private boolean sleep(int sleeptime) {
    try {
      Thread.sleep(sleeptime);
    } catch (InterruptedException e) {
      System.err.println("Progress tracker thread interrupted");
      return true;
    }
    return false;
  }
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.jdbc.quickperf; + +import com.google.cloud.jdbc.quickperf.config.Config; +import com.google.cloud.jdbc.quickperf.config.ConfigParser; +import java.io.BufferedWriter; +import java.io.FileWriter; +import java.io.IOException; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.CommandLineParser; +import org.apache.commons.cli.DefaultParser; +import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.lang3.ArrayUtils; + +public class QuickPerf extends Thread { + + private static final String BREAK_STR = + "###################################################################################################"; + + // TODO: make measurement file configurable + private static final String MEASURES_FILE_NAME = "measures.txt"; + + public static void main(String[] args) throws Exception { + Options options = new Options(); + + options.addOption(QuickPerf.addOption("c", "config", true, "Config File")); + + CommandLineParser parser = new DefaultParser(); + HelpFormatter formatter = new HelpFormatter(); + CommandLine cmd = null; + + ZonedDateTime testStartTimestamp = ZonedDateTime.now(); + + try { + cmd = parser.parse(options, args); + } catch (ParseException e) { + System.out.println(e.getMessage()); + formatter.printHelp("utility-name", options); + + 
System.exit(1); + } + + Config config = ConfigParser.parseConfigFile(cmd.getOptionValue("config")); + + float[] measures = new float[config.getIterations() * config.getThreads()]; + + // initialize threads (for sampling if present) + List threadList = new ArrayList(); + for (int i = 0; i < config.getThreads(); i++) { + QuickPerfRunner thread = new QuickPerfRunner(config); + if (config.getSamplingQuery() != null) { + thread.runSampling(); + } + threadList.add(thread); + } + + // start threads + for (QuickPerfRunner thread : threadList) { + thread.start(); + } + + // ProgressBar Tracker Thread + ProgressTracker progressTracker = + progressTracker = + new ProgressTracker(threadList, config.getIterations() * config.getThreads()); + + progressTracker.start(); + progressTracker.join(); + + int i = 0; + for (QuickPerfRunner thread : threadList) { + thread.join(); + + if (i == 0) { + measures = thread.getMeasures(); + } else { + measures = ArrayUtils.addAll(measures, thread.getMeasures()); + } + i++; + } + + // write to file before its sorted + if (config.getWriteMetricToFile()) { + try { + writeMeasuresToFile(measures, MEASURES_FILE_NAME); + } catch (IOException e) { + System.err.println("An error occurred while writing the file: " + e.getMessage()); + } + } + + System.out.println("\n" + BREAK_STR); + System.out.println("Query: " + config.getQuery()); + System.out.println("Params: " + config.paramsToString()); + System.out.println("Tag: " + Config.DEFAULT_TAG); + if (config.getBatchSize() > 0) { + System.out.println("Batching Enabled (size): " + config.getBatchSize()); + } + System.out.printf("Start: %s End: %s%n", testStartTimestamp, ZonedDateTime.now()); + System.out.printf( + "Finished with a total of %s runs across %s Threads.\nLatencies (ms): p50 = %s, p95 = %s, p99 = %s, min = %s, max = %s%n", + config.getIterations() * config.getThreads(), + config.getThreads(), + calcPerc(measures, 50), + calcPerc(measures, 95), + calcPerc(measures, 99), + getMin(measures), + 
getMax(measures)); + System.out.println(BREAK_STR); + } + + public static Option addOption(String option, String longOption, boolean hasArgs, String desc) { + Option opt = new Option(option, longOption, hasArgs, desc); + opt.setRequired(true); + + return opt; + } + + public static Option addOption( + String option, String longOption, boolean hasArgs, String desc, boolean required) { + Option opt = new Option(option, longOption, hasArgs, desc); + opt.setRequired(required); + + return opt; + } + + public static float calcPerc(float[] nums, double percentile) { + int n = nums.length; + Arrays.sort(nums); + + double index = (percentile / 100) * (n - 1); + + if (index == Math.floor(index)) { + return nums[(int) index]; + } else { + int lowerIndex = (int) Math.floor(index); + int upperIndex = (int) Math.ceil(index); + float lowerValue = nums[lowerIndex]; + float upperValue = nums[upperIndex]; + return (float) ((1 - (index - lowerIndex)) * lowerValue + (index - lowerIndex) * upperValue); + } + } + + public static float getMax(float[] measures) { + if (measures == null || measures.length == 0) { + throw new IllegalArgumentException("Array is null or empty"); + } + + Arrays.sort(measures); + return measures[measures.length - 1]; + } + + public static float getMin(float[] measures) { + if (measures == null || measures.length == 0) { + throw new IllegalArgumentException("Array is null or empty"); + } + + Arrays.sort(measures); + return measures[0]; + } + + public static void writeMeasuresToFile(float[] array, String fileName) throws IOException { + try (BufferedWriter writer = new BufferedWriter(new FileWriter(fileName))) { + for (float value : array) { + writer.write(Float.toString(value)); + writer.newLine(); + } + } + } +} diff --git a/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/QuickPerfRunner.java b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/QuickPerfRunner.java new file mode 100644 index 
000000000000..6b91fa1c824c --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/QuickPerfRunner.java @@ -0,0 +1,323 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.jdbc.quickperf; + +import com.google.cloud.jdbc.quickperf.config.Config; +import com.google.cloud.jdbc.quickperf.config.QueryParam; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.jdbc.CloudSpannerJdbcConnection; +import java.security.SecureRandom; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.Random; +import java.util.UUID; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import net.datafaker.Faker; + +public class QuickPerfRunner extends Thread { + private static final Properties DEFAULT_PROPERTIES = new Properties(); + + // perf measurement + private float[] measures; + + private final List sampledValueList = new ArrayList(); + private final Config config; + + private int progress; + + public QuickPerfRunner(Config config) { + this.config = config; + } + + public void runSampling() { + System.out.println("Running Sampling... 
"); + + try (Connection connection = createConnection(config)) { + try (Statement statement = connection.createStatement()) { + boolean hasResults = statement.execute(config.getSamplingQuery()); + + if (!hasResults) { + System.out.println("Nothing sampled"); + return; + } + + ResultSet rs = statement.getResultSet(); + while (rs.next()) { + String value = rs.getString(1); + sampledValueList.add(value); + } + + System.out.printf("Finished sampling %s records%n", sampledValueList.size()); + } catch (SQLException e) { + //noinspection CallToPrintStackTrace + e.printStackTrace(); + } + + } catch (SQLException e) { + //noinspection CallToPrintStackTrace + e.printStackTrace(); + } + } + + private Connection createConnection(Config config) throws SQLException { + String connectionUrl = createConnectionURL(config); + Properties properties = createConnectionProperties(); + return DriverManager.getConnection(connectionUrl, properties); + } + + private String createConnectionURL(Config config) { + if (config.isIsEmulator()) { + return String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s?autoConfigEmulator=true", + config.getProject(), config.getInstance(), config.getDatabase()); + } else { + return String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + config.getProject(), config.getInstance(), config.getDatabase()); + } + } + + private Properties createConnectionProperties() { + if (System.getProperty("spanner.host") != null) { + Properties properties = new Properties(); + properties.setProperty("endpoint", System.getProperty("spanner.host")); + return properties; + } + return DEFAULT_PROPERTIES; + } + + public void run() { + if (config.getBatchSize() > 0) { + int val = (int) Math.ceil((double) config.getIterations() / config.getBatchSize()); + measures = new float[val]; + } else { + measures = new float[config.getIterations()]; + } + + try (Connection connection = createConnection(config)) { + + // determine database dialect to 
set right tagging syntax + boolean isGoogleSQL = + connection + .unwrap(CloudSpannerJdbcConnection.class) + .getDialect() + .equals(Dialect.GOOGLE_STANDARD_SQL); + String tagPrefix = isGoogleSQL ? "" : "SPANNER."; + + connection.setAutoCommit(false); + + // if there is DML switch to r/w transaction mode and apply transaction tagging. + // Otherwise set to read-only mode. + if (config.getQuery().contains("INSERT") + || config.getQuery().contains("UPDATE") + || config.getQuery().contains("DELETE")) { + // read-write + connection.createStatement().execute("SET TRANSACTION READ WRITE"); + connection + .createStatement() + .execute(String.format("SET %sTRANSACTION_TAG = '%s'", tagPrefix, config.DEFAULT_TAG)); + + } else { + // read-only + // connection.createStatement().execute("SET TRANSACTION READ ONLY"); + connection.setAutoCommit(true); + } + + PreparedStatement statement = connection.prepareStatement(config.getQuery()); + int batchCounter = config.getBatchSize(); + int batchRound = 0; + + for (int i = 0; i < config.getIterations(); i++) { + if (config.getBatchSize() == 0) { + // single statements + try { + if (config.getQueryParams() != null) { + parametrizeStatement(statement, config.getQueryParams()); + } + connection + .createStatement() + .execute(String.format("SET %sSTATEMENT_TAG='%s'", tagPrefix, config.DEFAULT_TAG)); + + long start = System.nanoTime(); + boolean hasResults = statement.execute(); + if (!connection.getAutoCommit()) { + connection.commit(); + connection + .createStatement() + .execute(String.format("SET %sTRANSACTION_TAG = '%s'", tagPrefix, config.DEFAULT_TAG)); + } + long stop = System.nanoTime() - start; + + if (hasResults) { + statement.getResultSet().close(); + } + + measures[i] = (float) stop / 1000000; + progress++; + } catch (Exception e) { + if (e.getMessage().contains("ALREADY_EXISTS:")) { + System.out.println("duplicate key - retry"); + i--; + } else { + throw e; + } + } + } else if (config.getQuery().contains("INSERT") + || 
config.getQuery().contains("UPDATE") + || config.getQuery().contains("DELETE")) { + // batching + try { + if (config.getQueryParams() != null) { + parametrizeStatement(statement, config.getQueryParams()); + } + + statement.addBatch(); + + if (batchCounter == 0 || i == config.getIterations() - 1) { + connection + .createStatement() + .execute( + String.format("SET %sSTATEMENT_TAG='%s'", tagPrefix, config.DEFAULT_TAG)); + + long start = System.nanoTime(); + statement.executeBatch(); + if (!connection.getAutoCommit()) { + connection.commit(); + } + long stop = System.nanoTime() - start; + + batchCounter = config.getBatchSize(); + + measures[batchRound] = (float) stop / 1000000; + batchRound++; + } + + progress++; + batchCounter--; + } catch (Exception e) { + if (e.getMessage().contains("ALREADY_EXISTS:")) { + System.out.println("duplicate key - retry"); + i--; + } else { + throw e; + } + } + } else { + System.out.println( + "Batching is only allowed for DML. Set batchSize=0 to disable batching."); + System.exit(1); + } + } + } catch (SQLException e) { + //noinspection CallToPrintStackTrace + e.printStackTrace(); + } + } + + public static float[] appendFloatArray(float[] originalArray, float[] elementsToAppend) { + int originalLength = originalArray.length; + int elementsLength = elementsToAppend.length; + + float[] resultArray = new float[originalLength + elementsLength]; + System.arraycopy(originalArray, 0, resultArray, 0, originalLength); + System.arraycopy(elementsToAppend, 0, resultArray, originalLength, elementsLength); + + return resultArray; + } + + private void parametrizeStatement(PreparedStatement statement, List paramList) + throws SQLException { + for (QueryParam param : paramList) { + if (param.getValue().contains("#i")) { + // integer plus integer with custom range + int value = replaceInt(param.getValue()); + statement.setInt(param.getOrder(), value); + } else if (param.getValue().contains("#d")) { + // double + Double value = 
replaceDouble(param.getValue()); + statement.setDouble(param.getOrder(), value); + } else if (param.getValue().contains("#s")) { + // String + String value = replaceString(param.getValue()); + statement.setString(param.getOrder(), value); + } else if (param.getValue().contains("#ps")) { + // Sampled Query - String + String value = replaceSampleQueryString(param.getValue()); + statement.setString(param.getOrder(), value); + } else if (param.getValue().contains("#pi")) { + // Sampled Query - Integer + Long value = replaceSampleQueryInt(param.getValue()); + statement.setLong(param.getOrder(), value); + } + } + } + + private int replaceInt(String value) { + Faker f = new Faker(); + // integer with min, max + String pattern = "#i\\((\\d+),(\\d+)\\)#"; + Pattern regexPattern = Pattern.compile(pattern); + Matcher matcher = regexPattern.matcher(value); + + if (matcher.find()) { + int min = Integer.parseInt(matcher.group(1)); + int max = Integer.parseInt(matcher.group(2)); + + return f.number().numberBetween(min, max); + } + + return Integer.parseInt( + value.replaceFirst("#i", String.valueOf(new SecureRandom().nextInt(Integer.MAX_VALUE)))); + } + + private Double replaceDouble(String value) { + Faker f = new Faker(); + + return Double.valueOf( + value.replaceFirst("#d", String.valueOf(f.number().randomDouble(2, 0, 999999999)))); + } + + private String replaceString(String value) { + return value.replaceFirst("#s", UUID.randomUUID().toString()); + } + + private String replaceSampleQueryString(String value) { + int randomIndex = new Random().nextInt(sampledValueList.size()); + return value.replaceFirst("#ps", sampledValueList.get(randomIndex)); + } + + private Long replaceSampleQueryInt(String value) { + int randomIndex = new Random().nextInt(sampledValueList.size()); + + return Long.parseLong(value.replaceFirst("#pi", sampledValueList.get(randomIndex))); + } + + public int getProgress() { + return progress; + } + + public float[] getMeasures() { + return measures; + } +} 
diff --git a/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/Config.java b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/Config.java new file mode 100644 index 000000000000..b869be386e46 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/Config.java @@ -0,0 +1,145 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.jdbc.quickperf.config; + +import java.util.List; +import java.util.Random; + +public class Config { + public static String DEFAULT_TAG = "perftest_" + (new Random()).nextInt(300); + + private String project; + private String instance; + private String database; + private int threads; + private int iterations; + private String query; + private String samplingQuery; + private boolean writeMetricToFile; + private int batchSize; + private boolean isEmulator; + private List queryParams; + + public String paramsToString() { + StringBuilder retVal = new StringBuilder(); + + if (queryParams != null) { + + for (QueryParam param : queryParams) { + retVal.append(String.format("%s:%s ", param.getOrder(), param.getValue())); + } + } + + return retVal.toString(); + } + + public int getBatchSize() { + return this.batchSize; + } + + public void setBatchSize(int batchSize) { + this.batchSize = batchSize; + } + + public String getProject() { + return project; + } + + public void setProject(String project) { + this.project = project; + } + + public String getInstance() { + return instance; + } + + public void setInstance(String instance) { + this.instance = instance; + } + + public String getDatabase() { + return database; + } + + public void setDatabase(String database) { + this.database = database; + } + + public int getThreads() { + return threads; + } + + public void setThreads(int threads) { + this.threads = threads; + } + + public int getIterations() { + return iterations; + } + + public void setIterations(int iterations) { + this.iterations = iterations; + } + + public String getQuery() { + return query; + } + + public void setQuery(String query) { + this.query = query; + } + + public boolean isWriteMetricToFile() { + return writeMetricToFile; + } + + public void setWriteMetricToFile(boolean writeMetricToFile) { + this.writeMetricToFile = writeMetricToFile; + } + + public List getQueryParams() { + return queryParams; + } + + public void 
setQueryParams(List queryParams) { + this.queryParams = queryParams; + } + + public String getSamplingQuery() { + return this.samplingQuery; + } + + public void setSamplingQuery(String samplingQuery) { + this.samplingQuery = samplingQuery; + } + + public boolean getWriteMetricToFile() { + return this.writeMetricToFile; + } + + public boolean isIsEmulator() { + return this.isEmulator; + } + + public boolean getIsEmulator() { + return this.isEmulator; + } + + public void setIsEmulator(boolean isEmulator) { + this.isEmulator = isEmulator; + } +} diff --git a/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/ConfigParser.java b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/ConfigParser.java new file mode 100644 index 000000000000..8f056d114cb5 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/ConfigParser.java @@ -0,0 +1,30 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.jdbc.quickperf.config; + +import com.fasterxml.jackson.databind.ObjectMapper; +import java.io.File; +import java.io.IOException; + +public class ConfigParser { + + public static Config parseConfigFile(String configFile) throws IOException { + ObjectMapper mapper = new ObjectMapper(); + + return mapper.readValue(new File(configFile), Config.class); + } +} diff --git a/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/QueryParam.java b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/QueryParam.java new file mode 100644 index 000000000000..52807eb499d1 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/main/java/com/google/cloud/jdbc/quickperf/config/QueryParam.java @@ -0,0 +1,38 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.jdbc.quickperf.config; + +public class QueryParam { + private int order; + private String value; + + public int getOrder() { + return order; + } + + public void setOrder(int order) { + this.order = order; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } +} diff --git a/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/AppTest.java b/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/AppTest.java new file mode 100644 index 000000000000..c147ebaa9b87 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/AppTest.java @@ -0,0 +1,174 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.jdbc.quickperf; + +import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.google.cloud.spanner.connection.SpannerPool; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import javax.annotation.Nonnull; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.springframework.boot.SpringApplication; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.utility.DockerImageName; + +public class AppTest { + + private static final String TEST_FILE = "src/test/resources/testfile.json"; + + private static GenericContainer emulator; + + private static final String projectId = "test"; + private static final String instanceId = "test"; + private static final String databaseId = "quickperftest"; + + @BeforeClass + public static void setup() throws Exception { + System.out.println("Starting Emulator"); + emulator = + new GenericContainer<>( + DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator:latest")) + .withExposedPorts(9010) + .waitingFor(Wait.forListeningPort()); + + emulator.start(); + System.out.println("Finished starting Emulator"); + + List ddlList = + Arrays.asList( + "CREATE TABLE GroupMgmt (" + + "group_id INT64," + + "grpname STRING(MAX)," + + ") PRIMARY KEY(group_id)", + "CREATE TABLE Users (" + + "user_id 
INT64," + + "name STRING(MAX)," + + ") PRIMARY KEY(user_id)", + "CREATE TABLE membership (" + + "user_id INT64," + + "group_id INT64," + + "enrolled TIMESTAMP NOT NULL OPTIONS (" + + " allow_commit_timestamp = true" + + ")," + + ") PRIMARY KEY(user_id, group_id)"); + try (Connection connection = createConnection(); + Statement statement = connection.createStatement()) { + for (String ddl : ddlList) { + statement.addBatch(ddl); + } + statement.executeBatch(); + } + // create test file + ProjectConfig projectConfig = createProjectConfig(); + + // Write the JSON to a file + ObjectMapper mapper = new ObjectMapper(); + mapper.enable(SerializationFeature.INDENT_OUTPUT); + mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL); + + File file = new File(TEST_FILE); + mapper.writeValue(file, projectConfig); + } + + @Nonnull + private static ProjectConfig createProjectConfig() { + ProjectConfig projectConfig = new ProjectConfig(); + projectConfig.setProject(projectId); + projectConfig.setInstance(instanceId); + projectConfig.setDatabase(databaseId); + projectConfig.setThreads(1); + projectConfig.setIterations(1000); + projectConfig.setQuery("INSERT INTO Users (user_id, name) VALUES(?,?)"); + projectConfig.setWriteMetricToFile(false); + projectConfig.setIsEmulator(true); + + QueryParam param1 = new QueryParam(1, "#i"); + QueryParam param2 = new QueryParam(2, "#s"); + projectConfig.setQueryParams(Arrays.asList(param1, param2)); + return projectConfig; + } + + @AfterClass + public static void cleanup() throws IOException { + // Close all Spanner connections. + SpannerPool.closeSpannerPool(); + + // Write an empty test file + Path path = Paths.get(TEST_FILE); + Files.newBufferedWriter(path, TRUNCATE_EXISTING).close(); + + // Stop the emulator. 
+ emulator.stop(); + } + + private static Connection createConnection() throws SQLException { + String url = + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s?autoConfigEmulator=true", + projectId, instanceId, databaseId); + Properties properties = new Properties(); + properties.put("endpoint", "localhost:" + emulator.getMappedPort(9010)); + return DriverManager.getConnection(url, properties); + } + + @Test + public void testRunApplication() throws Exception { + + System.setProperty("spanner.emulator", "true"); + System.setProperty("spanner.host", "localhost:" + emulator.getMappedPort(9010)); + SpringApplication.run(AppTest.class).close(); + + String[] userConfig = {"-c" + TEST_FILE}; + QuickPerf.main(userConfig); + + try (Connection connection = createConnection()) { + testQuery(connection, "SELECT count(*) FROM Users", 1000); + } + } + + private void testQuery(Connection connection, String query, int expected) throws SQLException { + try (Statement statement = connection.createStatement()) { + boolean hasResults = statement.execute(query); + assertTrue(hasResults); + + ResultSet rs = statement.getResultSet(); + while (rs.next()) { + int value = rs.getInt(1); + assertEquals(expected, value); + } + } + } +} diff --git a/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/ProjectConfig.java b/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/ProjectConfig.java new file mode 100644 index 000000000000..372d100fd330 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/ProjectConfig.java @@ -0,0 +1,113 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.jdbc.quickperf; + +import java.util.List; + +public class ProjectConfig { + private String project; + private String instance; + private String database; + private int threads; + private int iterations; + private String query; + private boolean writeMetricToFile; + private boolean isEmulator; + private List queryParams; + + // Getters and setters + + public String getProject() { + return project; + } + + public void setProject(String project) { + this.project = project; + } + + public String getInstance() { + return instance; + } + + public void setInstance(String instance) { + this.instance = instance; + } + + public String getDatabase() { + return database; + } + + public void setDatabase(String database) { + this.database = database; + } + + public int getThreads() { + return threads; + } + + public void setThreads(int threads) { + this.threads = threads; + } + + public int getIterations() { + return iterations; + } + + public void setIterations(int iterations) { + this.iterations = iterations; + } + + public String getQuery() { + return query; + } + + public void setQuery(String query) { + this.query = query; + } + + public boolean isWriteMetricToFile() { + return writeMetricToFile; + } + + public void setWriteMetricToFile(boolean writeMetricToFile) { + this.writeMetricToFile = writeMetricToFile; + } + + public List getQueryParams() { + return queryParams; + } + + public void setQueryParams(List queryParams) { + this.queryParams = queryParams; + } + + public boolean getWriteMetricToFile() { + return 
this.writeMetricToFile; + } + + public boolean isIsEmulator() { + return this.isEmulator; + } + + public boolean getIsEmulator() { + return this.isEmulator; + } + + public void setIsEmulator(boolean isEmulator) { + this.isEmulator = isEmulator; + } +} diff --git a/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/QueryParam.java b/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/QueryParam.java new file mode 100644 index 000000000000..dcc0d85d4e38 --- /dev/null +++ b/java-spanner-jdbc/samples/quickperf/src/test/java/com/google/cloud/jdbc/quickperf/QueryParam.java @@ -0,0 +1,45 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.jdbc.quickperf; + +public class QueryParam { + private int order; + private String value; + + public QueryParam(int order, String value) { + this.order = order; + this.value = value; + } + + // Getters and setters + + public int getOrder() { + return order; + } + + public void setOrder(int order) { + this.order = order; + } + + public String getValue() { + return value; + } + + public void setValue(String value) { + this.value = value; + } +} diff --git a/java-spanner-jdbc/samples/quickperf/src/test/resources/testfile.json b/java-spanner-jdbc/samples/quickperf/src/test/resources/testfile.json new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/java-spanner-jdbc/samples/snapshot/pom.xml b/java-spanner-jdbc/samples/snapshot/pom.xml new file mode 100644 index 000000000000..c2a919dec96f --- /dev/null +++ b/java-spanner-jdbc/samples/snapshot/pom.xml @@ -0,0 +1,84 @@ + + + 4.0.0 + com.google.cloud + spanner-jdbc-snapshot + jar + Google Google Cloud Spanner JDBC Snapshot Samples + https://github.com/googleapis/java-spanner-jdbc + + + + com.google.cloud.samples + shared-configuration + 1.2.2 + + + + + 1.8 + 1.8 + UTF-8 + + + + + + com.google.cloud + google-cloud-spanner-jdbc + 2.35.5-SNAPSHOT + + + + + junit + junit + 4.13.2 + test + + + com.google.truth + truth + 1.4.5 + test + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-snippets-source + + add-source + + + + ../snippets/src/main/java + + + + + add-snippets-tests + + add-test-source + + + + ../snippets/src/test/java + + + + + + + + \ No newline at end of file diff --git a/java-spanner-jdbc/samples/snippets/java.header b/java-spanner-jdbc/samples/snippets/java.header new file mode 100644 index 000000000000..d0970ba7d375 --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/java.header @@ -0,0 +1,15 @@ +^/\*$ +^ \* Copyright \d\d\d\d,? 
Google (Inc\.|LLC)$ +^ \*$ +^ \* Licensed under the Apache License, Version 2\.0 \(the "License"\);$ +^ \* you may not use this file except in compliance with the License\.$ +^ \* You may obtain a copy of the License at$ +^ \*$ +^ \*[ ]+https?://www.apache.org/licenses/LICENSE-2\.0$ +^ \*$ +^ \* Unless required by applicable law or agreed to in writing, software$ +^ \* distributed under the License is distributed on an "AS IS" BASIS,$ +^ \* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied\.$ +^ \* See the License for the specific language governing permissions and$ +^ \* limitations under the License\.$ +^ \*/$ diff --git a/java-spanner-jdbc/samples/snippets/license-checks.xml b/java-spanner-jdbc/samples/snippets/license-checks.xml new file mode 100644 index 000000000000..a7a61194058d --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/license-checks.xml @@ -0,0 +1,10 @@ + + + + + + + + diff --git a/java-spanner-jdbc/samples/snippets/pom.xml b/java-spanner-jdbc/samples/snippets/pom.xml new file mode 100644 index 000000000000..c683ecc6b437 --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/pom.xml @@ -0,0 +1,134 @@ + + + 4.0.0 + spanner-jdbc-snippets + jar + Google Google Cloud Spanner JDBC Snippets + https://github.com/googleapis/java-spanner-jdbc + + + com.google.cloud + sdk-platform-java-config + 3.57.0 + + + + + 1.8 + 1.8 + UTF-8 + + + + + + + + com.google.cloud + libraries-bom + 26.76.0 + pom + import + + + + + + + com.google.cloud + google-cloud-spanner-jdbc + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + + + + + com.google.cloud + google-cloud-spanner + + + + org.testcontainers + testcontainers + 2.0.3 + test + + + junit + junit + 4.13.2 + test + + + + + + + maven-resources-plugin + + + copy-resources + validate + + copy-resources + + + ${project.build.directory}/jdbc-snippets + + + resources + true + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-dependencies + 
prepare-package + + copy-dependencies + + + ${project.build.directory}/jdbc-snippets/lib + false + false + true + + + + + + org.apache.maven.plugins + maven-jar-plugin + + jdbc-snippets/jdbc-samples + + false + + com.example.spanner.jdbc.JdbcSample + true + lib/ + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + **/SingerProto.java + + + + + diff --git a/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/IsolationLevel.java b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/IsolationLevel.java new file mode 100644 index 000000000000..7153b7c7aa2d --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/IsolationLevel.java @@ -0,0 +1,73 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.jdbc; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Properties; + +final class IsolationLevel { + + static void isolationLevel( + final String project, + final String instance, + final String database, + final Properties properties) + throws SQLException { + String url = String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database); + try (Connection connection = DriverManager.getConnection(url, properties)) { + connection.setAutoCommit(false); + + // Spanner supports setting the isolation level to: + // 1. TRANSACTION_SERIALIZABLE (this is the default) + // 2. TRANSACTION_REPEATABLE_READ + + // The following line sets the default isolation level that will be used + // for all read/write transactions on this connection. + connection.setTransactionIsolation( + Connection.TRANSACTION_REPEATABLE_READ); + + // This query will not take any locks when using + // isolation level repeatable read. 
+ try (ResultSet resultSet = connection + .createStatement() + .executeQuery("SELECT SingerId, Active " + + "FROM Singers " + + "ORDER BY LastName")) { + while (resultSet.next()) { + try (PreparedStatement statement = connection.prepareStatement( + "INSERT OR UPDATE INTO SingerHistory " + + "(SingerId, Active, CreatedAt) " + + "VALUES (?, ?, CURRENT_TIMESTAMP)")) { + statement.setLong(1, resultSet.getLong(1)); + statement.setBoolean(2, resultSet.getBoolean(2)); + statement.executeUpdate(); + } + } + } + connection.commit(); + } + } + + private IsolationLevel() { + } +} diff --git a/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/JdbcSample.java b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/JdbcSample.java new file mode 100644 index 000000000000..2b340feb195c --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/JdbcSample.java @@ -0,0 +1,1999 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.example.spanner.jdbc; + +import com.example.spanner.jdbc.SingerProto.Genre; +import com.example.spanner.jdbc.SingerProto.SingerInfo; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminSettings; +import com.google.cloud.spanner.jdbc.CloudSpannerJdbcConnection; +import com.google.cloud.spanner.jdbc.ProtoEnumType; +import com.google.cloud.spanner.jdbc.ProtoMessageType; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.database.v1.CreateDatabaseRequest; +import com.google.spanner.admin.database.v1.DatabaseDialect; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.v1.DatabaseName; +import io.grpc.ManagedChannelBuilder; +import java.io.IOException; +import java.io.InputStream; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.ExecutionException; + +public final class JdbcSample { + static class Singer { + + /** Primary key in the Singers table. */ + private final long singerId; + + /** Mapped to the FirstName column. */ + private final String firstName; + + /** Mapped to the FirstName column. 
*/ + private final String lastName; + + Singer(final long id, final String first, final String last) { + this.singerId = id; + this.firstName = first; + this.lastName = last; + } + + public long getSingerId() { + return singerId; + } + + public String getFirstName() { + return firstName; + } + + public String getLastName() { + return lastName; + } + } + + static class Album { + + /** The first part of the primary key of Albums. */ + private final long singerId; + + /** The second part of the primary key of Albums. */ + private final long albumId; + + /** Mapped to the AlbumTitle column. */ + private final String albumTitle; + + Album(final long singer, final long album, final String title) { + this.singerId = singer; + this.albumId = album; + this.albumTitle = title; + } + + public long getSingerId() { + return singerId; + } + + public long getAlbumId() { + return albumId; + } + + public String getAlbumTitle() { + return albumTitle; + } + } + + // [START spanner_insert_data] + // [START spanner_postgresql_insert_data] + /** The list of Singers to insert. */ + static final List SINGERS = + Arrays.asList( + new Singer(1, "Marc", "Richards"), + new Singer(2, "Catalina", "Smith"), + new Singer(3, "Alice", "Trentor"), + new Singer(4, "Lea", "Martin"), + new Singer(5, "David", "Lomond")); + + /** The list of Albums to insert. */ + static final List ALBUMS = + Arrays.asList( + new Album(1, 1, "Total Junk"), + new Album(1, 2, "Go, Go, Go"), + new Album(2, 1, "Green"), + new Album(2, 2, "Forever Hold Your Peace"), + new Album(2, 3, "Terrified")); + + // [END spanner_insert_data] + // [END spanner_postgresql_insert_data] + + private JdbcSample() { + } + + // [START spanner_create_database] + static void createDatabase( + final DatabaseAdminClient dbAdminClient, + final InstanceName instanceName, + final String databaseId, + final Properties properties) throws SQLException { + // Use the Spanner admin client to create a database. 
+ CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + .setCreateStatement("CREATE DATABASE `" + databaseId + "`") + .setParent(instanceName.toString()) + .build(); + try { + dbAdminClient.createDatabaseAsync(createDatabaseRequest).get(); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + + // Connect to the database with the JDBC driver and create two test tables. + String projectId = instanceName.getProject(); + String instanceId = instanceName.getInstance(); + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + projectId, instanceId, databaseId), + properties)) { + try (Statement statement = connection.createStatement()) { + // Create the tables in one batch. + statement.addBatch( + "CREATE TABLE Singers (" + + " SingerId INT64 NOT NULL," + + " FirstName STRING(1024)," + + " LastName STRING(1024)," + + " SingerInfo BYTES(MAX)," + + " FullName STRING(2048) AS " + + " (ARRAY_TO_STRING([FirstName, LastName], \" \")) STORED" + + ") PRIMARY KEY (SingerId)"); + statement.addBatch( + "CREATE TABLE Albums (" + + " SingerId INT64 NOT NULL," + + " AlbumId INT64 NOT NULL," + + " AlbumTitle STRING(MAX)" + + ") PRIMARY KEY (SingerId, AlbumId)," + + " INTERLEAVE IN PARENT Singers ON DELETE CASCADE"); + statement.executeBatch(); + } + } + System.out.printf( + "Created database [%s]\n", + DatabaseName.of(projectId, instanceId, databaseId)); + } + // [END spanner_create_database] + + // [START spanner_postgresql_create_database] + static void createPostgreSQLDatabase( + final DatabaseAdminClient dbAdminClient, + final InstanceName instanceName, + final String databaseId, + final Properties properties) throws SQLException { + // Use the Spanner admin client to create a database. 
+ CreateDatabaseRequest createDatabaseRequest = + CreateDatabaseRequest.newBuilder() + // PostgreSQL database names and other identifiers + // must be quoted using double quotes. + .setCreateStatement("create database \"" + databaseId + "\"") + .setParent(instanceName.toString()) + .setDatabaseDialect(DatabaseDialect.POSTGRESQL) + .build(); + try { + dbAdminClient.createDatabaseAsync(createDatabaseRequest).get(); + } catch (ExecutionException e) { + throw SpannerExceptionFactory.asSpannerException(e.getCause()); + } catch (InterruptedException e) { + throw SpannerExceptionFactory.propagateInterrupt(e); + } + + // Connect to the database with the JDBC driver and create two test tables. + String projectId = instanceName.getProject(); + String instanceId = instanceName.getInstance(); + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + projectId, instanceId, databaseId), + properties)) { + try (Statement statement = connection.createStatement()) { + // Create the tables in one batch. 
+ statement.addBatch( + "create table singers (" + + " singer_id bigint primary key not null," + + " first_name varchar(1024)," + + " last_name varchar(1024)," + + " singer_info bytea," + + " full_name varchar(2048) generated always as (\n" + + " case when first_name is null then last_name\n" + + " when last_name is null then first_name\n" + + " else first_name || ' ' || last_name\n" + + " end) stored" + + ")"); + statement.addBatch( + "create table albums (" + + " singer_id bigint not null," + + " album_id bigint not null," + + " album_title varchar," + + " primary key (singer_id, album_id)" + + ") interleave in parent singers on delete cascade"); + statement.executeBatch(); + } + } + System.out.printf( + "Created database [%s]\n", + DatabaseName.of(projectId, instanceId, databaseId)); + } + // [END spanner_postgresql_create_database] + + // [START spanner_create_jdbc_connection] + static void createConnection( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + // Connection properties can be specified both with in a Properties object + // and in the connection URL. 
+ properties.put("numChannels", "8"); + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s" + + ";minSessions=400;maxSessions=400", + project, instance, database), + properties)) { + try (ResultSet resultSet = + connection.createStatement().executeQuery("select 'Hello World!'")) { + while (resultSet.next()) { + System.out.println(resultSet.getString(1)); + } + } + } + } + // [END spanner_create_jdbc_connection] + + // [START spanner_create_jdbc_connection_with_emulator] + static void createConnectionWithEmulator( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + // Add autoConfigEmulator=true to the connection URL to instruct the JDBC + // driver to connect to the Spanner emulator on localhost:9010. + // The Spanner instance and database are automatically created if these + // don't already exist. + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s" + + ";autoConfigEmulator=true", + project, instance, database), + properties)) { + try (ResultSet resultSet = + connection.createStatement().executeQuery("select 'Hello World!'")) { + while (resultSet.next()) { + System.out.println(resultSet.getString(1)); + } + } + } + } + // [END spanner_create_jdbc_connection_with_emulator] + + // [START spanner_dml_getting_started_insert] + static void writeDataWithDml( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Add 4 rows in one statement. + // JDBC always uses '?' as a parameter placeholder. 
+ try (PreparedStatement preparedStatement = + connection.prepareStatement( + "INSERT INTO Singers (SingerId, FirstName, LastName) VALUES " + + "(?, ?, ?), " + + "(?, ?, ?), " + + "(?, ?, ?), " + + "(?, ?, ?)")) { + + final ImmutableList<Singer> singers = + ImmutableList.of( + new Singer(/* SingerId = */ 12L, "Melissa", "Garcia"), + new Singer(/* SingerId = */ 13L, "Russel", "Morales"), + new Singer(/* SingerId = */ 14L, "Jacqueline", "Long"), + new Singer(/* SingerId = */ 15L, "Dylan", "Shaw")); + + // Note that JDBC parameters start at index 1. + int paramIndex = 0; + for (Singer singer : singers) { + preparedStatement.setLong(++paramIndex, singer.singerId); + preparedStatement.setString(++paramIndex, singer.firstName); + preparedStatement.setString(++paramIndex, singer.lastName); + } + + int updateCount = preparedStatement.executeUpdate(); + System.out.printf("%d records inserted.\n", updateCount); + } + } + } + // [END spanner_dml_getting_started_insert] + + // [START spanner_postgresql_dml_getting_started_insert] + static void writeDataWithDmlPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Add 4 rows in one statement. + // JDBC always uses '?' as a parameter placeholder. 
+ try (PreparedStatement preparedStatement = + connection.prepareStatement( + "INSERT INTO singers (singer_id, first_name, last_name) VALUES " + + "(?, ?, ?), " + + "(?, ?, ?), " + + "(?, ?, ?), " + + "(?, ?, ?)")) { + + final ImmutableList<Singer> singers = + ImmutableList.of( + new Singer(/* SingerId = */ 12L, "Melissa", "Garcia"), + new Singer(/* SingerId = */ 13L, "Russel", "Morales"), + new Singer(/* SingerId = */ 14L, "Jacqueline", "Long"), + new Singer(/* SingerId = */ 15L, "Dylan", "Shaw")); + + // Note that JDBC parameters start at index 1. + int paramIndex = 0; + for (Singer singer : singers) { + preparedStatement.setLong(++paramIndex, singer.singerId); + preparedStatement.setString(++paramIndex, singer.firstName); + preparedStatement.setString(++paramIndex, singer.lastName); + } + + int updateCount = preparedStatement.executeUpdate(); + System.out.printf("%d records inserted.\n", updateCount); + } + } + } + // [END spanner_postgresql_dml_getting_started_insert] + + // [START spanner_dml_batch] + static void writeDataWithDmlBatch( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Add multiple rows in one DML batch. + // JDBC always uses '?' as a parameter placeholder. + try (PreparedStatement preparedStatement = + connection.prepareStatement( + "INSERT INTO Singers (SingerId, FirstName, LastName) " + + "VALUES (?, ?, ?)")) { + final ImmutableList<Singer> singers = + ImmutableList.of( + new Singer(/* SingerId = */ 16L, "Sarah", "Wilson"), + new Singer(/* SingerId = */ 17L, "Ethan", "Miller"), + new Singer(/* SingerId = */ 18L, "Maya", "Patel")); + + for (Singer singer : singers) { + // Note that JDBC parameters start at index 1. 
+ int paramIndex = 0; + preparedStatement.setLong(++paramIndex, singer.singerId); + preparedStatement.setString(++paramIndex, singer.firstName); + preparedStatement.setString(++paramIndex, singer.lastName); + preparedStatement.addBatch(); + } + + int[] updateCounts = preparedStatement.executeBatch(); + System.out.printf( + "%d records inserted.\n", + Arrays.stream(updateCounts).sum()); + } + } + } + // [END spanner_dml_batch] + + // [START spanner_postgresql_dml_batch] + static void writeDataWithDmlBatchPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Add multiple rows in one DML batch. + // JDBC always uses '?' as a parameter placeholder. + try (PreparedStatement preparedStatement = + connection.prepareStatement( + "INSERT INTO singers (singer_id, first_name, last_name)" + + " VALUES (?, ?, ?)")) { + final ImmutableList<Singer> singers = + ImmutableList.of( + new Singer(/* SingerId = */ 16L, "Sarah", "Wilson"), + new Singer(/* SingerId = */ 17L, "Ethan", "Miller"), + new Singer(/* SingerId = */ 18L, "Maya", "Patel")); + + for (Singer singer : singers) { + // Note that JDBC parameters start at index 1. 
+ int paramIndex = 0; + preparedStatement.setLong(++paramIndex, singer.singerId); + preparedStatement.setString(++paramIndex, singer.firstName); + preparedStatement.setString(++paramIndex, singer.lastName); + preparedStatement.addBatch(); + } + + int[] updateCounts = preparedStatement.executeBatch(); + System.out.printf( + "%d records inserted.\n", + Arrays.stream(updateCounts).sum()); + } + } + } + // [END spanner_postgresql_dml_batch] + + // [START spanner_insert_data] + static void writeDataWithMutations( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Unwrap the CloudSpannerJdbcConnection interface + // from the java.sql.Connection. + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + + List<Mutation> mutations = new ArrayList<>(); + for (Singer singer : SINGERS) { + mutations.add( + Mutation.newInsertBuilder("Singers") + .set("SingerId") + .to(singer.singerId) + .set("FirstName") + .to(singer.firstName) + .set("LastName") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("Albums") + .set("SingerId") + .to(album.singerId) + .set("AlbumId") + .to(album.albumId) + .set("AlbumTitle") + .to(album.albumTitle) + .build()); + } + // Apply the mutations atomically to Spanner. 
+ cloudSpannerJdbcConnection.write(mutations); + System.out.printf("Inserted %d rows.\n", mutations.size()); + } + } + // [END spanner_insert_data] + + // [START spanner_postgresql_insert_data] + static void writeDataWithMutationsPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Unwrap the CloudSpannerJdbcConnection interface + // from the java.sql.Connection. + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + + List<Mutation> mutations = new ArrayList<>(); + for (Singer singer : SINGERS) { + mutations.add( + Mutation.newInsertBuilder("singers") + .set("singer_id") + .to(singer.singerId) + .set("first_name") + .to(singer.firstName) + .set("last_name") + .to(singer.lastName) + .build()); + } + for (Album album : ALBUMS) { + mutations.add( + Mutation.newInsertBuilder("albums") + .set("singer_id") + .to(album.singerId) + .set("album_id") + .to(album.albumId) + .set("album_title") + .to(album.albumTitle) + .build()); + } + // Apply the mutations atomically to Spanner. 
+ cloudSpannerJdbcConnection.write(mutations); + System.out.printf("Inserted %d rows.\n", mutations.size()); + } + } + // [END spanner_postgresql_insert_data] + + // [START spanner_query_data] + static void queryData( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "SELECT SingerId, AlbumId, AlbumTitle " + + "FROM Albums")) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + resultSet.getString("AlbumTitle")); + } + } + } + } + // [END spanner_query_data] + + // [START spanner_postgresql_query_data] + static void queryDataPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "SELECT singer_id, album_id, album_title " + + "FROM albums")) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("singer_id"), + resultSet.getLong("album_id"), + resultSet.getString("album_title")); + } + } + } + } + // [END spanner_postgresql_query_data] + + // [START spanner_query_with_parameter] + static void queryWithParameter( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + 
"jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + try (PreparedStatement statement = + connection.prepareStatement( + "SELECT SingerId, FirstName, LastName " + + "FROM Singers " + + "WHERE LastName = ?")) { + statement.setString(1, "Garcia"); + try (ResultSet resultSet = statement.executeQuery()) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + } + } + // [END spanner_query_with_parameter] + + // [START spanner_postgresql_query_with_parameter] + static void queryWithParameterPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + try (PreparedStatement statement = + connection.prepareStatement( + "SELECT singer_id, first_name, last_name " + + "FROM singers " + + "WHERE last_name = ?")) { + statement.setString(1, "Garcia"); + try (ResultSet resultSet = statement.executeQuery()) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("singer_id"), + resultSet.getString("first_name"), + resultSet.getString("last_name")); + } + } + } + } + } + // [END spanner_postgresql_query_with_parameter] + + // [START spanner_add_column] + static void addColumn( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + connection + .createStatement() + .execute("ALTER TABLE Albums ADD COLUMN MarketingBudget INT64"); + 
System.out.println("Added MarketingBudget column"); + } + } + // [END spanner_add_column] + + // [START spanner_postgresql_add_column] + static void addColumnPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + connection + .createStatement() + .execute("alter table albums add column marketing_budget bigint"); + System.out.println("Added marketing_budget column"); + } + } + // [END spanner_postgresql_add_column] + + // [START spanner_ddl_batch] + static void ddlBatch( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + try (Statement statement = connection.createStatement()) { + // Create two new tables in one batch. 
+ statement.addBatch( + "CREATE TABLE Venues (" + + " VenueId INT64 NOT NULL," + + " Name STRING(1024)," + + " Description JSON" + + ") PRIMARY KEY (VenueId)"); + statement.addBatch( + "CREATE TABLE Concerts (" + + " ConcertId INT64 NOT NULL," + + " VenueId INT64 NOT NULL," + + " SingerId INT64 NOT NULL," + + " StartTime TIMESTAMP," + + " EndTime TIMESTAMP," + + " CONSTRAINT Fk_Concerts_Venues FOREIGN KEY" + + " (VenueId) REFERENCES Venues (VenueId)," + + " CONSTRAINT Fk_Concerts_Singers FOREIGN KEY" + + " (SingerId) REFERENCES Singers (SingerId)" + + ") PRIMARY KEY (ConcertId)"); + statement.executeBatch(); + } + System.out.println("Added Venues and Concerts tables"); + } + } + // [END spanner_ddl_batch] + + // [START spanner_postgresql_ddl_batch] + static void ddlBatchPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + try (Statement statement = connection.createStatement()) { + // Create two new tables in one batch. 
+ statement.addBatch( + "CREATE TABLE venues (" + + " venue_id bigint not null primary key," + + " name varchar(1024)," + + " description jsonb" + + ")"); + statement.addBatch( + "CREATE TABLE concerts (" + + " concert_id bigint not null primary key," + + " venue_id bigint not null," + + " singer_id bigint not null," + + " start_time timestamptz," + + " end_time timestamptz," + + " constraint fk_concerts_venues foreign key" + + " (venue_id) references venues (venue_id)," + + " constraint fk_concerts_singers foreign key" + + " (singer_id) references singers (singer_id)" + + ")"); + statement.executeBatch(); + } + System.out.println("Added venues and concerts tables"); + } + } + // [END spanner_postgresql_ddl_batch] + + // [START spanner_update_data] + static void updateDataWithMutations( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Unwrap the CloudSpannerJdbcConnection interface + // from the java.sql.Connection. + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + + final long marketingBudgetAlbum1 = 100000L; + final long marketingBudgetAlbum2 = 500000L; + // Mutation can be used to update/insert/delete a single row in a table. + // Here we use newUpdateBuilder to create update mutations. + List<Mutation> mutations = + Arrays.asList( + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(1) + .set("AlbumId") + .to(1) + .set("MarketingBudget") + .to(marketingBudgetAlbum1) + .build(), + Mutation.newUpdateBuilder("Albums") + .set("SingerId") + .to(2) + .set("AlbumId") + .to(2) + .set("MarketingBudget") + .to(marketingBudgetAlbum2) + .build()); + // This writes all the mutations to Cloud Spanner atomically. 
+ cloudSpannerJdbcConnection.write(mutations); + System.out.println("Updated albums"); + } + } + // [END spanner_update_data] + + // [START spanner_postgresql_update_data] + static void updateDataWithMutationsPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Unwrap the CloudSpannerJdbcConnection interface + // from the java.sql.Connection. + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + + final long marketingBudgetAlbum1 = 100000L; + final long marketingBudgetAlbum2 = 500000L; + // Mutation can be used to update/insert/delete a single row in a table. + // Here we use newUpdateBuilder to create update mutations. + List<Mutation> mutations = + Arrays.asList( + Mutation.newUpdateBuilder("albums") + .set("singer_id") + .to(1) + .set("album_id") + .to(1) + .set("marketing_budget") + .to(marketingBudgetAlbum1) + .build(), + Mutation.newUpdateBuilder("albums") + .set("singer_id") + .to(2) + .set("album_id") + .to(2) + .set("marketing_budget") + .to(marketingBudgetAlbum2) + .build()); + // This writes all the mutations to Cloud Spanner atomically. 
+ cloudSpannerJdbcConnection.write(mutations); + System.out.println("Updated albums"); + } + } + // [END spanner_postgresql_update_data] + + // [START spanner_query_data_with_new_column] + static void queryDataWithNewColumn( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Rows without an explicit value for MarketingBudget will have a + // MarketingBudget equal to null. + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "SELECT SingerId, AlbumId, MarketingBudget " + + "FROM Albums")) { + while (resultSet.next()) { + // Use the ResultSet#getObject(String) method to get data + // of any type from the ResultSet. + System.out.printf( + "%s %s %s\n", + resultSet.getObject("SingerId"), + resultSet.getObject("AlbumId"), + resultSet.getObject("MarketingBudget")); + } + } + } + } + // [END spanner_query_data_with_new_column] + + // [START spanner_postgresql_query_data_with_new_column] + static void queryDataWithNewColumnPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Rows without an explicit value for marketing_budget will have a + // marketing_budget equal to null. + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "select singer_id, album_id, marketing_budget " + + "from albums")) { + while (resultSet.next()) { + // Use the ResultSet#getObject(String) method to get data + // of any type from the ResultSet. 
+ System.out.printf( + "%s %s %s\n", + resultSet.getObject("singer_id"), + resultSet.getObject("album_id"), + resultSet.getObject("marketing_budget")); + } + } + } + } + // [END spanner_postgresql_query_data_with_new_column] + + // [START spanner_dml_getting_started_update] + static void writeWithTransactionUsingDml( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Set AutoCommit=false to enable transactions. + connection.setAutoCommit(false); + + // Transfer marketing budget from one album to another. + // We do it in a transaction to ensure that the transfer is atomic. + // There is no need to explicitly start the transaction. The first + // statement on the connection will start a transaction when + // AutoCommit=false. + String selectMarketingBudgetSql = + "SELECT MarketingBudget " + + "FROM Albums " + + "WHERE SingerId = ? AND AlbumId = ?"; + long album2Budget = 0; + try (PreparedStatement selectMarketingBudgetStatement = + connection.prepareStatement(selectMarketingBudgetSql)) { + // Bind the query parameters to SingerId=2 and AlbumId=2. + selectMarketingBudgetStatement.setLong(1, 2); + selectMarketingBudgetStatement.setLong(2, 2); + try (ResultSet resultSet = + selectMarketingBudgetStatement.executeQuery()) { + while (resultSet.next()) { + album2Budget = resultSet.getLong("MarketingBudget"); + } + } + // The transaction will only be committed if this condition still holds + // at the time of commit. Otherwise, the transaction will be aborted. + final long transfer = 200000; + if (album2Budget >= transfer) { + long album1Budget = 0; + // Re-use the existing PreparedStatement for selecting the + // MarketingBudget to get the budget for Album 1. 
+ // Bind the query parameters to SingerId=1 and AlbumId=1. + selectMarketingBudgetStatement.setLong(1, 1); + selectMarketingBudgetStatement.setLong(2, 1); + try (ResultSet resultSet = + selectMarketingBudgetStatement.executeQuery()) { + while (resultSet.next()) { + album1Budget = resultSet.getLong("MarketingBudget"); + } + } + + // Transfer part of the marketing budget of Album 2 to Album 1. + album1Budget += transfer; + album2Budget -= transfer; + String updateSql = + "UPDATE Albums " + + "SET MarketingBudget = ? " + + "WHERE SingerId = ? and AlbumId = ?"; + try (PreparedStatement updateStatement = + connection.prepareStatement(updateSql)) { + // Update Album 1. + int paramIndex = 0; + updateStatement.setLong(++paramIndex, album1Budget); + updateStatement.setLong(++paramIndex, 1); + updateStatement.setLong(++paramIndex, 1); + // Create a DML batch by calling addBatch on + // the current PreparedStatement. + updateStatement.addBatch(); + + // Update Album 2 in the same DML batch. + paramIndex = 0; + updateStatement.setLong(++paramIndex, album2Budget); + updateStatement.setLong(++paramIndex, 2); + updateStatement.setLong(++paramIndex, 2); + updateStatement.addBatch(); + + // Execute both DML statements in one batch. + updateStatement.executeBatch(); + } + } + } + // Commit the current transaction. + connection.commit(); + System.out.println( + "Transferred marketing budget from Album 2 to Album 1"); + } + } + // [END spanner_dml_getting_started_update] + + // [START spanner_postgresql_dml_getting_started_update] + static void writeWithTransactionUsingDmlPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Set AutoCommit=false to enable transactions. 
+ connection.setAutoCommit(false); + + // Transfer marketing budget from one album to another. We do it in a + // transaction to ensure that the transfer is atomic. There is no need + // to explicitly start the transaction. The first statement on the + // connection will start a transaction when AutoCommit=false. + String selectMarketingBudgetSql = + "SELECT marketing_budget " + + "from albums " + + "WHERE singer_id = ? and album_id = ?"; + long album2Budget = 0; + try (PreparedStatement selectMarketingBudgetStatement = + connection.prepareStatement(selectMarketingBudgetSql)) { + // Bind the query parameters to SingerId=2 and AlbumId=2. + selectMarketingBudgetStatement.setLong(1, 2); + selectMarketingBudgetStatement.setLong(2, 2); + try (ResultSet resultSet = + selectMarketingBudgetStatement.executeQuery()) { + while (resultSet.next()) { + album2Budget = resultSet.getLong("marketing_budget"); + } + } + // The transaction will only be committed if this condition still holds + // at the time of commit. Otherwise, the transaction will be aborted. + final long transfer = 200000; + if (album2Budget >= transfer) { + long album1Budget = 0; + // Re-use the existing PreparedStatement for selecting the + // marketing_budget to get the budget for Album 1. + // Bind the query parameters to SingerId=1 and AlbumId=1. + selectMarketingBudgetStatement.setLong(1, 1); + selectMarketingBudgetStatement.setLong(2, 1); + try (ResultSet resultSet = + selectMarketingBudgetStatement.executeQuery()) { + while (resultSet.next()) { + album1Budget = resultSet.getLong("marketing_budget"); + } + } + + // Transfer part of the marketing budget of Album 2 to Album 1. + album1Budget += transfer; + album2Budget -= transfer; + String updateSql = + "UPDATE albums " + + "SET marketing_budget = ? " + + "WHERE singer_id = ? and album_id = ?"; + try (PreparedStatement updateStatement = + connection.prepareStatement(updateSql)) { + // Update Album 1. 
+ int paramIndex = 0; + updateStatement.setLong(++paramIndex, album1Budget); + updateStatement.setLong(++paramIndex, 1); + updateStatement.setLong(++paramIndex, 1); + // Create a DML batch by calling addBatch + // on the current PreparedStatement. + updateStatement.addBatch(); + + // Update Album 2 in the same DML batch. + paramIndex = 0; + updateStatement.setLong(++paramIndex, album2Budget); + updateStatement.setLong(++paramIndex, 2); + updateStatement.setLong(++paramIndex, 2); + updateStatement.addBatch(); + + // Execute both DML statements in one batch. + updateStatement.executeBatch(); + } + } + } + // Commit the current transaction. + connection.commit(); + System.out.println( + "Transferred marketing budget from Album 2 to Album 1"); + } + } + // [END spanner_postgresql_dml_getting_started_update] + + // [START spanner_transaction_and_statement_tag] + static void tags( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Set AutoCommit=false to enable transactions. + connection.setAutoCommit(false); + // Set the TRANSACTION_TAG session variable to set a transaction tag + // for the current transaction. + connection + .createStatement() + .execute("SET TRANSACTION_TAG='example-tx-tag'"); + + // Set the STATEMENT_TAG session variable to set the request tag + // that should be included with the next SQL statement. + connection + .createStatement() + .execute("SET STATEMENT_TAG='query-marketing-budget'"); + long marketingBudget = 0L; + long singerId = 1L; + long albumId = 1L; + try (PreparedStatement statement = connection.prepareStatement( + "SELECT MarketingBudget " + + "FROM Albums " + + "WHERE SingerId=? 
AND AlbumId=?")) { + statement.setLong(1, singerId); + statement.setLong(2, albumId); + try (ResultSet albumResultSet = statement.executeQuery()) { + while (albumResultSet.next()) { + marketingBudget = albumResultSet.getLong(1); + } + } + } + // Reduce the marketing budget by 10% if it is more than 1,000. + final long maxMarketingBudget = 1000L; + final float reduction = 0.1f; + if (marketingBudget > maxMarketingBudget) { + marketingBudget -= (long) (marketingBudget * reduction); + connection + .createStatement() + .execute("SET STATEMENT_TAG='reduce-marketing-budget'"); + try (PreparedStatement statement = connection.prepareStatement( + "UPDATE Albums SET MarketingBudget=? " + + "WHERE SingerId=? AND AlbumId=?")) { + int paramIndex = 0; + statement.setLong(++paramIndex, marketingBudget); + statement.setLong(++paramIndex, singerId); + statement.setLong(++paramIndex, albumId); + statement.executeUpdate(); + } + } + + // Commit the current transaction. + connection.commit(); + System.out.println("Reduced marketing budget"); + } + } + // [END spanner_transaction_and_statement_tag] + + // [START spanner_postgresql_transaction_and_statement_tag] + static void tagsPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Set AutoCommit=false to enable transactions. + connection.setAutoCommit(false); + // Set the TRANSACTION_TAG session variable to set a transaction tag + // for the current transaction. + connection + .createStatement() + .execute("set spanner.transaction_tag='example-tx-tag'"); + + // Set the STATEMENT_TAG session variable to set the request tag + // that should be included with the next SQL statement. 
+ connection + .createStatement() + .execute("set spanner.statement_tag='query-marketing-budget'"); + long marketingBudget = 0L; + long singerId = 1L; + long albumId = 1L; + try (PreparedStatement statement = connection.prepareStatement( + "select marketing_budget " + + "from albums " + + "where singer_id=? and album_id=?")) { + statement.setLong(1, singerId); + statement.setLong(2, albumId); + try (ResultSet albumResultSet = statement.executeQuery()) { + while (albumResultSet.next()) { + marketingBudget = albumResultSet.getLong(1); + } + } + } + // Reduce the marketing budget by 10% if it is more than 1,000. + final long maxMarketingBudget = 1000L; + final float reduction = 0.1f; + if (marketingBudget > maxMarketingBudget) { + marketingBudget -= (long) (marketingBudget * reduction); + connection + .createStatement() + .execute("set spanner.statement_tag='reduce-marketing-budget'"); + try (PreparedStatement statement = connection.prepareStatement( + "update albums set marketing_budget=? " + + "where singer_id=? AND album_id=?")) { + int paramIndex = 0; + statement.setLong(++paramIndex, marketingBudget); + statement.setLong(++paramIndex, singerId); + statement.setLong(++paramIndex, albumId); + statement.executeUpdate(); + } + } + + // Commit the current transaction. + connection.commit(); + System.out.println("Reduced marketing budget"); + } + } + // [END spanner_postgresql_transaction_and_statement_tag] + + // [START spanner_read_only_transaction] + static void readOnlyTransaction( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Set AutoCommit=false to enable transactions. + connection.setAutoCommit(false); + // This SQL statement instructs the JDBC driver to use + // a read-only transaction. 
+ connection.createStatement().execute("SET TRANSACTION READ ONLY"); + + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "SELECT SingerId, AlbumId, AlbumTitle " + + "FROM Albums " + + "ORDER BY SingerId, AlbumId")) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + resultSet.getString("AlbumTitle")); + } + } + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "SELECT SingerId, AlbumId, AlbumTitle " + + "FROM Albums " + + "ORDER BY AlbumTitle")) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("SingerId"), + resultSet.getLong("AlbumId"), + resultSet.getString("AlbumTitle")); + } + } + // End the read-only transaction by calling commit(). + connection.commit(); + } + } + // [END spanner_read_only_transaction] + + // [START spanner_postgresql_read_only_transaction] + static void readOnlyTransactionPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Set AutoCommit=false to enable transactions. + connection.setAutoCommit(false); + // This SQL statement instructs the JDBC driver to use + // a read-only transaction. 
+ connection.createStatement().execute("set transaction read only"); + + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "SELECT singer_id, album_id, album_title " + + "FROM albums " + + "ORDER BY singer_id, album_id")) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("singer_id"), + resultSet.getLong("album_id"), + resultSet.getString("album_title")); + } + } + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "SELECT singer_id, album_id, album_title " + + "FROM albums " + + "ORDER BY album_title")) { + while (resultSet.next()) { + System.out.printf( + "%d %d %s\n", + resultSet.getLong("singer_id"), + resultSet.getLong("album_id"), + resultSet.getString("album_title")); + } + } + // End the read-only transaction by calling commit(). + connection.commit(); + } + } + // [END spanner_postgresql_read_only_transaction] + + // [START spanner_data_boost] + static void dataBoost( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // This enables Data Boost for all partitioned queries on this connection. + connection.createStatement().execute("SET DATA_BOOST_ENABLED=TRUE"); + + // Run a partitioned query. This query will use Data Boost. 
+ try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "RUN PARTITIONED QUERY " + + "SELECT SingerId, FirstName, LastName " + + "FROM Singers")) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("SingerId"), + resultSet.getString("FirstName"), + resultSet.getString("LastName")); + } + } + } + } + // [END spanner_data_boost] + + // [START spanner_postgresql_data_boost] + static void dataBoostPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // This enables Data Boost for all partitioned queries on this connection. + connection + .createStatement() + .execute("set spanner.data_boost_enabled=true"); + + // Run a partitioned query. This query will use Data Boost. + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + "run partitioned query " + + "select singer_id, first_name, last_name " + + "from singers")) { + while (resultSet.next()) { + System.out.printf( + "%d %s %s\n", + resultSet.getLong("singer_id"), + resultSet.getString("first_name"), + resultSet.getString("last_name")); + } + } + } + } + // [END spanner_postgresql_data_boost] + + // [START spanner_partitioned_dml] + static void partitionedDml( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Enable Partitioned DML on this connection. 
+ connection + .createStatement() + .execute("SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'"); + // Back-fill a default value for the MarketingBudget column. + long lowerBoundUpdateCount = + connection + .createStatement() + .executeUpdate("UPDATE Albums " + + "SET MarketingBudget=0 " + + "WHERE MarketingBudget IS NULL"); + System.out.printf("Updated at least %d albums\n", lowerBoundUpdateCount); + } + } + // [END spanner_partitioned_dml] + + // [START spanner_postgresql_partitioned_dml] + static void partitionedDmlPostgreSQL( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Enable Partitioned DML on this connection. + connection + .createStatement() + .execute("set spanner.autocommit_dml_mode='partitioned_non_atomic'"); + // Back-fill a default value for the MarketingBudget column. 
+ long lowerBoundUpdateCount = + connection + .createStatement() + .executeUpdate("update albums " + + "set marketing_budget=0 " + + "where marketing_budget is null"); + System.out.printf("Updated at least %d albums\n", lowerBoundUpdateCount); + } + } + // [END spanner_postgresql_partitioned_dml] + + static void arrayOfStructAsQueryParameter( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException { + try (Connection connection = + DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + try (Statement statement = connection.createStatement()) { + statement.execute( + "create table if not exists my_table " + + "(col1 string(max), col2 int64) primary key (col1)"); + statement.execute( + "insert or update into my_table (col1, col2) " + + "values ('value1', 1), ('value2', 2), ('value3', 3)"); + } + + try (PreparedStatement statement = connection.prepareStatement( + "select * from my_table " + + "where STRUCT(col1, col2) " + + "in unnest (?)")) { + statement.setObject( + 1, + Value.structArray( + com.google.cloud.spanner.Type.struct( + StructField.of("col1", Type.string()), + StructField.of("col2", Type.int64())), + ImmutableList.of( + Struct.newBuilder() + .set("col1").to("value1") + .set("col2").to(1L) + .build(), + Struct.newBuilder() + .set("col1").to("value2") + .set("col2").to(2L) + .build()))); + try (java.sql.ResultSet resultSet = statement.executeQuery()) { + while (resultSet.next()) { + for (int col = 1; + col <= resultSet.getMetaData().getColumnCount(); + col++) { + System.out.printf("%s;", resultSet.getString(col)); + } + System.out.println(); + } + } + } + } + } + + static void protoColumns( + final String project, + final String instance, + final String database, + final Properties properties) throws SQLException, IOException { + try (Connection connection = + 
DriverManager.getConnection( + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + project, instance, database), + properties)) { + // Create a PROTO BUNDLE and a table. + try (Statement statement = connection.createStatement(); + InputStream protoDescriptors = JdbcSample.class.getClassLoader() + .getResourceAsStream("com/example/spanner/jdbc/descriptors.pb")) { + if (protoDescriptors == null) { + throw new IllegalArgumentException("proto descriptors not found"); + } + + // Unwrap the CloudSpannerJdbcConnection interface to set the proto + // descriptors that should be used for the next DDL statements. + connection + .unwrap(CloudSpannerJdbcConnection.class) + .setProtoDescriptors(protoDescriptors); + // Execute the DDL statements as one batch. + // This will reduce execution time compared to executing each statement + // sequentially. + statement.addBatch("CREATE PROTO BUNDLE (\n" + + "examples.spanner.music.SingerInfo,\n" + + "examples.spanner.music.Genre,\n" + + ")"); + statement.addBatch("CREATE TABLE SingersWithProto (\n" + + " SingerId INT64 NOT NULL,\n" + + " SingerInfo examples.spanner.music.SingerInfo,\n" + + " SingerGenre examples.spanner.music.Genre,\n" + + ") PRIMARY KEY (SingerId)"); + statement.executeBatch(); + } + + // Insert a couple of rows using a prepared statement. 
+ try (PreparedStatement statement = connection.prepareStatement( + "INSERT INTO SingersWithProto " + + "(SingerId, SingerInfo, SingerGenre) " + + "VALUES (?, ?, ?)")) { + int param = 0; + statement.setLong(++param, 1L); + statement.setObject(++param, + SingerInfo.newBuilder() + .setGenre(Genre.ROCK) + .setBirthDate("1998-07-04") + .setSingerId(1L) + .setNationality("ES") + .build(), ProtoMessageType.VENDOR_TYPE_NUMBER); + statement.setObject(++param, Genre.ROCK, + ProtoEnumType.VENDOR_TYPE_NUMBER); + statement.addBatch(); + + param = 0; + statement.setLong(++param, 2L); + statement.setObject(++param, + SingerInfo.newBuilder() + .setGenre(Genre.POP) + .setBirthDate("2001-12-03") + .setSingerId(2L) + .setNationality("FO") + .build(), ProtoMessageType.VENDOR_TYPE_NUMBER); + statement.setObject(++param, Genre.POP, + ProtoEnumType.VENDOR_TYPE_NUMBER); + statement.addBatch(); + + int[] updateCounts = statement.executeBatch(); + System.out.printf("Inserted %d singers\n", + Arrays.stream(updateCounts).sum()); + } + + // Read the inserted rows. + try (ResultSet resultSet = connection.createStatement() + .executeQuery("SELECT * FROM SingersWithProto")) { + while (resultSet.next()) { + long singerId = resultSet.getLong("SingerId"); + // Proto messages and proto enums can be retrieved with the + // ResultSet#getObject(int, Class) method. + // The Spanner JDBC driver automatically deserializes + // and converts the column to the Java class representation. + SingerInfo info = resultSet.getObject("SingerInfo", SingerInfo.class); + Genre genre = resultSet.getObject("SingerGenre", Genre.class); + System.out.printf("%d:\n%s\n%s\n", singerId, info, genre); + } + } + } + } + + /** The expected number of command line arguments. */ + private static final int NUM_EXPECTED_ARGS = 3; + + /** + * Main method for running a sample. 
+ * + * @param args the command line arguments + */ + public static void main(final String[] args) throws Exception { + if (args.length != NUM_EXPECTED_ARGS) { + printUsageAndExit(); + } + try (DatabaseAdminClient dbAdminClient = createDatabaseAdminClient()) { + final String command = args[0]; + DatabaseId databaseId = DatabaseId.of( + SpannerOptions.getDefaultInstance().getProjectId(), + args[1], + args[2]); + + run(dbAdminClient, command, databaseId); + } + System.out.println(); + System.out.println("Finished running sample"); + } + + static DatabaseAdminClient createDatabaseAdminClient() throws Exception { + String emulatorHost = System.getenv("SPANNER_EMULATOR_HOST"); + if (!Strings.isNullOrEmpty(emulatorHost)) { + return DatabaseAdminClient.create( + DatabaseAdminSettings.newBuilder() + .setTransportChannelProvider( + InstantiatingGrpcChannelProvider.newBuilder() + .setEndpoint(emulatorHost) + .setChannelConfigurator( + ManagedChannelBuilder::usePlaintext) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build()); + } + return DatabaseAdminClient.create(); + } + + static Properties createProperties() { + Properties properties = new Properties(); + String emulatorHost = System.getenv("SPANNER_EMULATOR_HOST"); + if (!Strings.isNullOrEmpty(emulatorHost)) { + properties.put("autoConfigEmulator", "true"); + properties.put("endpoint", emulatorHost); + } + return properties; + } + + static void run( + final DatabaseAdminClient dbAdminClient, + final String command, + final DatabaseId database) throws Exception { + if ( + !runGoogleSQLSample(dbAdminClient, command, database) + && !runPostgreSQLSample(dbAdminClient, command, database)) { + System.err.println(); + System.err.println("Unknown command: " + command); + System.err.println(); + printUsageAndExit(); + } + } + + static boolean runGoogleSQLSample( + final DatabaseAdminClient dbAdminClient, + final String command, + final DatabaseId database) throws Exception { + switch (command) { + case 
"createdatabase": + createDatabase( + dbAdminClient, + InstanceName.of( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance()), + database.getDatabase(), + createProperties()); + return true; + case "writeusingdml": + writeDataWithDml( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "writeusingdmlbatch": + writeDataWithDmlBatch( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "write": + writeDataWithMutations( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "query": + queryData( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "querywithparameter": + queryWithParameter( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "addmarketingbudget": + addColumn( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "ddlbatch": + ddlBatch( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "update": + updateDataWithMutations( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "querymarketingbudget": + queryDataWithNewColumn( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "writewithtransactionusingdml": + 
writeWithTransactionUsingDml( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "tags": + tags( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "readonlytransaction": + readOnlyTransaction( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "databoost": + dataBoost( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "pdml": + partitionedDml( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "arrayofstructparam": + arrayOfStructAsQueryParameter( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "protocolumns": + protoColumns( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + default: + return false; + } + } + + static boolean runPostgreSQLSample( + final DatabaseAdminClient dbAdminClient, + final String command, + final DatabaseId database) throws Exception { + switch (command) { + case "createpgdatabase": + createPostgreSQLDatabase( + dbAdminClient, + InstanceName.of( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance()), + database.getDatabase(), + createProperties()); + return true; + case "writeusingdmlpg": + writeDataWithDmlPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "writeusingdmlbatchpg": + 
writeDataWithDmlBatchPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "writepg": + writeDataWithMutationsPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "querypg": + queryDataPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "querywithparameterpg": + queryWithParameterPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "addmarketingbudgetpg": + addColumnPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "ddlbatchpg": + ddlBatchPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "updatepg": + updateDataWithMutationsPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "querymarketingbudgetpg": + queryDataWithNewColumnPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "writewithtransactionusingdmlpg": + writeWithTransactionUsingDmlPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "tagspg": + tagsPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return 
true; + case "readonlytransactionpg": + readOnlyTransactionPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "databoostpg": + dataBoostPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + case "pdmlpg": + partitionedDmlPostgreSQL( + database.getInstanceId().getProject(), + database.getInstanceId().getInstance(), + database.getDatabase(), + createProperties()); + return true; + default: + return false; + } + } + + static void printUsageAndExit() { + System.err.println("Usage:"); + System.err.println(" JdbcSample "); + System.err.println(); + System.err.println("Examples:"); + System.err.println(" JdbcSample createdatabase my-instance example-db"); + System.exit(1); + } +} diff --git a/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/SingerProto.java b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/SingerProto.java new file mode 100644 index 000000000000..382456a2eac1 --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/SingerProto.java @@ -0,0 +1,1168 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: singer.proto + +package com.example.spanner.jdbc; + +public final class SingerProto { + private SingerProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistryLite registry) { + } + + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions( + (com.google.protobuf.ExtensionRegistryLite) registry); + } + /** + * Protobuf enum {@code examples.spanner.music.Genre} + */ + public enum Genre + implements com.google.protobuf.ProtocolMessageEnum { + /** + * POP = 0; + */ + POP(0), + /** + * JAZZ = 1; + */ + JAZZ(1), + /** + * FOLK = 2; + */ + FOLK(2), + /** + * ROCK = 3; + */ + ROCK(3), + UNRECOGNIZED(-1), + ; + + /** + * POP = 0; + */ + public static final int POP_VALUE = 0; + /** + * JAZZ = 1; + */ + public static final int JAZZ_VALUE = 1; + /** + * FOLK = 2; + */ + public static final int FOLK_VALUE = 2; + /** + * ROCK = 3; + */ + public static final int ROCK_VALUE = 3; + + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Genre valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. 
+ */ + public static Genre forNumber(int value) { + switch (value) { + case 0: return POP; + case 1: return JAZZ; + case 2: return FOLK; + case 3: return ROCK; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static final com.google.protobuf.Internal.EnumLiteMap< + Genre> internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Genre findValueByNumber(int number) { + return Genre.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return com.example.spanner.jdbc.SingerProto.getDescriptor().getEnumTypes().get(0); + } + + private static final Genre[] VALUES = values(); + + public static Genre valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Genre(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:examples.spanner.music.Genre) + } + + public interface SingerInfoOrBuilder extends + // @@protoc_insertion_point(interface_extends:examples.spanner.music.SingerInfo) + com.google.protobuf.MessageOrBuilder { + + /** + * optional int64 singer_id = 1; + * @return Whether the singerId field is set. 
+ */ + boolean hasSingerId(); + /** + * optional int64 singer_id = 1; + * @return The singerId. + */ + long getSingerId(); + + /** + * optional string birth_date = 2; + * @return Whether the birthDate field is set. + */ + boolean hasBirthDate(); + /** + * optional string birth_date = 2; + * @return The birthDate. + */ + java.lang.String getBirthDate(); + /** + * optional string birth_date = 2; + * @return The bytes for birthDate. + */ + com.google.protobuf.ByteString + getBirthDateBytes(); + + /** + * optional string nationality = 3; + * @return Whether the nationality field is set. + */ + boolean hasNationality(); + /** + * optional string nationality = 3; + * @return The nationality. + */ + java.lang.String getNationality(); + /** + * optional string nationality = 3; + * @return The bytes for nationality. + */ + com.google.protobuf.ByteString + getNationalityBytes(); + + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return Whether the genre field is set. + */ + boolean hasGenre(); + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return The enum numeric value on the wire for genre. + */ + int getGenreValue(); + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return The genre. + */ + com.example.spanner.jdbc.SingerProto.Genre getGenre(); + } + /** + * Protobuf type {@code examples.spanner.music.SingerInfo} + */ + public static final class SingerInfo extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:examples.spanner.music.SingerInfo) + SingerInfoOrBuilder { + private static final long serialVersionUID = 0L; + // Use SingerInfo.newBuilder() to construct. 
+ private SingerInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + private SingerInfo() { + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance( + UnusedPrivateParameter unused) { + return new SingerInfo(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.example.spanner.jdbc.SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.example.spanner.jdbc.SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.example.spanner.jdbc.SingerProto.SingerInfo.class, com.example.spanner.jdbc.SingerProto.SingerInfo.Builder.class); + } + + private int bitField0_; + public static final int SINGER_ID_FIELD_NUMBER = 1; + private long singerId_ = 0L; + /** + * optional int64 singer_id = 1; + * @return Whether the singerId field is set. + */ + @java.lang.Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional int64 singer_id = 1; + * @return The singerId. + */ + @java.lang.Override + public long getSingerId() { + return singerId_; + } + + public static final int BIRTH_DATE_FIELD_NUMBER = 2; + @SuppressWarnings("serial") + private volatile java.lang.Object birthDate_ = ""; + /** + * optional string birth_date = 2; + * @return Whether the birthDate field is set. + */ + @java.lang.Override + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional string birth_date = 2; + * @return The birthDate. 
+ */ + @java.lang.Override + public java.lang.String getBirthDate() { + java.lang.Object ref = birthDate_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } + } + /** + * optional string birth_date = 2; + * @return The bytes for birthDate. + */ + @java.lang.Override + public com.google.protobuf.ByteString + getBirthDateBytes() { + java.lang.Object ref = birthDate_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int NATIONALITY_FIELD_NUMBER = 3; + @SuppressWarnings("serial") + private volatile java.lang.Object nationality_ = ""; + /** + * optional string nationality = 3; + * @return Whether the nationality field is set. + */ + @java.lang.Override + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional string nationality = 3; + * @return The nationality. + */ + @java.lang.Override + public java.lang.String getNationality() { + java.lang.Object ref = nationality_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } + } + /** + * optional string nationality = 3; + * @return The bytes for nationality. 
+ */ + @java.lang.Override + public com.google.protobuf.ByteString + getNationalityBytes() { + java.lang.Object ref = nationality_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int GENRE_FIELD_NUMBER = 4; + private int genre_ = 0; + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return Whether the genre field is set. + */ + @java.lang.Override public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return The enum numeric value on the wire for genre. + */ + @java.lang.Override public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return The genre. + */ + @java.lang.Override public com.example.spanner.jdbc.SingerProto.Genre getGenre() { + com.example.spanner.jdbc.SingerProto.Genre result = com.example.spanner.jdbc.SingerProto.Genre.forNumber(genre_); + return result == null ? 
com.example.spanner.jdbc.SingerProto.Genre.UNRECOGNIZED : result; + } + + private byte memoizedIsInitialized = -1; + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeInt64(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + output.writeEnum(4, genre_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, singerId_); + } + if (((bitField0_ & 0x00000002) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, birthDate_); + } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, nationality_); + } + if (((bitField0_ & 0x00000008) != 0)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, genre_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.example.spanner.jdbc.SingerProto.SingerInfo)) { + return super.equals(obj); + } + com.example.spanner.jdbc.SingerProto.SingerInfo other = (com.example.spanner.jdbc.SingerProto.SingerInfo) obj; 
+ + if (hasSingerId() != other.hasSingerId()) return false; + if (hasSingerId()) { + if (getSingerId() + != other.getSingerId()) return false; + } + if (hasBirthDate() != other.hasBirthDate()) return false; + if (hasBirthDate()) { + if (!getBirthDate() + .equals(other.getBirthDate())) return false; + } + if (hasNationality() != other.hasNationality()) return false; + if (hasNationality()) { + if (!getNationality() + .equals(other.getNationality())) return false; + } + if (hasGenre() != other.hasGenre()) return false; + if (hasGenre()) { + if (genre_ != other.genre_) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSingerId()) { + hash = (37 * hash) + SINGER_ID_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong( + getSingerId()); + } + if (hasBirthDate()) { + hash = (37 * hash) + BIRTH_DATE_FIELD_NUMBER; + hash = (53 * hash) + getBirthDate().hashCode(); + } + if (hasNationality()) { + hash = (37 * hash) + NATIONALITY_FIELD_NUMBER; + hash = (53 * hash) + getNationality().hashCode(); + } + if (hasGenre()) { + hash = (37 * hash) + GENRE_FIELD_NUMBER; + hash = (53 * hash) + genre_; + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + java.nio.ByteBuffer data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseDelimitedWithIOException(PARSER, input); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + 
.parseDelimitedWithIOException(PARSER, input, extensionRegistry); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input); + } + public static com.example.spanner.jdbc.SingerProto.SingerInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3 + .parseWithIOException(PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + public static Builder newBuilder(com.example.spanner.jdbc.SingerProto.SingerInfo prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE + ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code examples.spanner.music.SingerInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder implements + // @@protoc_insertion_point(builder_implements:examples.spanner.music.SingerInfo) + com.example.spanner.jdbc.SingerProto.SingerInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return com.example.spanner.jdbc.SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.example.spanner.jdbc.SingerProto.internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.example.spanner.jdbc.SingerProto.SingerInfo.class, com.example.spanner.jdbc.SingerProto.SingerInfo.Builder.class); + } + + // Construct using com.example.spanner.jdbc.SingerProto.SingerInfo.newBuilder() + private Builder() { + + } + + private Builder( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + + } + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + singerId_ = 0L; + birthDate_ = ""; + nationality_ = ""; + genre_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return com.example.spanner.jdbc.SingerProto.internal_static_examples_spanner_music_SingerInfo_descriptor; + } + + @java.lang.Override + public com.example.spanner.jdbc.SingerProto.SingerInfo getDefaultInstanceForType() { + return com.example.spanner.jdbc.SingerProto.SingerInfo.getDefaultInstance(); + } + + @java.lang.Override + 
public com.example.spanner.jdbc.SingerProto.SingerInfo build() { + com.example.spanner.jdbc.SingerProto.SingerInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.example.spanner.jdbc.SingerProto.SingerInfo buildPartial() { + com.example.spanner.jdbc.SingerProto.SingerInfo result = new com.example.spanner.jdbc.SingerProto.SingerInfo(this); + if (bitField0_ != 0) { buildPartial0(result); } + onBuilt(); + return result; + } + + private void buildPartial0(com.example.spanner.jdbc.SingerProto.SingerInfo result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.singerId_ = singerId_; + to_bitField0_ |= 0x00000001; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.birthDate_ = birthDate_; + to_bitField0_ |= 0x00000002; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.nationality_ = nationality_; + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.genre_ = genre_; + to_bitField0_ |= 0x00000008; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.setField(field, value); + } + @java.lang.Override + public Builder clearField( + com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + @java.lang.Override + public Builder clearOneof( + com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + @java.lang.Override + public Builder 
addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + java.lang.Object value) { + return super.addRepeatedField(field, value); + } + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.example.spanner.jdbc.SingerProto.SingerInfo) { + return mergeFrom((com.example.spanner.jdbc.SingerProto.SingerInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.example.spanner.jdbc.SingerProto.SingerInfo other) { + if (other == com.example.spanner.jdbc.SingerProto.SingerInfo.getDefaultInstance()) return this; + if (other.hasSingerId()) { + setSingerId(other.getSingerId()); + } + if (other.hasBirthDate()) { + birthDate_ = other.birthDate_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (other.hasNationality()) { + nationality_ = other.nationality_; + bitField0_ |= 0x00000004; + onChanged(); + } + if (other.hasGenre()) { + setGenre(other.getGenre()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: { + singerId_ = input.readInt64(); + bitField0_ |= 0x00000001; + break; + } // case 8 + case 18: { + birthDate_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 26: { + nationality_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 26 + case 32: { + genre_ = input.readEnum(); + bitField0_ |= 0x00000008; + break; + } // case 32 + default: { + if 
(!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + private int bitField0_; + + private long singerId_ ; + /** + * optional int64 singer_id = 1; + * @return Whether the singerId field is set. + */ + @java.lang.Override + public boolean hasSingerId() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * optional int64 singer_id = 1; + * @return The singerId. + */ + @java.lang.Override + public long getSingerId() { + return singerId_; + } + /** + * optional int64 singer_id = 1; + * @param value The singerId to set. + * @return This builder for chaining. + */ + public Builder setSingerId(long value) { + + singerId_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * optional int64 singer_id = 1; + * @return This builder for chaining. + */ + public Builder clearSingerId() { + bitField0_ = (bitField0_ & ~0x00000001); + singerId_ = 0L; + onChanged(); + return this; + } + + private java.lang.Object birthDate_ = ""; + /** + * optional string birth_date = 2; + * @return Whether the birthDate field is set. + */ + public boolean hasBirthDate() { + return ((bitField0_ & 0x00000002) != 0); + } + /** + * optional string birth_date = 2; + * @return The birthDate. + */ + public java.lang.String getBirthDate() { + java.lang.Object ref = birthDate_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + birthDate_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string birth_date = 2; + * @return The bytes for birthDate. 
+ */ + public com.google.protobuf.ByteString + getBirthDateBytes() { + java.lang.Object ref = birthDate_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + birthDate_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string birth_date = 2; + * @param value The birthDate to set. + * @return This builder for chaining. + */ + public Builder setBirthDate( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + /** + * optional string birth_date = 2; + * @return This builder for chaining. + */ + public Builder clearBirthDate() { + birthDate_ = getDefaultInstance().getBirthDate(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * optional string birth_date = 2; + * @param value The bytes for birthDate to set. + * @return This builder for chaining. + */ + public Builder setBirthDateBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + birthDate_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object nationality_ = ""; + /** + * optional string nationality = 3; + * @return Whether the nationality field is set. + */ + public boolean hasNationality() { + return ((bitField0_ & 0x00000004) != 0); + } + /** + * optional string nationality = 3; + * @return The nationality. 
+ */ + public java.lang.String getNationality() { + java.lang.Object ref = nationality_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + nationality_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string nationality = 3; + * @return The bytes for nationality. + */ + public com.google.protobuf.ByteString + getNationalityBytes() { + java.lang.Object ref = nationality_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + nationality_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string nationality = 3; + * @param value The nationality to set. + * @return This builder for chaining. + */ + public Builder setNationality( + java.lang.String value) { + if (value == null) { throw new NullPointerException(); } + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + /** + * optional string nationality = 3; + * @return This builder for chaining. + */ + public Builder clearNationality() { + nationality_ = getDefaultInstance().getNationality(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + /** + * optional string nationality = 3; + * @param value The bytes for nationality to set. + * @return This builder for chaining. + */ + public Builder setNationalityBytes( + com.google.protobuf.ByteString value) { + if (value == null) { throw new NullPointerException(); } + checkByteStringIsUtf8(value); + nationality_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private int genre_ = 0; + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return Whether the genre field is set. 
+ */ + @java.lang.Override public boolean hasGenre() { + return ((bitField0_ & 0x00000008) != 0); + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return The enum numeric value on the wire for genre. + */ + @java.lang.Override public int getGenreValue() { + return genre_; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * @param value The enum numeric value on the wire for genre to set. + * @return This builder for chaining. + */ + public Builder setGenreValue(int value) { + genre_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return The genre. + */ + @java.lang.Override + public com.example.spanner.jdbc.SingerProto.Genre getGenre() { + com.example.spanner.jdbc.SingerProto.Genre result = com.example.spanner.jdbc.SingerProto.Genre.forNumber(genre_); + return result == null ? com.example.spanner.jdbc.SingerProto.Genre.UNRECOGNIZED : result; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * @param value The genre to set. + * @return This builder for chaining. + */ + public Builder setGenre(com.example.spanner.jdbc.SingerProto.Genre value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + genre_ = value.getNumber(); + onChanged(); + return this; + } + /** + * optional .examples.spanner.music.Genre genre = 4; + * @return This builder for chaining. 
+ */ + public Builder clearGenre() { + bitField0_ = (bitField0_ & ~0x00000008); + genre_ = 0; + onChanged(); + return this; + } + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + + // @@protoc_insertion_point(builder_scope:examples.spanner.music.SingerInfo) + } + + // @@protoc_insertion_point(class_scope:examples.spanner.music.SingerInfo) + private static final com.example.spanner.jdbc.SingerProto.SingerInfo DEFAULT_INSTANCE; + static { + DEFAULT_INSTANCE = new com.example.spanner.jdbc.SingerProto.SingerInfo(); + } + + public static com.example.spanner.jdbc.SingerProto.SingerInfo getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser + PARSER = new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SingerInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.example.spanner.jdbc.SingerProto.SingerInfo getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_examples_spanner_music_SingerInfo_descriptor; + private static final + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\014singer.proto\022\026examples.spanner.music\"\301" + + "\001\n\nSingerInfo\022\026\n\tsinger_id\030\001 \001(\003H\000\210\001\001\022\027\n" + + "\nbirth_date\030\002 \001(\tH\001\210\001\001\022\030\n\013nationality\030\003 " + + "\001(\tH\002\210\001\001\0221\n\005genre\030\004 \001(\0162\035.examples.spann" + + "er.music.GenreH\003\210\001\001B\014\n\n_singer_idB\r\n\013_bi" + + "rth_dateB\016\n\014_nationalityB\010\n\006_genre*.\n\005Ge" + + "nre\022\007\n\003POP\020\000\022\010\n\004JAZZ\020\001\022\010\n\004FOLK\020\002\022\010\n\004ROCK" + + "\020\003B)\n\030com.example.spanner.jdbcB\013SingerPr" + + "otoP\000b\006proto3" + }; + descriptor = com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }); + internal_static_examples_spanner_music_SingerInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_examples_spanner_music_SingerInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_examples_spanner_music_SingerInfo_descriptor, + new java.lang.String[] { "SingerId", "BirthDate", "Nationality", "Genre", "SingerId", "BirthDate", "Nationality", "Genre", }); + } + + // 
@@protoc_insertion_point(outer_class_scope) +} diff --git a/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/package-info.java b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/package-info.java new file mode 100644 index 000000000000..bdaf4070ccf5 --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/main/java/com/example/spanner/jdbc/package-info.java @@ -0,0 +1,18 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** Sample package for the Spanner JDBC driver. 
*/ +package com.example.spanner.jdbc; diff --git a/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/README.md b/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/README.md new file mode 100644 index 000000000000..4b689229b239 --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/README.md @@ -0,0 +1,7 @@ +#### To generate SingerProto.java and descriptors.pb file from singer.proto using `protoc` +```shell +cd samples/snippets/src/main/resources/ +protoc --proto_path=com/example/spanner/jdbc/ \ + --include_imports --descriptor_set_out=com/example/spanner/jdbc/descriptors.pb \ + --java_out=../java com/example/spanner/jdbc/singer.proto +``` diff --git a/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/descriptors.pb b/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/descriptors.pb new file mode 100644 index 000000000000..2dfd90bae756 Binary files /dev/null and b/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/descriptors.pb differ diff --git a/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/singer.proto b/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/singer.proto new file mode 100644 index 000000000000..c6662e180e7f --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/main/resources/com/example/spanner/jdbc/singer.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package examples.spanner.music; + +option java_package = "com.example.spanner.jdbc"; +option java_outer_classname = "SingerProto"; +option java_multiple_files = false; + +message SingerInfo { + optional int64 singer_id = 1; + optional string birth_date = 2; + optional string nationality = 3; + optional Genre genre = 4; +} + +enum Genre { + POP = 0; + JAZZ = 1; + FOLK = 2; + ROCK = 3; +} diff --git 
a/java-spanner-jdbc/samples/snippets/src/test/java/com/example/spanner/jdbc/IsolationLevelTest.java b/java-spanner-jdbc/samples/snippets/src/test/java/com/example/spanner/jdbc/IsolationLevelTest.java new file mode 100644 index 000000000000..e1c7bfbe0155 --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/test/java/com/example/spanner/jdbc/IsolationLevelTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.jdbc; + +import static com.example.spanner.jdbc.IsolationLevel.isolationLevel; +import static org.junit.Assume.assumeTrue; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.images.PullPolicy; +import org.testcontainers.utility.DockerImageName; + +@RunWith(JUnit4.class) +public class IsolationLevelTest { + + private static GenericContainer emulator; + + private static final String PROJECT = "emulator-project"; + private static final String INSTANCE = "my-instance"; + private static final String DATABASE = "my-database"; + 
private static final Properties PROPERTIES = new Properties(); + + @BeforeClass + public static void setupEmulator() throws Exception { + assumeTrue("This test requires Docker", DockerClientFactory.instance().isDockerAvailable()); + + emulator = + new GenericContainer<>(DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator")); + emulator.withImagePullPolicy(PullPolicy.alwaysPull()); + emulator.addExposedPort(9010); + emulator.setWaitStrategy(Wait.forListeningPorts(9010)); + emulator.start(); + + PROPERTIES.setProperty("endpoint", + String.format("localhost:%d", emulator.getMappedPort(9010))); + PROPERTIES.setProperty("autoConfigEmulator", "true"); + + String url = String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + PROJECT, INSTANCE, DATABASE); + try (Connection connection = DriverManager.getConnection(url, PROPERTIES)) { + try (Statement statement = connection.createStatement()) { + statement.addBatch( + "CREATE TABLE Singers (" + + "SingerId INT64 PRIMARY KEY, " + + "FirstName STRING(MAX), " + + "LastName STRING(MAX), " + + "Active BOOL)"); + statement.addBatch( + "CREATE TABLE SingerHistory (" + + "SingerId INT64, " + + "Active BOOL, " + + "CreatedAt TIMESTAMP) " + + "PRIMARY KEY (SingerId, CreatedAt)"); + statement.executeBatch(); + } + } + } + + @AfterClass + public static void stopEmulator() { + if (emulator != null) { + emulator.stop(); + } + } + + @Test + public void testIsolationLevel() throws SQLException { + isolationLevel("emulator-project", "my-instance", "my-database", PROPERTIES); + } + +} diff --git a/java-spanner-jdbc/samples/snippets/src/test/java/com/example/spanner/jdbc/JdbcSampleTest.java b/java-spanner-jdbc/samples/snippets/src/test/java/com/example/spanner/jdbc/JdbcSampleTest.java new file mode 100644 index 000000000000..3f46e4d6312a --- /dev/null +++ b/java-spanner-jdbc/samples/snippets/src/test/java/com/example/spanner/jdbc/JdbcSampleTest.java @@ -0,0 +1,417 @@ +/* + * Copyright 2024 Google LLC + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.spanner.jdbc; + +import static com.example.spanner.jdbc.JdbcSample.addColumn; +import static com.example.spanner.jdbc.JdbcSample.addColumnPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.arrayOfStructAsQueryParameter; +import static com.example.spanner.jdbc.JdbcSample.createConnection; +import static com.example.spanner.jdbc.JdbcSample.createConnectionWithEmulator; +import static com.example.spanner.jdbc.JdbcSample.createDatabase; +import static com.example.spanner.jdbc.JdbcSample.createPostgreSQLDatabase; +import static com.example.spanner.jdbc.JdbcSample.dataBoost; +import static com.example.spanner.jdbc.JdbcSample.dataBoostPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.ddlBatch; +import static com.example.spanner.jdbc.JdbcSample.ddlBatchPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.partitionedDml; +import static com.example.spanner.jdbc.JdbcSample.partitionedDmlPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.protoColumns; +import static com.example.spanner.jdbc.JdbcSample.queryData; +import static com.example.spanner.jdbc.JdbcSample.queryDataPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.queryDataWithNewColumn; +import static com.example.spanner.jdbc.JdbcSample.queryDataWithNewColumnPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.queryWithParameter; +import static 
com.example.spanner.jdbc.JdbcSample.queryWithParameterPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.readOnlyTransaction; +import static com.example.spanner.jdbc.JdbcSample.readOnlyTransactionPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.tags; +import static com.example.spanner.jdbc.JdbcSample.tagsPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.updateDataWithMutations; +import static com.example.spanner.jdbc.JdbcSample.updateDataWithMutationsPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.writeDataWithDml; +import static com.example.spanner.jdbc.JdbcSample.writeDataWithDmlBatch; +import static com.example.spanner.jdbc.JdbcSample.writeDataWithDmlBatchPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.writeDataWithDmlPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.writeDataWithMutations; +import static com.example.spanner.jdbc.JdbcSample.writeDataWithMutationsPostgreSQL; +import static com.example.spanner.jdbc.JdbcSample.writeWithTransactionUsingDml; +import static com.example.spanner.jdbc.JdbcSample.writeWithTransactionUsingDmlPostgreSQL; +import static org.junit.Assert.assertEquals; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminClient; +import com.google.cloud.spanner.admin.database.v1.DatabaseAdminSettings; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminClient; +import com.google.cloud.spanner.admin.instance.v1.InstanceAdminSettings; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.common.collect.ImmutableList; +import com.google.spanner.admin.instance.v1.Instance; +import com.google.spanner.admin.instance.v1.InstanceConfig; +import com.google.spanner.admin.instance.v1.InstanceName; +import com.google.spanner.admin.instance.v1.ProjectName; +import com.google.spanner.v1.DatabaseName; +import 
io.grpc.ManagedChannelBuilder; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.Properties; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.utility.DockerImageName; + +@RunWith(JUnit4.class) +public class JdbcSampleTest { + private static final String PROJECT_ID = "emulator-project"; + private static final String INSTANCE_ID = "test-instance"; + private static final String DATABASE_ID = "test-database"; + private static final String PG_DATABASE_ID = "pg-test-database"; + + private static final ProjectName PROJECT_NAME = ProjectName.of(PROJECT_ID); + + private static final InstanceName INSTANCE_NAME = InstanceName.of(PROJECT_ID, INSTANCE_ID); + + private static GenericContainer emulator; + + private static Properties properties; + + @BeforeClass + public static void setup() throws Exception { + emulator = + new GenericContainer<>( + DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator:latest")) + .withExposedPorts(9010) + .waitingFor(Wait.forListeningPort()); + emulator.start(); + try (InstanceAdminClient client = + InstanceAdminClient.create( + InstanceAdminSettings.newBuilder() + .setTransportChannelProvider( + InstantiatingGrpcChannelProvider.newBuilder() + .setEndpoint(emulator.getHost() + ":" + emulator.getMappedPort(9010)) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build())) { + InstanceConfig config = + client.listInstanceConfigs(PROJECT_NAME).iterateAll().iterator().next(); + client + .createInstanceAsync( + PROJECT_NAME, + INSTANCE_ID, + Instance.newBuilder() + .setConfig(config.getName()) + .setDisplayName("Test Instance") + .setNodeCount(1) + .build()) + .get(); + } + // Create 
properties for the JDBC driver to connect to the emulator. + properties = new Properties(); + properties.put("autoConfigEmulator", "true"); + properties.put("lenient", "true"); + properties.put("endpoint", emulator.getHost() + ":" + emulator.getMappedPort(9010)); + } + + @AfterClass + public static void cleanup() { + SpannerPool.closeSpannerPool(); + emulator.stop(); + } + + DatabaseAdminClient createDatabaseAdminClient() throws Exception { + return DatabaseAdminClient.create( + DatabaseAdminSettings.newBuilder() + .setTransportChannelProvider( + InstantiatingGrpcChannelProvider.newBuilder() + .setEndpoint(emulator.getHost() + ":" + emulator.getMappedPort(9010)) + .setChannelConfigurator(ManagedChannelBuilder::usePlaintext) + .build()) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build()); + } + + @Test + public void testGoogleSQLSamples() throws Exception { + String result; + try (DatabaseAdminClient client = createDatabaseAdminClient()) { + result = runSample(() -> createDatabase(client, INSTANCE_NAME, DATABASE_ID, properties)); + } + assertEquals( + "Created database [" + DatabaseName.of(PROJECT_ID, INSTANCE_ID, DATABASE_ID) + "]\n", + result); + + result = runSample(() -> createConnection(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Hello World!\n", result); + + result = runSample(() -> createConnectionWithEmulator(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Hello World!\n", result); + + result = runSample(() -> writeDataWithDml(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("4 records inserted.\n", result); + + result = + runSample(() -> writeDataWithDmlBatch(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("3 records inserted.\n", result); + + result = + runSample(() -> writeDataWithMutations(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Inserted 10 rows.\n", result); + + result = runSample(() -> queryData(PROJECT_ID, INSTANCE_ID, 
DATABASE_ID, properties)); + assertEquals( + "1 2 Go, Go, Go\n" + + "2 2 Forever Hold Your Peace\n" + + "1 1 Total Junk\n" + + "2 1 Green\n" + + "2 3 Terrified\n", + result); + + result = runSample(() -> queryWithParameter(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("12 Melissa Garcia\n", result); + + result = runSample(() -> addColumn(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Added MarketingBudget column\n", result); + + result = runSample(() -> ddlBatch(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Added Venues and Concerts tables\n", result); + + result = + runSample(() -> updateDataWithMutations(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Updated albums\n", result); + + result = + runSample(() -> queryDataWithNewColumn(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals( + "1 2 null\n" + "2 2 500000\n" + "1 1 100000\n" + "2 1 null\n" + "2 3 null\n", result); + + result = + runSample( + () -> writeWithTransactionUsingDml(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Transferred marketing budget from Album 2 to Album 1\n", result); + + result = + runSample( + () -> tags(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Reduced marketing budget\n", result); + + result = runSample(() -> readOnlyTransaction(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals( + "1 1 Total Junk\n" + + "1 2 Go, Go, Go\n" + + "2 1 Green\n" + + "2 2 Forever Hold Your Peace\n" + + "2 3 Terrified\n" + + "2 2 Forever Hold Your Peace\n" + + "1 2 Go, Go, Go\n" + + "2 1 Green\n" + + "2 3 Terrified\n" + + "1 1 Total Junk\n", + result); + + result = runSample(() -> dataBoost(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals( + "2 Catalina Smith\n" + + "4 Lea Martin\n" + + "12 Melissa Garcia\n" + + "14 Jacqueline Long\n" + + "16 Sarah Wilson\n" + + "18 Maya Patel\n" + + "1 Marc Richards\n" + + "3 Alice Trentor\n" + + 
"5 David Lomond\n" + + "13 Russel Morales\n" + + "15 Dylan Shaw\n" + + "17 Ethan Miller\n", + result); + + result = runSample(() -> partitionedDml(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Updated at least 3 albums\n", result); + + result = runSample( + () -> arrayOfStructAsQueryParameter(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("value1;1;\nvalue2;2;\n", result); + + result = runSample( + () -> protoColumns(PROJECT_ID, INSTANCE_ID, DATABASE_ID, properties)); + assertEquals("Inserted 2 singers\n" + + "2:\n" + + "singer_id: 2\n" + + "birth_date: \"2001-12-03\"\n" + + "nationality: \"FO\"\n" + + "genre: POP\n" + + "\n" + + "POP\n" + + "1:\n" + + "singer_id: 1\n" + + "birth_date: \"1998-07-04\"\n" + + "nationality: \"ES\"\n" + + "genre: ROCK\n" + + "\n" + + "ROCK\n", result); + } + + @Test + public void testPostgreSQLSamples() throws Exception { + String result; + try (DatabaseAdminClient client = createDatabaseAdminClient()) { + result = + runSample( + () -> createPostgreSQLDatabase(client, INSTANCE_NAME, PG_DATABASE_ID, properties)); + } + assertEquals( + "Created database [" + DatabaseName.of(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID) + "]\n", + result); + + result = runSample(() -> createConnection(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Hello World!\n", result); + + result = runSample(() -> createConnectionWithEmulator(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Hello World!\n", result); + + result = + runSample( + () -> writeDataWithDmlPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("4 records inserted.\n", result); + + result = + runSample( + () -> + writeDataWithDmlBatchPostgreSQL( + PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("3 records inserted.\n", result); + + result = + runSample( + () -> + writeDataWithMutationsPostgreSQL( + PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + 
assertEquals("Inserted 10 rows.\n", result); + + result = + runSample(() -> queryDataPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals( + "1 2 Go, Go, Go\n" + + "2 2 Forever Hold Your Peace\n" + + "1 1 Total Junk\n" + + "2 1 Green\n" + + "2 3 Terrified\n", + result); + + result = + runSample( + () -> + queryWithParameterPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("12 Melissa Garcia\n", result); + + result = + runSample(() -> addColumnPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Added marketing_budget column\n", result); + + result = + runSample(() -> ddlBatchPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Added venues and concerts tables\n", result); + + result = + runSample( + () -> + updateDataWithMutationsPostgreSQL( + PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Updated albums\n", result); + + result = + runSample( + () -> + queryDataWithNewColumnPostgreSQL( + PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals( + "1 2 null\n" + "2 2 500000\n" + "1 1 100000\n" + "2 1 null\n" + "2 3 null\n", result); + + result = + runSample( + () -> + writeWithTransactionUsingDmlPostgreSQL( + PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Transferred marketing budget from Album 2 to Album 1\n", result); + + result = + runSample( + () -> + tagsPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Reduced marketing budget\n", result); + + result = + runSample( + () -> + readOnlyTransactionPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals( + "1 1 Total Junk\n" + + "1 2 Go, Go, Go\n" + + "2 1 Green\n" + + "2 2 Forever Hold Your Peace\n" + + "2 3 Terrified\n" + + "2 2 Forever Hold Your Peace\n" + + "1 2 Go, Go, Go\n" + + "2 1 Green\n" + + "2 3 Terrified\n" + + "1 1 Total Junk\n", + result); + + result = + runSample(() -> 
dataBoostPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals( + "2 Catalina Smith\n" + + "4 Lea Martin\n" + + "12 Melissa Garcia\n" + + "14 Jacqueline Long\n" + + "16 Sarah Wilson\n" + + "18 Maya Patel\n" + + "1 Marc Richards\n" + + "3 Alice Trentor\n" + + "5 David Lomond\n" + + "13 Russel Morales\n" + + "15 Dylan Shaw\n" + + "17 Ethan Miller\n", + result); + + result = + runSample( + () -> partitionedDmlPostgreSQL(PROJECT_ID, INSTANCE_ID, PG_DATABASE_ID, properties)); + assertEquals("Updated at least 3 albums\n", result); + } + + interface Sample { + void run() throws Exception; + } + + String runSample(Sample sample) throws Exception { + PrintStream stdOut = System.out; + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + PrintStream out = new PrintStream(bout); + System.setOut(out); + try { + sample.run(); + } finally { + System.setOut(stdOut); + } + return bout.toString(); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/README.md b/java-spanner-jdbc/samples/spring-data-jdbc/README.md new file mode 100644 index 000000000000..4b6dbcc57b91 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/README.md @@ -0,0 +1,17 @@ +# Spring Data JDBC + +This directory contains two sample applications for using Spring Data JDBC +with the Spanner JDBC driver. + +[Spring Data JDBC](https://spring.io/projects/spring-data-jdbc) is part of the larger Spring Data +family. It makes it easy to implement JDBC based repositories. +This module deals with enhanced support for JDBC based data access layers. + +Spring Data JDBC aims at being conceptually easy. In order to achieve this it does NOT offer caching, +lazy loading, write behind or many other features of JPA. This makes Spring Data JDBC a simple, +limited, opinionated ORM. + +- [GoogleSQL](googlesql): This sample uses the Spanner GoogleSQL dialect. +- [PostgreSQL](postgresql): This sample uses the Spanner PostgreSQL dialect and the Spanner JDBC + driver. 
It does not use PGAdapter. The sample application can also be configured to run on open + source PostgreSQL, and shows how a portable application can be developed using this setup. diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/README.md b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/README.md new file mode 100644 index 000000000000..7abe334efb22 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/README.md @@ -0,0 +1,57 @@ +# Spring Data JDBC Sample Application with Spanner GoogleSQL + +This sample application shows how to use Spring Data JDBC with Spanner GoogleSQL. + +This sample shows: + +1. How to use Spring Data JDBC with Spanner GoogleSQL. +2. How to use bit-reversed identity columns to automatically generate primary key values for entities. +3. How to set the transaction isolation level that is used by the Spanner JDBC driver. + +## Spring Data JDBC + +[Spring Data JDBC](https://spring.io/projects/spring-data-jdbc) is part of the larger Spring Data +family. It makes it easy to implement JDBC based repositories. This module deals with enhanced +support for JDBC based data access layers. + +Spring Data JDBC aims at being conceptually easy. In order to achieve this it does NOT offer caching, +lazy loading, write behind or many other features of JPA. This makes Spring Data JDBC a simple, +limited, opinionated ORM. + +### Running the Application + +The application by default runs on the Spanner Emulator. + +1. Modify the [application.properties](src/main/resources/application.properties) to point to an existing + database. The database must use the GoogleSQL dialect. +2. Run the application with `mvn spring-boot:run`. + +### Main Application Components + +The main application components are: +* [DatabaseSeeder.java](src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java): This + class is responsible for creating the database schema and inserting some initial test data. 
The + schema is created from the [create_schema.sql](src/main/resources/create_schema.sql) file. The + `DatabaseSeeder` class loads this file into memory and executes it on the active database using + standard JDBC APIs. +* [SpannerDialectProvider](src/main/java/com/google/cloud/spanner/sample/SpannerDialectProvider.java): + Spring Data JDBC by default detects the database dialect based on the JDBC driver that is used. + Spanner GoogleSQL is not automatically recognized by Spring Data, so we add a dialect provider + for Spanner. +* [SpannerDialect](src/main/java/com/google/cloud/spanner/sample/SpannerDialect.java): + Spring Data JDBC requires a dialect for the database, so it knows which features are supported, + and how to build clauses like `LIMIT` and `FOR UPDATE`. This class provides this information. It + is based on the built-in `AnsiDialect` in Spring Data JDBC. +* [JdbcConfiguration.java](src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java): + This configuration file serves two purposes: + 1. Make sure `OpenTelemetry` is initialized before any data sources. + 2. Add a converter for `LocalDate` properties. Spring Data JDBC by default maps these to `TIMESTAMP` + columns, but a better fit in Spanner is `DATE`. +* [AbstractEntity.java](src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java): + This is the shared base class for all entities in this sample application. It defines a number of + standard attributes, such as the identifier (primary key). The primary key is automatically + generated using a (bit-reversed) sequence. [Bit-reversed sequential values](https://cloud.google.com/spanner/docs/schema-design#bit_reverse_primary_key) + are considered a good choice for primary keys on Cloud Spanner. +* [Application.java](src/main/java/com/google/cloud/spanner/sample/Application.java): The starter + class of the application. It contains a command-line runner that executes a selection of queries + and updates on the database. 
diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/pom.xml b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/pom.xml new file mode 100644 index 000000000000..b15cc01e31b3 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/pom.xml @@ -0,0 +1,142 @@ + + + 4.0.0 + + org.example + cloud-spanner-spring-data-jdbc-googlesql-example + 1.0-SNAPSHOT + + Sample application showing how to use Spring Data JDBC with Cloud Spanner GoogleSQL. + + + + 17 + 17 + 17 + UTF-8 + + + + + + org.springframework.data + spring-data-bom + 2025.0.5 + import + pom + + + com.google.cloud + google-cloud-spanner-bom + 6.111.1 + import + pom + + + com.google.cloud + grpc-gcp + + + com.google.cloud + libraries-bom + 26.76.0 + import + pom + + + io.opentelemetry + opentelemetry-bom + 1.59.0 + pom + import + + + + + + + org.springframework.boot + spring-boot-starter-data-jdbc + 4.0.3 + + + + + com.google.cloud + google-cloud-spanner-jdbc + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + + + + + io.opentelemetry + opentelemetry-sdk + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + + + com.google.cloud.opentelemetry + exporter-metrics + 0.36.0 + + + + + org.testcontainers + testcontainers + 2.0.3 + + + + com.google.collections + google-collections + 1.0 + + + + + com.google.cloud + google-cloud-spanner + test-jar + test + + + com.google.api + gax-grpc + testlib + test + + + junit + junit + 4.13.2 + + + + + + + com.spotify.fmt + fmt-maven-plugin + 2.29 + + + + format + + + + + + + diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/Application.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/Application.java new file mode 100644 index 000000000000..a75ea2fec7dc --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/Application.java @@ -0,0 +1,269 
@@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import com.google.cloud.spanner.connection.SavepointSupport; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.cloud.spanner.jdbc.CloudSpannerJdbcConnection; +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.cloud.spanner.sample.entities.Track; +import com.google.cloud.spanner.sample.repositories.AlbumRepository; +import com.google.cloud.spanner.sample.repositories.SingerRepository; +import com.google.cloud.spanner.sample.repositories.TrackRepository; +import com.google.cloud.spanner.sample.service.SingerService; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Savepoint; +import java.sql.Statement; +import javax.sql.DataSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application implements CommandLineRunner { + private static final Logger logger = 
LoggerFactory.getLogger(Application.class); + + public static void main(String[] args) { + // This automatically starts the Spanner emulator in a Docker container, unless the + // spanner.auto_start_emulator property has been set to false. In that case, it is a no-op. + EmulatorInitializer emulatorInitializer = new EmulatorInitializer(); + try { + SpringApplication application = new SpringApplication(Application.class); + application.addListeners(emulatorInitializer); + application.run(args).close(); + } finally { + SpannerPool.closeSpannerPool(); + emulatorInitializer.stopEmulator(); + } + } + + private final DatabaseSeeder databaseSeeder; + + private final SingerService singerService; + + private final SingerRepository singerRepository; + + private final AlbumRepository albumRepository; + + private final TrackRepository trackRepository; + + private final Tracer tracer; + + private final DataSource dataSource; + + public Application( + SingerService singerService, + DatabaseSeeder databaseSeeder, + SingerRepository singerRepository, + AlbumRepository albumRepository, + TrackRepository trackRepository, + Tracer tracer, + DataSource dataSource) { + this.databaseSeeder = databaseSeeder; + this.singerService = singerService; + this.singerRepository = singerRepository; + this.albumRepository = albumRepository; + this.trackRepository = trackRepository; + this.tracer = tracer; + this.dataSource = dataSource; + } + + @Override + public void run(String... args) { + // Set the system property 'drop_schema' to true to drop any existing database + // schema when the application is executed. 
+ if (Boolean.parseBoolean(System.getProperty("drop_schema", "false"))) { + logger.info("Dropping existing schema if it exists"); + databaseSeeder.dropDatabaseSchemaIfExists(); + } + + logger.info("Creating database schema if it does not already exist"); + databaseSeeder.createDatabaseSchemaIfNotExists(); + logger.info("Deleting existing test data"); + databaseSeeder.deleteTestData(); + logger.info("Inserting fresh test data"); + databaseSeeder.insertTestData(); + + Iterable allSingers = singerRepository.findAll(); + for (Singer singer : allSingers) { + logger.info( + "Found singer: {} with {} albums", + singer, + albumRepository.countAlbumsBySingerId(singer.getId())); + for (Album album : albumRepository.findAlbumsBySingerId(singer.getId())) { + logger.info("\tAlbum: {}, released at {}", album, album.getReleaseDate()); + } + } + + // Create a new singer and three albums in a transaction. + Singer insertedSinger = + singerService.createSingerAndAlbums( + new Singer("Amethyst", "Jiang"), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle())); + logger.info( + "Inserted singer {} {} {}", + insertedSinger.getId(), + insertedSinger.getFirstName(), + insertedSinger.getLastName()); + + // Create a new track record and insert it into the database. + Album album = albumRepository.getFirst().orElseThrow(); + Track track = new Track(album, 1, DatabaseSeeder.randomTitle()); + track.setSampleRate(3.14d); + // Spring Data JDBC supports the same base CRUD operations on entities as for example + // Spring Data JPA. + trackRepository.save(track); + + // List all singers that have a last name starting with an 'J'. + logger.info("All singers with a last name starting with an 'J':"); + for (Singer singer : singerRepository.findSingersByLastNameStartingWith("J")) { + logger.info("\t{}", singer.getFullName()); + } + + // The singerService.listSingersWithLastNameStartingWith(..) 
method uses a read-only + // transaction. You should prefer read-only transactions to read/write transactions whenever + // possible, as read-only transactions do not take locks. + logger.info("All singers with a last name starting with an 'A', 'B', or 'C'."); + for (Singer singer : singerService.listSingersWithLastNameStartingWith("A", "B", "C")) { + logger.info("\t{}", singer.getFullName()); + } + + // Run two concurrent transactions that conflict with each other to show the automatic retry + // behavior built into the JDBC driver. + concurrentTransactions(); + + // Use a savepoint to roll back to a previous point in a transaction. + savepoints(); + } + + void concurrentTransactions() { + // Create two transactions that conflict with each other to trigger a transaction retry. + // This sample is intended to show a couple of things: + // 1. Spanner will abort transactions that conflict. The Spanner JDBC driver will automatically + // retry aborted transactions internally, which ensures that both these transactions + // succeed without any errors. See + // https://cloud.google.com/spanner/docs/jdbc-session-mgmt-commands#retry_aborts_internally + // for more information on how the JDBC driver retries aborted transactions. + // 2. The JDBC driver adds information to the OpenTelemetry tracing that makes it easier to find + // transactions that were aborted and retried. 
+ logger.info("Executing two concurrent transactions"); + Span span = tracer.spanBuilder("update-singers").startSpan(); + try (Scope ignore = span.makeCurrent(); + Connection connection1 = dataSource.getConnection(); + Connection connection2 = dataSource.getConnection(); + Statement statement1 = connection1.createStatement(); + Statement statement2 = connection2.createStatement()) { + statement1.execute("begin"); + statement1.execute("set transaction_tag='update-singer-1'"); + statement2.execute("begin"); + statement2.execute("set transaction_tag='update-singer-2'"); + long id = 0L; + statement1.execute("set statement_tag='fetch-singer-id'"); + try (ResultSet resultSet = statement1.executeQuery("select id from singers limit 1")) { + while (resultSet.next()) { + id = resultSet.getLong(1); + } + } + String sql = "update singers set active=not active where id=?"; + statement1.execute("set statement_tag='update-singer-1'"); + try (PreparedStatement preparedStatement = connection1.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + statement2.execute("set statement_tag='update-singer-2'"); + try (PreparedStatement preparedStatement = connection2.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + statement1.execute("commit"); + statement2.execute("commit"); + } catch (SQLException exception) { + span.recordException(exception); + throw new RuntimeException(exception); + } finally { + span.end(); + } + } + + void savepoints() { + // Run a transaction with a savepoint, and rollback to that savepoint. + logger.info("Executing a transaction with a savepoint"); + Span span = tracer.spanBuilder("savepoint-sample").startSpan(); + try (Scope ignore = span.makeCurrent(); + Connection connection = dataSource.getConnection(); + Statement statement = connection.createStatement()) { + // Enable savepoints for this connection. 
+ connection + .unwrap(CloudSpannerJdbcConnection.class) + .setSavepointSupport(SavepointSupport.ENABLED); + + statement.execute("begin"); + statement.execute("set transaction_tag='transaction-with-savepoint'"); + + // Fetch a random album. + long id = 0L; + try (ResultSet resultSet = + statement.executeQuery( + "/*@statement_tag='fetch-album-id'*/ select id from albums limit 1")) { + while (resultSet.next()) { + id = resultSet.getLong(1); + } + } + // Set a savepoint that we can roll back to at a later moment in the transaction. + // Note that the savepoint name must be a valid identifier. + Savepoint savepoint = connection.setSavepoint("fetched_album_id"); + + String sql = + "/*@statement_tag='update-album-marketing-budget-by-10-percent'*/ update albums set marketing_budget=marketing_budget * 1.1 where id=?"; + try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + + // Rollback to the savepoint that we set at an earlier stage, and then update the marketing + // budget by 20 percent instead. + connection.rollback(savepoint); + + sql = + "/*@statement_tag='update-album-marketing-budget-by-20-percent'*/ update albums set marketing_budget=marketing_budget * 1.2 where id=?"; + try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + statement.execute("commit"); + + // Reset the state of the connection before returning it to the connection pool. 
+ statement.execute("reset all"); + } catch (SQLException exception) { + span.recordException(exception); + throw new RuntimeException(exception); + } finally { + span.end(); + } + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java new file mode 100644 index 000000000000..3e370a097a86 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java @@ -0,0 +1,353 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.spanner.sample;

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.cloud.spanner.sample.entities.Singer;
import com.google.common.collect.ImmutableList;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Scope;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UncheckedIOException;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.time.LocalDate;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import javax.annotation.Nonnull;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;
import org.springframework.util.FileCopyUtils;

/** This component creates the database schema and seeds it with some random test data. */
@Component
public class DatabaseSeeder {

  /** Randomly generated names that are inserted as the initial set of singers. */
  public static final ImmutableList<Singer> INITIAL_SINGERS =
      ImmutableList.of(
          new Singer("Aaliyah", "Smith"),
          new Singer("Benjamin", "Jones"),
          new Singer("Chloe", "Brown"),
          new Singer("David", "Williams"),
          new Singer("Elijah", "Johnson"),
          new Singer("Emily", "Miller"),
          new Singer("Gabriel", "Garcia"),
          new Singer("Hannah", "Rodriguez"),
          new Singer("Isabella", "Hernandez"),
          new Singer("Jacob", "Perez"));

  private static final Random RANDOM = new Random();

  private final JdbcTemplate jdbcTemplate;

  private final Tracer tracer;

  @Value("classpath:create_schema.sql")
  private Resource createSchemaFile;

  @Value("classpath:drop_schema.sql")
  private Resource dropSchemaFile;

  public DatabaseSeeder(JdbcTemplate jdbcTemplate, Tracer tracer) {
    this.jdbcTemplate = jdbcTemplate;
    this.tracer = tracer;
  }

  /** Reads a resource file into a string. */
  private static String resourceAsString(Resource resource) {
    try (Reader reader = new InputStreamReader(resource.getInputStream(), UTF_8)) {
      return FileCopyUtils.copyToString(reader);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Removes any empty statements from the given array. Splitting a DDL script on ';' leaves a
   * trailing empty element (and possibly blank elements between statements); those must not be
   * sent to the backend as part of a DDL batch.
   */
  private String[] updateDdlStatements(String[] statements) {
    // Remove any empty statements from the script.
    return Arrays.stream(statements)
        .filter(statement -> !statement.isBlank())
        .toArray(String[]::new);
  }

  /** Creates the database schema if it does not yet exist. */
  public void createDatabaseSchemaIfNotExists() {
    // We can safely just split the script based on ';', as we know that there are no literals or
    // other strings that contain semicolons in the script.
    String[] statements = updateDdlStatements(resourceAsString(createSchemaFile).split(";"));
    // Execute all the DDL statements as a JDBC batch. That ensures that Cloud Spanner will apply
    // all statements in a single DDL batch, which again is a lot more efficient than executing
    // them one-by-one.
    jdbcTemplate.batchUpdate(statements);
  }

  /** Drops the database schema if it exists. */
  public void dropDatabaseSchemaIfExists() {
    // We can safely just split the script based on ';', as we know that there are no literals or
    // other strings that contain semicolons in the script.
    String[] statements = updateDdlStatements(resourceAsString(dropSchemaFile).split(";"));
    // Execute all the DDL statements as a JDBC batch. That ensures that Cloud Spanner will apply
    // all statements in a single DDL batch, which again is a lot more efficient than executing
    // them one-by-one.
    jdbcTemplate.batchUpdate(statements);
  }

  /** Deletes all data currently in the sample tables. */
  public void deleteTestData() {
    Span span = tracer.spanBuilder("deleteTestData").startSpan();
    try (Scope ignore = span.makeCurrent()) {
      // Delete all data in one batch. Child tables are deleted before their parents.
      jdbcTemplate.execute("set statement_tag='batch_delete_test_data'");
      jdbcTemplate.batchUpdate(
          "delete from concerts where true",
          "delete from venues where true",
          "delete from tracks where true",
          "delete from albums where true",
          "delete from singers where true");
    } catch (Throwable t) {
      span.recordException(t);
      throw t;
    } finally {
      span.end();
    }
  }

  /** Inserts some initial test data into the database. */
  public void insertTestData() {
    Span span = tracer.spanBuilder("insertTestData").startSpan();
    try (Scope ignore = span.makeCurrent()) {
      // Run all inserts in a single, tagged read/write transaction.
      jdbcTemplate.execute("begin");
      jdbcTemplate.execute("set transaction_tag='insert_test_data'");
      jdbcTemplate.execute("set statement_tag='insert_singers'");
      jdbcTemplate.batchUpdate(
          "insert into singers (first_name, last_name) values (?, ?)",
          new BatchPreparedStatementSetter() {
            @Override
            public void setValues(@Nonnull PreparedStatement preparedStatement, int i)
                throws SQLException {
              preparedStatement.setString(1, INITIAL_SINGERS.get(i).getFirstName());
              preparedStatement.setString(2, INITIAL_SINGERS.get(i).getLastName());
            }

            @Override
            public int getBatchSize() {
              return INITIAL_SINGERS.size();
            }
          });

      // Fetch the generated singer ids, so we can use them as parent references for the albums.
      List<Long> singerIds =
          jdbcTemplate.query(
              "select id from singers",
              resultSet -> {
                ImmutableList.Builder<Long> builder = ImmutableList.builder();
                while (resultSet.next()) {
                  builder.add(resultSet.getLong(1));
                }
                return builder.build();
              });
      jdbcTemplate.execute("set statement_tag='insert_albums'");
      jdbcTemplate.batchUpdate(
          "insert into albums (title, marketing_budget, release_date, cover_picture, singer_id) values (?, ?, ?, ?, ?)",
          new BatchPreparedStatementSetter() {
            @Override
            public void setValues(@Nonnull PreparedStatement preparedStatement, int i)
                throws SQLException {
              preparedStatement.setString(1, randomTitle());
              preparedStatement.setBigDecimal(2, randomBigDecimal());
              preparedStatement.setObject(3, randomDate());
              preparedStatement.setBytes(4, randomBytes());
              preparedStatement.setLong(5, randomElement(singerIds));
            }

            @Override
            public int getBatchSize() {
              return INITIAL_SINGERS.size() * 20;
            }
          });
      jdbcTemplate.execute("commit");
    } catch (Throwable t) {
      try {
        jdbcTemplate.execute("rollback");
      } catch (Exception ignored) {
        // Ignored: the rollback is best-effort cleanup; the original error is rethrown below.
      }
      span.recordException(t);
      throw t;
    } finally {
      span.end();
    }
  }

  /** Generates a random title for an album or a track. */
  static String randomTitle() {
    return randomElement(ADJECTIVES) + " " + randomElement(NOUNS);
  }

  /** Returns a random element from the given list. */
  static <T> T randomElement(List<T> list) {
    return list.get(RANDOM.nextInt(list.size()));
  }

  /** Generates a random {@link BigDecimal} in the range [0, 1) with a scale of 9. */
  static BigDecimal randomBigDecimal() {
    return BigDecimal.valueOf(RANDOM.nextDouble()).setScale(9, RoundingMode.HALF_UP);
  }

  /** Generates a random {@link LocalDate} between 1800-01-01 and 1999-12-28. */
  static LocalDate randomDate() {
    return LocalDate.of(RANDOM.nextInt(200) + 1800, RANDOM.nextInt(12) + 1, RANDOM.nextInt(28) + 1);
  }

  /** Generates a random byte array with a length between 4 and 1024 bytes. */
  static byte[] randomBytes() {
    int size = RANDOM.nextInt(1020) + 4;
    byte[] res = new byte[size];
    RANDOM.nextBytes(res);
    return res;
  }

  /** Some randomly generated nouns that are used to generate random titles. */
  private static final ImmutableList<String> NOUNS =
      ImmutableList.of(
          "apple",
          "banana",
          "cherry",
          "dog",
          "elephant",
          "fish",
          "grass",
          "house",
          "key",
          "lion",
          "monkey",
          "nail",
          "orange",
          "pen",
          "queen",
          "rain",
          "shoe",
          "tree",
          "umbrella",
          "van",
          "whale",
          "xylophone",
          "zebra");

  /** Some randomly generated adjectives that are used to generate random titles. */
  private static final ImmutableList<String> ADJECTIVES =
      ImmutableList.of(
          "able",
          "angelic",
          "artistic",
          "athletic",
          "attractive",
          "autumnal",
          "calm",
          "careful",
          "cheerful",
          "clever",
          "colorful",
          "confident",
          "courageous",
          "creative",
          "curious",
          "daring",
          "determined",
          "different",
          "dreamy",
          "efficient",
          "elegant",
          "energetic",
          "enthusiastic",
          "exciting",
          "expressive",
          "faithful",
          "fantastic",
          "funny",
          "gentle",
          "gifted",
          "great",
          "happy",
          "helpful",
          "honest",
          "hopeful",
          "imaginative",
          "intelligent",
          "interesting",
          "inventive",
          "joyful",
          "kind",
          "knowledgeable",
          "loving",
          "loyal",
          "magnificent",
          "mature",
          "mysterious",
          "natural",
          "nice",
          "optimistic",
          "peaceful",
          "perfect",
          "pleasant",
          "powerful",
          "proud",
          "quick",
          "relaxed",
          "reliable",
          "responsible",
          "romantic",
          "safe",
          "sensitive",
          "sharp",
          "simple",
          "sincere",
          "skillful",
          "smart",
          "sociable",
          "strong",
          "successful",
          "sweet",
          "talented",
          "thankful",
          "thoughtful",
          "unique",
          "upbeat",
          "valuable",
          "victorious",
          "vivacious",
          "warm",
          "wealthy",
          "wise",
          "wonderful",
          "worthy",
          "youthful");
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import org.springframework.boot.context.event.ApplicationEnvironmentPreparedEvent; +import org.springframework.context.ApplicationListener; +import org.springframework.core.env.ConfigurableEnvironment; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.images.PullPolicy; +import org.testcontainers.utility.DockerImageName; + +public class EmulatorInitializer + implements ApplicationListener { + private GenericContainer emulator; + + @Override + public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) { + ConfigurableEnvironment environment = event.getEnvironment(); + boolean useEmulator = + Boolean.TRUE.equals(environment.getProperty("spanner.emulator", Boolean.class)); + boolean autoStartEmulator = + Boolean.TRUE.equals(environment.getProperty("spanner.auto_start_emulator", Boolean.class)); + if (!(useEmulator && autoStartEmulator)) { + return; + } + + emulator = + new GenericContainer<>(DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator")); + emulator.withImagePullPolicy(PullPolicy.alwaysPull()); + emulator.addExposedPort(9010); + emulator.setWaitStrategy(Wait.forListeningPorts(9010)); + emulator.start(); + + System.setProperty("spanner.endpoint", "//localhost:" + emulator.getMappedPort(9010)); + } + + public void stopEmulator() { + if (this.emulator != null) { + this.emulator.stop(); + } + } +} diff --git 
package com.google.cloud.spanner.sample;

import java.sql.JDBCType;
import java.sql.SQLType;
import java.time.LocalDate;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.DependsOn;
import org.springframework.context.annotation.Lazy;
import org.springframework.data.jdbc.core.convert.DefaultJdbcTypeFactory;
import org.springframework.data.jdbc.core.convert.JdbcConverter;
import org.springframework.data.jdbc.core.convert.JdbcCustomConversions;
import org.springframework.data.jdbc.core.convert.MappingJdbcConverter;
import org.springframework.data.jdbc.core.convert.RelationResolver;
import org.springframework.data.jdbc.core.dialect.JdbcArrayColumns;
import org.springframework.data.jdbc.core.dialect.JdbcDialect;
import org.springframework.data.jdbc.core.mapping.JdbcMappingContext;
import org.springframework.data.jdbc.repository.config.AbstractJdbcConfiguration;
import org.springframework.data.relational.core.dialect.Dialect;
import org.springframework.data.relational.core.mapping.RelationalPersistentProperty;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations;

/**
 * This configuration class is registered as depending on OpenTelemetry, as the JDBC driver uses
 * the globally registered OpenTelemetry instance. It also overrides the default jdbcConverter
 * implementation to map LocalDate to the JDBC type DATE (the default implementation maps
 * LocalDate to TIMESTAMP).
 */
@DependsOn("openTelemetry")
@Configuration
public class JdbcConfiguration extends AbstractJdbcConfiguration {

  @Bean
  @Override
  public JdbcConverter jdbcConverter(
      JdbcMappingContext mappingContext,
      NamedParameterJdbcOperations operations,
      @Lazy RelationResolver relationResolver,
      JdbcCustomConversions conversions,
      Dialect dialect) {
    // Use the dialect's array support when it offers any; otherwise fall back to the default.
    JdbcArrayColumns arraySupport;
    if (dialect instanceof JdbcDialect) {
      arraySupport = ((JdbcDialect) dialect).getArraySupport();
    } else {
      arraySupport = JdbcArrayColumns.DefaultSupport.INSTANCE;
    }
    DefaultJdbcTypeFactory typeFactory =
        new DefaultJdbcTypeFactory(operations.getJdbcOperations(), arraySupport);
    // Override the target SQL type for LocalDate so that it maps to DATE instead of TIMESTAMP.
    return new MappingJdbcConverter(mappingContext, relationResolver, conversions, typeFactory) {
      @Override
      public SQLType getTargetSqlType(RelationalPersistentProperty property) {
        if (property.getActualType().equals(LocalDate.class)) {
          return JDBCType.DATE;
        }
        return super.getTargetSqlType(property);
      }
    };
  }
}
package com.google.cloud.spanner.sample;

import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter;
import com.google.cloud.opentelemetry.metric.MetricConfiguration;
import com.google.cloud.opentelemetry.trace.TraceConfiguration;
import com.google.cloud.opentelemetry.trace.TraceExporter;
import com.google.cloud.spanner.SpannerOptions;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
import io.opentelemetry.sdk.metrics.export.MetricExporter;
import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
import io.opentelemetry.sdk.resources.Resource;
import io.opentelemetry.sdk.trace.SdkTracerProvider;
import io.opentelemetry.sdk.trace.export.BatchSpanProcessor;
import io.opentelemetry.sdk.trace.export.SpanExporter;
import io.opentelemetry.sdk.trace.samplers.Sampler;
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Configures the global {@link OpenTelemetry} instance that is used by both this sample
 * application and the Spanner JDBC driver for traces and metrics. A no-op instance is returned
 * when tracing is disabled, the emulator is used, or no default credentials are available.
 */
@Configuration
public class OpenTelemetryConfiguration {

  // Whether OpenTelemetry exporting is enabled for this application.
  @Value("${open_telemetry.enabled}")
  private boolean enabled;

  // Exporting is skipped when the application runs against the Spanner emulator.
  @Value("${spanner.emulator}")
  private boolean emulator;

  // The Google Cloud project that traces and metrics are exported to.
  @Value("${open_telemetry.project}")
  private String project;

  @Bean
  public OpenTelemetry openTelemetry() {
    if (!enabled || emulator) {
      return OpenTelemetry.noop();
    }
    if (!hasDefaultCredentials()) {
      // Create a no-op OpenTelemetry object if this environment does not have any default
      // credentials configured. This could for example be on local test environments that use
      // the Spanner emulator. Checking this before flipping the SpannerOptions flags below
      // avoids enabling OpenTelemetry in Spanner when nothing will be exported.
      return OpenTelemetry.noop();
    }

    // Enable OpenTelemetry tracing and metrics in Spanner.
    SpannerOptions.enableOpenTelemetryTraces();
    SpannerOptions.enableOpenTelemetryMetrics();

    TraceConfiguration.Builder traceConfigurationBuilder = TraceConfiguration.builder();
    TraceConfiguration traceConfiguration = traceConfigurationBuilder.setProjectId(project).build();
    SpanExporter traceExporter = TraceExporter.createWithConfiguration(traceConfiguration);

    MetricConfiguration.Builder metricConfigurationBuilder = MetricConfiguration.builder();
    MetricConfiguration metricConfiguration =
        metricConfigurationBuilder.setProjectId(project).build();
    MetricExporter metricExporter =
        GoogleCloudMetricExporter.createWithConfiguration(metricConfiguration);

    SdkMeterProvider sdkMeterProvider =
        SdkMeterProvider.builder()
            .registerMetricReader(PeriodicMetricReader.builder(metricExporter).build())
            .build();

    // Create an OpenTelemetry object and register it as the global OpenTelemetry object. This
    // will automatically be picked up by the Spanner libraries and used for tracing.
    return OpenTelemetrySdk.builder()
        .setTracerProvider(
            SdkTracerProvider.builder()
                // Set sampling to 'AlwaysOn' in this example. In production, you want to reduce
                // this to a smaller fraction to limit the number of traces that are being
                // collected.
                .setSampler(Sampler.alwaysOn())
                .setResource(
                    Resource.builder()
                        .put(
                            "service.name",
                            "spanner-jdbc-spring-data-sample-"
                                + ThreadLocalRandom.current().nextInt())
                        .build())
                .addSpanProcessor(BatchSpanProcessor.builder(traceExporter).build())
                .build())
        .setMeterProvider(sdkMeterProvider)
        .buildAndRegisterGlobal();
  }

  /** Returns true if this environment has Google Cloud application default credentials set up. */
  private boolean hasDefaultCredentials() {
    try {
      return GoogleCredentials.getApplicationDefault() != null;
    } catch (IOException exception) {
      return false;
    }
  }

  /** The tracer that is used by the application itself for custom spans. */
  @Bean
  public Tracer tracer(OpenTelemetry openTelemetry) {
    return openTelemetry.getTracer("com.google.cloud.spanner.jdbc.sample.spring-data-jdbc");
  }
}
package com.google.cloud.spanner.sample;

import static java.time.ZoneId.systemDefault;

import com.google.common.collect.ImmutableList;
import java.sql.JDBCType;
import java.sql.Timestamp;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.util.Collection;
import java.util.Date;
import javax.annotation.Nonnull;
import org.springframework.core.convert.converter.Converter;
import org.springframework.data.convert.ReadingConverter;
import org.springframework.data.convert.WritingConverter;
import org.springframework.data.jdbc.core.mapping.JdbcValue;
import org.springframework.data.relational.core.dialect.AnsiDialect;
import org.springframework.data.relational.core.dialect.LimitClause;
import org.springframework.data.relational.core.sql.IdentifierProcessing;
import org.springframework.data.relational.core.sql.IdentifierProcessing.LetterCasing;
import org.springframework.data.relational.core.sql.IdentifierProcessing.Quoting;

/**
 * The Spanner GoogleSQL dialect is relatively close to the standard ANSI dialect. We therefore
 * create a custom dialect based on ANSI, but with a few overrides.
 */
public class SpannerDialect extends AnsiDialect {
  public static final SpannerDialect INSTANCE = new SpannerDialect();

  /** Spanner uses backticks for identifier quoting. */
  private static final Quoting QUOTING = new Quoting("`");

  /** Spanner supports mixed-case identifiers. */
  private static final IdentifierProcessing IDENTIFIER_PROCESSING =
      IdentifierProcessing.create(QUOTING, LetterCasing.AS_IS);

  private static final LimitClause LIMIT_CLAUSE =
      new LimitClause() {
        private static final long DEFAULT_LIMIT_FOR_OFFSET = Long.MAX_VALUE / 2;

        @Nonnull
        @Override
        public String getLimit(long limit) {
          return String.format("LIMIT %d", limit);
        }

        @Nonnull
        @Override
        public String getOffset(long offset) {
          // Spanner does not support an OFFSET clause without a LIMIT clause.
          return String.format("LIMIT %d OFFSET %d", DEFAULT_LIMIT_FOR_OFFSET, offset);
        }

        @Nonnull
        @Override
        public String getLimitOffset(long limit, long offset) {
          return String.format("LIMIT %d OFFSET %d", limit, offset);
        }

        @Nonnull
        @Override
        public Position getClausePosition() {
          return Position.AFTER_ORDER_BY;
        }
      };

  private SpannerDialect() {}

  @Nonnull
  @Override
  public IdentifierProcessing getIdentifierProcessing() {
    return IDENTIFIER_PROCESSING;
  }

  @Nonnull
  @Override
  public LimitClause limit() {
    return LIMIT_CLAUSE;
  }

  @Nonnull
  @Override
  public Collection<Object> getConverters() {
    return ImmutableList.of(
        TimestampAtUtcToOffsetDateTimeConverter.INSTANCE,
        OffsetDateTimeToTimestampJdbcValueConverter.INSTANCE,
        LocalDateTimeToDateConverter.INSTANCE);
  }

  /** Reads a JDBC {@link Timestamp} as an {@link OffsetDateTime} at UTC. */
  @ReadingConverter
  enum TimestampAtUtcToOffsetDateTimeConverter implements Converter<Timestamp, OffsetDateTime> {
    INSTANCE;

    private static final ZoneId UTC = ZoneId.of("UTC");

    @Override
    public OffsetDateTime convert(Timestamp timestamp) {
      return OffsetDateTime.ofInstant(timestamp.toInstant(), UTC);
    }
  }

  /** Writes an {@link OffsetDateTime} as a JDBC TIMESTAMP value. */
  @WritingConverter
  enum OffsetDateTimeToTimestampJdbcValueConverter implements Converter<OffsetDateTime, JdbcValue> {
    INSTANCE;

    @Override
    public JdbcValue convert(@Nonnull OffsetDateTime source) {
      return JdbcValue.of(source, JDBCType.TIMESTAMP);
    }
  }

  /** Reads a {@link LocalDateTime} as a {@link Date} in the system default time zone. */
  @ReadingConverter
  enum LocalDateTimeToDateConverter implements Converter<LocalDateTime, Date> {
    INSTANCE;

    @Nonnull
    @Override
    public Date convert(LocalDateTime source) {
      return Date.from(source.atZone(systemDefault()).toInstant());
    }
  }
}
+ */
+
+package com.google.cloud.spanner.sample;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.SQLException;
+import java.util.Locale;
+import java.util.Optional;
+import org.springframework.data.jdbc.repository.config.DialectResolver;
+import org.springframework.data.relational.core.dialect.Dialect;
+import org.springframework.jdbc.core.ConnectionCallback;
+import org.springframework.jdbc.core.JdbcOperations;
+import org.springframework.lang.Nullable;
+
+public class SpannerDialectProvider implements DialectResolver.JdbcDialectProvider {
+  @Override
+  public Optional<Dialect> getDialect(JdbcOperations operations) {
+    return Optional.ofNullable(
+        operations.execute((ConnectionCallback<Dialect>) SpannerDialectProvider::getDialect));
+  }
+
+  @Nullable
+  private static Dialect getDialect(Connection connection) throws SQLException {
+    DatabaseMetaData metaData = connection.getMetaData();
+    String name = metaData.getDatabaseProductName().toLowerCase(Locale.ENGLISH);
+    if (name.contains("spanner")) {
+      return SpannerDialect.INSTANCE;
+    }
+    return null;
+  }
+}
diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java
new file mode 100644
index 000000000000..251acd1a85a9
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import java.time.OffsetDateTime; +import org.springframework.data.annotation.CreatedDate; +import org.springframework.data.annotation.Id; +import org.springframework.data.annotation.LastModifiedDate; +import org.springframework.data.annotation.PersistenceCreator; + +public abstract class AbstractEntity { + + /** This ID is generated using a (bit-reversed) sequence. */ + @Id private Long id; + + @CreatedDate private OffsetDateTime createdAt; + + @LastModifiedDate private OffsetDateTime updatedAt; + + @PersistenceCreator + public AbstractEntity() {} + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractEntity)) { + return false; + } + AbstractEntity other = (AbstractEntity) o; + if (this == other) { + return true; + } + return this.getClass().equals(other.getClass()) + && this.id != null + && other.id != null + && this.id.equals(other.id); + } + + @Override + public int hashCode() { + return this.id == null ? 
0 : this.id.hashCode(); + } + + public Long getId() { + return id; + } + + protected void setId(Long id) { + this.id = id; + } + + public OffsetDateTime getCreatedAt() { + return createdAt; + } + + protected void setCreatedAt(OffsetDateTime createdAt) { + this.createdAt = createdAt; + } + + public OffsetDateTime getUpdatedAt() { + return updatedAt; + } + + protected void setUpdatedAt(OffsetDateTime updatedAt) { + this.updatedAt = updatedAt; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java new file mode 100644 index 000000000000..36674c609de7 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java @@ -0,0 +1,88 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import java.math.BigDecimal; +import java.time.LocalDate; +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Table; + +@Table("albums") +public class Album extends AbstractEntity { + + private String title; + + private BigDecimal marketingBudget; + + private LocalDate releaseDate; + + private byte[] coverPicture; + + private Long singerId; + + @PersistenceCreator + public Album() {} + + public Album(String title) { + this.title = title; + } + + @Override + public String toString() { + return getTitle(); + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public BigDecimal getMarketingBudget() { + return marketingBudget; + } + + public void setMarketingBudget(BigDecimal marketingBudget) { + this.marketingBudget = marketingBudget; + } + + public LocalDate getReleaseDate() { + return releaseDate; + } + + public void setReleaseDate(LocalDate releaseDate) { + this.releaseDate = releaseDate; + } + + public byte[] getCoverPicture() { + return coverPicture; + } + + public void setCoverPicture(byte[] coverPicture) { + this.coverPicture = coverPicture; + } + + public Long getSingerId() { + return singerId; + } + + public void setSingerId(Long singerId) { + this.singerId = singerId; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java new file mode 100644 index 000000000000..5c1fb0a4f4a1 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java @@ -0,0 +1,84 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import java.time.OffsetDateTime; +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Table; + +@Table("concerts") +public class Concert extends AbstractEntity { + + private Long venueId; + + private Long singerId; + + private String name; + + private OffsetDateTime startTime; + + private OffsetDateTime endTime; + + @PersistenceCreator + public Concert() {} + + public Concert(Venue venue, Singer singer, String name) { + this.venueId = venue.getId(); + this.singerId = singer.getId(); + this.name = name; + } + + public Long getVenueId() { + return venueId; + } + + public void setVenueId(Long venueId) { + this.venueId = venueId; + } + + public Long getSingerId() { + return singerId; + } + + public void setSingerId(Long singerId) { + this.singerId = singerId; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public OffsetDateTime getStartTime() { + return startTime; + } + + public void setStartTime(OffsetDateTime startTime) { + this.startTime = startTime; + } + + public OffsetDateTime getEndTime() { + return endTime; + } + + public void setEndTime(OffsetDateTime endTime) { + this.endTime = endTime; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java 
b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java new file mode 100644 index 000000000000..a6f8fdfc4ec2 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java @@ -0,0 +1,75 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.annotation.ReadOnlyProperty; +import org.springframework.data.relational.core.mapping.Table; + +@Table("singers") +public class Singer extends AbstractEntity { + + private String firstName; + + private String lastName; + + /** Mark fullName as a {@link ReadOnlyProperty}, as it is generated by the database. 
*/ + @ReadOnlyProperty private String fullName; + + private Boolean active; + + @PersistenceCreator + public Singer() {} + + public Singer(String firstName, String lastName) { + this.firstName = firstName; + this.lastName = lastName; + } + + @Override + public String toString() { + return getFullName(); + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + public String getLastName() { + return lastName; + } + + public void setLastName(String lastName) { + this.lastName = lastName; + } + + public String getFullName() { + return fullName; + } + + public Boolean getActive() { + return active; + } + + public void setActive(Boolean active) { + this.active = active; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java new file mode 100644 index 000000000000..1a8e031b23b2 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java @@ -0,0 +1,88 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Column; +import org.springframework.data.relational.core.mapping.Table; + +/** + * The "tracks" table is interleaved in "albums". That means that the first part of the primary key + * (the "id" column) references the Album that this Track belongs to. That again means that we do + * not auto-generate the id for this entity. We can achieve this by adding an extra property, + * albumId, that is mapped to the "id" column. We can then manually set an albumId value before + * inserting the record in the database. + */ +@Table("tracks") +public class Track extends AbstractEntity { + + /** + * We need to map this to the "id" column to be able to explicitly set it, instead of letting + * Spring Data generate it. This is necessary, because Track is interleaved in Album. That again + * means that we must use the ID value of the Album for a Track. + */ + @Column("id") + private Long albumId; + + /** This is the second part of the primary key of a Track. 
*/ + private int trackNumber; + + private String title; + + private Double sampleRate; + + @PersistenceCreator + public Track() {} + + public Track(Album album, int trackNumber, String title) { + setAlbumId(album.getId()); + this.trackNumber = trackNumber; + this.title = title; + } + + public Long getAlbumId() { + return albumId; + } + + private void setAlbumId(Long albumId) { + this.albumId = albumId; + } + + public int getTrackNumber() { + return trackNumber; + } + + public void setTrackNumber(int trackNumber) { + this.trackNumber = trackNumber; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public Double getSampleRate() { + return sampleRate; + } + + public void setSampleRate(Double sampleRate) { + this.sampleRate = sampleRate; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java new file mode 100644 index 000000000000..78137ebc0976 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java @@ -0,0 +1,50 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Table; + +@Table("venues") +public class Venue extends AbstractEntity { + private String name; + + private String description; + + @PersistenceCreator + public Venue() {} + + public Venue(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/AlbumRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/AlbumRepository.java new file mode 100644 index 000000000000..ae6bf52d3108 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/AlbumRepository.java @@ -0,0 +1,40 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.spanner.sample.repositories;
+
+import com.google.cloud.spanner.sample.entities.Album;
+import java.util.List;
+import java.util.Optional;
+import org.springframework.data.jdbc.repository.query.Query;
+import org.springframework.data.repository.CrudRepository;
+import org.springframework.stereotype.Repository;
+
+@Repository
+public interface AlbumRepository extends CrudRepository<Album, Long> {
+
+  /**
+   * The implementation for this method is automatically generated and will fetch all albums of the
+   * given singer.
+   */
+  List<Album> findAlbumsBySingerId(Long singerId);
+
+  long countAlbumsBySingerId(Long singerId);
+
+  /** Returns the first album in the database. */
+  @Query("select * from albums limit 1")
+  Optional<Album> getFirst();
+}
diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/ConcertRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/ConcertRepository.java
new file mode 100644
index 000000000000..dbfb82ccc06a
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/ConcertRepository.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.sample.repositories;
+
+import com.google.cloud.spanner.sample.entities.Concert;
+import org.springframework.data.repository.CrudRepository;
+import org.springframework.stereotype.Repository;
+
+@Repository
+public interface ConcertRepository extends CrudRepository<Concert, Long> {}
diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/SingerRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/SingerRepository.java
new file mode 100644
index 000000000000..c542d69fb210
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/SingerRepository.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.sample.repositories;
+
+import com.google.cloud.spanner.sample.entities.Singer;
+import java.util.List;
+import org.springframework.data.repository.CrudRepository;
+import org.springframework.stereotype.Repository;
+
+@Repository
+public interface SingerRepository extends CrudRepository<Singer, Long> {
+
+  /**
+   * The implementation for this method is automatically generated and will fetch all singers with
+   * the given last name.
+   */
+  List<Singer> findSingersByLastName(String lastName);
+
+  List<Singer> findSingersByLastNameStartingWith(String prefix);
+}
diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/TrackRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/TrackRepository.java
new file mode 100644
index 000000000000..7b1147024565
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/TrackRepository.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.sample.repositories;
+
+import com.google.cloud.spanner.sample.entities.Track;
+import org.springframework.data.repository.CrudRepository;
+import org.springframework.stereotype.Repository;
+
+@Repository
+public interface TrackRepository extends CrudRepository<Track, Long> {}
diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/VenueRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/VenueRepository.java
new file mode 100644
index 000000000000..58ce3bc17f87
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/repositories/VenueRepository.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.sample.repositories;
+
+import com.google.cloud.spanner.sample.entities.Venue;
+import org.springframework.data.repository.CrudRepository;
+import org.springframework.stereotype.Repository;
+
+@Repository
+public interface VenueRepository extends CrudRepository<Venue, Long> {}
diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java
new file mode 100644
index 000000000000..55f4ee5b90fa
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.sample.service;
+
+import com.google.cloud.spanner.sample.entities.Album;
+import com.google.cloud.spanner.sample.entities.Singer;
+import com.google.cloud.spanner.sample.repositories.AlbumRepository;
+import com.google.cloud.spanner.sample.repositories.SingerRepository;
+import com.google.common.collect.ImmutableList;
+import java.util.List;
+import org.springframework.stereotype.Service;
+import org.springframework.transaction.annotation.Transactional;
+
+@Service
+public class SingerService {
+  private final SingerRepository singerRepository;
+
+  private final AlbumRepository albumRepository;
+
+  public SingerService(SingerRepository singerRepository, AlbumRepository albumRepository) {
+    this.singerRepository = singerRepository;
+    this.albumRepository = albumRepository;
+  }
+
+  /** Creates a singer and a list of albums in a transaction. */
+  @Transactional
+  public Singer createSingerAndAlbums(Singer singer, Album... albums) {
+    // Saving a singer will return an updated singer entity that has the primary key value set.
+    singer = singerRepository.save(singer);
+    for (Album album : albums) {
+      // Set the singerId that was generated on the Album before saving it.
+      album.setSingerId(singer.getId());
+      albumRepository.save(album);
+    }
+    return singer;
+  }
+
+  /**
+   * Searches for all singers that have a last name starting with any of the given prefixes. This
+   * method uses a read-only transaction. Read-only transactions should be preferred to read/write
+   * transactions whenever possible, as read-only transactions do not take locks.
+   */
+  @Transactional(readOnly = true)
+  public List<Singer> listSingersWithLastNameStartingWith(String... prefixes) {
+    ImmutableList.Builder<Singer> result = ImmutableList.builder();
+    // This is not the most efficient way to search for this, but the main purpose of this method is
+    // to show how to use read-only transactions.
+ for (String prefix : prefixes) { + result.addAll(singerRepository.findSingersByLastNameStartingWith(prefix)); + } + return result.build(); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/META-INF/spring.factories b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/META-INF/spring.factories new file mode 100644 index 000000000000..3dc08db94b04 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/META-INF/spring.factories @@ -0,0 +1 @@ +org.springframework.data.jdbc.repository.config.DialectResolver$JdbcDialectProvider=org.springframework.data.jdbc.repository.config.DialectResolver.DefaultDialectProvider,com.google.cloud.spanner.sample.SpannerDialectProvider diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/application.properties b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/application.properties new file mode 100644 index 000000000000..07724fdfb036 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/application.properties @@ -0,0 +1,36 @@ + +# This application uses a Spanner GoogleSQL database. + +spanner.project=my-project +spanner.instance=my-instance +spanner.database=spring-data-jdbc + +# Sets the isolation level that will be used by default for read/write transactions. +# Spanner supports the isolation levels SERIALIZABLE and REPEATABLE READ. +spanner.default_isolation_level=SERIALIZABLE + +# The sample by default uses the Spanner emulator. +# Disable this flag to run the sample on a real Spanner instance. +spanner.emulator=true + +# The sample by default starts an emulator instance in Docker. +# Disable this flag to run the sample on an Emulator instance that +# you start manually, for example if you don't have Docker installed +# on your local machine. Keep the 'spanner.emulator=true' line above +# to connect to the emulator that you have started. 
+spanner.auto_start_emulator=true + +# Setting this property to true instructs the Spanner JDBC driver to include the SQL statement that +# is executed in the trace. This makes it easier to identify slow queries in your application. +spanner.enable_extended_tracing=true + +spring.datasource.url=jdbc:cloudspanner:${spanner.endpoint}/projects/${spanner.project}/instances/${spanner.instance}/databases/${spanner.database};default_isolation_level=${spanner.default_isolation_level};autoConfigEmulator=${spanner.emulator};enableExtendedTracing=${spanner.enable_extended_tracing};${spanner.additional_properties} +spring.datasource.driver-class-name=com.google.cloud.spanner.jdbc.JdbcDriver + +# Enable/disable OpenTelemetry tracing and export these to Google Cloud Trace. +open_telemetry.enabled=true +open_telemetry.project=${spanner.project} + +# Used for testing +spanner.endpoint= +spanner.additional_properties= diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/create_schema.sql b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/create_schema.sql new file mode 100644 index 000000000000..f54ef649222a --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/create_schema.sql @@ -0,0 +1,59 @@ + +-- This script creates the database schema for this sample application. +-- The script is executed by the DatabaseSeeder class. 
+ +CREATE TABLE IF NOT EXISTS singers ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + first_name STRING(MAX), + last_name STRING(MAX), + full_name STRING(MAX) AS (CASE WHEN first_name IS NULL THEN last_name + WHEN last_name IS NULL THEN first_name + ELSE first_name || ' ' || last_name END) STORED, + active BOOL DEFAULT (TRUE), + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), +); + +CREATE TABLE IF NOT EXISTS albums ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + title STRING(MAX) NOT NULL, + marketing_budget NUMERIC, + release_date DATE, + cover_picture BYTES(MAX), + singer_id INT64 NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + CONSTRAINT fk_albums_singers FOREIGN KEY (singer_id) REFERENCES singers (id) +); + +CREATE TABLE IF NOT EXISTS tracks ( + id INT64 NOT NULL, + track_number INT64 NOT NULL, + title STRING(MAX) NOT NULL, + sample_rate FLOAT64 NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), +) PRIMARY KEY (id, track_number), INTERLEAVE IN PARENT albums ON DELETE CASCADE +; + +CREATE TABLE IF NOT EXISTS venues ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + name STRING(MAX) NOT NULL, + description JSON NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), +); + +CREATE TABLE IF NOT EXISTS concerts ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + venue_id INT64 NOT NULL, + singer_id INT64 NOT NULL, + name STRING(MAX) NOT NULL, + start_time TIMESTAMP NOT NULL, + end_time TIMESTAMP NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + CONSTRAINT fk_concerts_venues FOREIGN KEY 
(venue_id) REFERENCES venues (id), + CONSTRAINT fk_concerts_singers FOREIGN KEY (singer_id) REFERENCES singers (id), + CONSTRAINT chk_end_time_after_start_time CHECK (end_time > start_time) +); diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/drop_schema.sql b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/drop_schema.sql new file mode 100644 index 000000000000..23e7b65d3bb1 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/main/resources/drop_schema.sql @@ -0,0 +1,5 @@ +drop table if exists concerts; +drop table if exists venues; +drop table if exists tracks; +drop table if exists albums; +drop table if exists singers; diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java new file mode 100644 index 000000000000..7681e2a6861e --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java @@ -0,0 +1,69 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.connection.SpannerPool; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.springframework.boot.SpringApplication; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.images.PullPolicy; +import org.testcontainers.utility.DockerImageName; + +@RunWith(JUnit4.class) +public class ApplicationEmulatorTest { + private static GenericContainer emulator; + + @BeforeClass + public static void startEmulator() { + assumeTrue(DockerClientFactory.instance().isDockerAvailable()); + + emulator = + new GenericContainer<>( + DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator:latest")) + .withImagePullPolicy(PullPolicy.alwaysPull()) + .withExposedPorts(9010) + .waitingFor(Wait.forListeningPorts(9010)); + emulator.start(); + } + + @AfterClass + public static void cleanup() { + SpannerPool.closeSpannerPool(); + if (emulator != null) { + emulator.stop(); + } + } + + @Test + public void testRunApplication() { + System.setProperty("open_telemetry.enabled", "false"); + System.setProperty("open_telemetry.project", "test-project"); + System.setProperty("spanner.emulator", "true"); + System.setProperty("spanner.auto_start_emulator", "false"); + System.setProperty( + "spanner.endpoint", String.format("//localhost:%d", emulator.getMappedPort(9010))); + SpringApplication.run(Application.class).close(); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/README.md b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/README.md new file mode 100644 index 000000000000..da1d69532e84 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/README.md @@ -0,0 +1,95 @@ +# Spring 
Data JDBC Sample Application with Cloud Spanner PostgreSQL + +This sample application shows how to develop portable applications using Spring Data JDBC in +combination with Cloud Spanner PostgreSQL. This application can be configured to run on either a +[Cloud Spanner PostgreSQL](https://cloud.google.com/spanner/docs/postgresql-interface) database or +an open-source PostgreSQL database. The only change that is needed to switch between the two is +changing the active Spring profile that is used by the application. + +The application uses the Cloud Spanner JDBC driver to connect to Cloud Spanner PostgreSQL, and it +uses the PostgreSQL JDBC driver to connect to open-source PostgreSQL. Spring Data JDBC works with +both drivers and offers a single consistent API to the application developer, regardless of the +actual database or JDBC driver being used. + +This sample shows: + +1. How to use Spring Data JDBC with Cloud Spanner PostgreSQL. +2. How to develop a portable application that runs on both Google Cloud Spanner PostgreSQL and + open-source PostgreSQL with the same code base. +3. How to use bit-reversed sequences to automatically generate primary key values for entities. + +__NOTE__: This application does __not require PGAdapter__. Instead, it connects to Cloud Spanner +PostgreSQL using the Cloud Spanner JDBC driver. + +## Cloud Spanner PostgreSQL + +Cloud Spanner PostgreSQL provides language support by expressing Spanner database functionality +through a subset of open-source PostgreSQL language constructs, with extensions added to support +Spanner functionality like interleaved tables and hinting. + +The PostgreSQL interface makes the capabilities of Spanner —__fully managed, unlimited scale, strong +consistency, high performance, and up to 99.999% global availability__— accessible using the +PostgreSQL dialect. 
Unlike other services that manage actual PostgreSQL database instances, Spanner +uses PostgreSQL-compatible syntax to expose its existing scale-out capabilities. This provides +familiarity for developers and portability for applications, but not 100% PostgreSQL compatibility. +The SQL syntax that Spanner supports is semantically equivalent to PostgreSQL, meaning schemas +and queries written against the PostgreSQL interface can be easily ported to another PostgreSQL +environment. + +This sample showcases this portability with an application that works on both Cloud Spanner PostgreSQL +and open-source PostgreSQL with the same code base. + +## Spring Data JDBC + +[Spring Data JDBC](https://spring.io/projects/spring-data-jdbc) is part of the larger Spring Data +family. It makes it easy to implement JDBC based repositories. This module deals with enhanced +support for JDBC based data access layers. + +Spring Data JDBC aims at being conceptually easy. In order to achieve this, it does NOT offer caching, +lazy loading, write behind or many other features of JPA. This makes Spring Data JDBC a simple, +limited, opinionated ORM. + +## Sample Application + +This sample shows how to create a portable application using Spring Data JDBC and the Cloud Spanner +PostgreSQL dialect. The application works on both Cloud Spanner PostgreSQL and open-source +PostgreSQL. You can switch between the two by changing the active Spring profile: +* Profile `cs` runs the application on Cloud Spanner PostgreSQL. +* Profile `pg` runs the application on open-source PostgreSQL. + +The default profile is `cs`. You can change the default profile by modifying the +[application.properties](src/main/resources/application.properties) file. + +### Running the Application + +1. Choose the database system that you want to use by selecting a profile. The default profile is + `cs`, which runs the application on Cloud Spanner PostgreSQL. 
Modify the default profile in the + [application.properties](src/main/resources/application.properties) file. +2. Modify either [application-cs.properties](src/main/resources/application-cs.properties) or + [application-pg.properties](src/main/resources/application-pg.properties) to point to an existing + database. If you use Cloud Spanner, the database that the configuration file references must be a + database that uses the PostgreSQL dialect. +3. Run the application with `mvn spring-boot:run`. + +### Main Application Components + +The main application components are: +* [DatabaseSeeder.java](src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java): This + class is responsible for creating the database schema and inserting some initial test data. The + schema is created from the [create_schema.sql](src/main/resources/create_schema.sql) file. The + `DatabaseSeeder` class loads this file into memory and executes it on the active database using + standard JDBC APIs. The class also removes Cloud Spanner-specific extensions to the PostgreSQL + dialect when the application runs on open-source PostgreSQL. +* [JdbcConfiguration.java](src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java): + Spring Data JDBC by default detects the database dialect based on the JDBC driver that is used. + This class overrides this default and instructs Spring Data JDBC to also use the PostgreSQL + dialect for Cloud Spanner PostgreSQL. +* [AbstractEntity.java](src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java): + This is the shared base class for all entities in this sample application. It defines a number of + standard attributes, such as the identifier (primary key). The primary key is automatically + generated using a (bit-reversed) sequence. [Bit-reversed sequential values](https://cloud.google.com/spanner/docs/schema-design#bit_reverse_primary_key) + are considered a good choice for primary keys on Cloud Spanner. 
+* [Application.java](src/main/java/com/google/cloud/spanner/sample/Application.java): The starter + class of the application. It contains a command-line runner that executes a selection of queries + and updates on the database. + diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/pom.xml b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/pom.xml new file mode 100644 index 000000000000..2272eb5ca8fa --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/pom.xml @@ -0,0 +1,160 @@ + + + 4.0.0 + + org.example + cloud-spanner-spring-data-jdbc-postgresql-example + 1.0-SNAPSHOT + + Sample application showing how to use Spring Data JDBC with Cloud Spanner PostgreSQL. + + + + 17 + 17 + 17 + UTF-8 + + + + + + org.springframework.data + spring-data-bom + 2025.0.5 + import + pom + + + com.google.cloud + google-cloud-spanner-bom + 6.111.1 + import + pom + + + com.google.cloud + grpc-gcp + + + com.google.cloud + libraries-bom + 26.76.0 + import + pom + + + io.opentelemetry + opentelemetry-bom + 1.59.0 + pom + import + + + + + + + org.springframework.boot + spring-boot-starter-data-jdbc + 4.0.3 + + + + + com.google.cloud + google-cloud-spanner-jdbc + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + + + org.postgresql + postgresql + 42.7.10 + + + + + io.opentelemetry + opentelemetry-sdk + + + com.google.cloud.opentelemetry + exporter-trace + 0.36.0 + + + com.google.cloud.opentelemetry + exporter-metrics + 0.36.0 + + + + com.google.collections + google-collections + 1.0 + + + + + com.google.cloud + google-cloud-spanner + test-jar + test + + + com.google.api + gax-grpc + testlib + test + + + + net.bytebuddy + byte-buddy + 1.18.5 + test + + + net.bytebuddy + byte-buddy-agent + 1.18.5 + test + + + junit + junit + 4.13.2 + test + + + org.testcontainers + testcontainers + 2.0.3 + test + + + + + + + com.spotify.fmt + fmt-maven-plugin + 2.29 + + + + format + + + + + + + diff --git 
a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/Application.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/Application.java new file mode 100644 index 000000000000..cf992b64fcc3 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/Application.java @@ -0,0 +1,258 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import com.google.cloud.spanner.connection.SavepointSupport; +import com.google.cloud.spanner.jdbc.CloudSpannerJdbcConnection; +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.cloud.spanner.sample.entities.Track; +import com.google.cloud.spanner.sample.repositories.AlbumRepository; +import com.google.cloud.spanner.sample.repositories.SingerRepository; +import com.google.cloud.spanner.sample.repositories.TrackRepository; +import com.google.cloud.spanner.sample.service.SingerService; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Savepoint; +import java.sql.Statement; +import javax.sql.DataSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application implements CommandLineRunner { + private static final Logger logger = LoggerFactory.getLogger(Application.class); + + public static void main(String[] args) { + SpringApplication.run(Application.class, args).close(); + } + + private final DatabaseSeeder databaseSeeder; + + private final SingerService singerService; + + private final SingerRepository singerRepository; + + private final AlbumRepository albumRepository; + + private final TrackRepository trackRepository; + + private final Tracer tracer; + + private final DataSource dataSource; + + public Application( + SingerService singerService, + DatabaseSeeder databaseSeeder, + SingerRepository singerRepository, + AlbumRepository albumRepository, + TrackRepository trackRepository, + Tracer tracer, 
+ DataSource dataSource) { + this.databaseSeeder = databaseSeeder; + this.singerService = singerService; + this.singerRepository = singerRepository; + this.albumRepository = albumRepository; + this.trackRepository = trackRepository; + this.tracer = tracer; + this.dataSource = dataSource; + } + + @Override + public void run(String... args) { + // Set the system property 'drop_schema' to true to drop any existing database + // schema when the application is executed. + if (Boolean.parseBoolean(System.getProperty("drop_schema", "false"))) { + logger.info("Dropping existing schema if it exists"); + databaseSeeder.dropDatabaseSchemaIfExists(); + } + + logger.info("Creating database schema if it does not already exist"); + databaseSeeder.createDatabaseSchemaIfNotExists(); + logger.info("Deleting existing test data"); + databaseSeeder.deleteTestData(); + logger.info("Inserting fresh test data"); + databaseSeeder.insertTestData(); + + Iterable allSingers = singerRepository.findAll(); + for (Singer singer : allSingers) { + logger.info( + "Found singer: {} with {} albums", + singer, + albumRepository.countAlbumsBySingerId(singer.getId())); + for (Album album : albumRepository.findAlbumsBySingerId(singer.getId())) { + logger.info("\tAlbum: {}, released at {}", album, album.getReleaseDate()); + } + } + + // Create a new singer and three albums in a transaction. + Singer insertedSinger = + singerService.createSingerAndAlbums( + new Singer("Amethyst", "Jiang"), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle())); + logger.info( + "Inserted singer {} {} {}", + insertedSinger.getId(), + insertedSinger.getFirstName(), + insertedSinger.getLastName()); + + // Create a new track record and insert it into the database. 
+ Album album = albumRepository.getFirst().orElseThrow(); + Track track = new Track(album, 1, DatabaseSeeder.randomTitle()); + track.setSampleRate(3.14d); + // Spring Data JDBC supports the same base CRUD operations on entities as for example + // Spring Data JPA. + trackRepository.save(track); + + // List all singers that have a last name starting with an 'J'. + logger.info("All singers with a last name starting with an 'J':"); + for (Singer singer : singerRepository.findSingersByLastNameStartingWith("J")) { + logger.info("\t{}", singer.getFullName()); + } + + // The singerService.listSingersWithLastNameStartingWith(..) method uses a read-only + // transaction. You should prefer read-only transactions to read/write transactions whenever + // possible, as read-only transactions do not take locks. + logger.info("All singers with a last name starting with an 'A', 'B', or 'C'."); + for (Singer singer : singerService.listSingersWithLastNameStartingWith("A", "B", "C")) { + logger.info("\t{}", singer.getFullName()); + } + + // Run two concurrent transactions that conflict with each other to show the automatic retry + // behavior built into the JDBC driver. + concurrentTransactions(); + + // Use a savepoint to roll back to a previous point in a transaction. + savepoints(); + } + + void concurrentTransactions() { + // Create two transactions that conflict with each other to trigger a transaction retry. + // This sample is intended to show a couple of things: + // 1. Spanner will abort transactions that conflict. The Spanner JDBC driver will automatically + // retry aborted transactions internally, which ensures that both these transactions + // succeed without any errors. See + // https://cloud.google.com/spanner/docs/jdbc-session-mgmt-commands#retry_aborts_internally + // for more information on how the JDBC driver retries aborted transactions. + // 2. 
The JDBC driver adds information to the OpenTelemetry tracing that makes it easier to find + // transactions that were aborted and retried. + logger.info("Executing two concurrent transactions"); + Span span = tracer.spanBuilder("update-singers").startSpan(); + try (Scope ignore = span.makeCurrent(); + Connection connection1 = dataSource.getConnection(); + Connection connection2 = dataSource.getConnection(); + Statement statement1 = connection1.createStatement(); + Statement statement2 = connection2.createStatement()) { + statement1.execute("begin"); + statement1.execute("set spanner.transaction_tag='update-singer-1'"); + statement2.execute("begin"); + statement2.execute("set spanner.transaction_tag='update-singer-2'"); + long id = 0L; + statement1.execute("set spanner.statement_tag='fetch-singer-id'"); + try (ResultSet resultSet = statement1.executeQuery("select id from singers limit 1")) { + while (resultSet.next()) { + id = resultSet.getLong(1); + } + } + String sql = "update singers set active=not active where id=?"; + statement1.execute("set spanner.statement_tag='update-singer-1'"); + try (PreparedStatement preparedStatement = connection1.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + statement2.execute("set spanner.statement_tag='update-singer-2'"); + try (PreparedStatement preparedStatement = connection2.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + statement1.execute("commit"); + statement2.execute("commit"); + } catch (SQLException exception) { + span.recordException(exception); + throw new RuntimeException(exception); + } finally { + span.end(); + } + } + + void savepoints() { + // Run a transaction with a savepoint, and rollback to that savepoint. 
+ logger.info("Executing a transaction with a savepoint"); + Span span = tracer.spanBuilder("savepoint-sample").startSpan(); + try (Scope ignore = span.makeCurrent(); + Connection connection = dataSource.getConnection(); + Statement statement = connection.createStatement()) { + // Enable savepoints for this connection. + connection + .unwrap(CloudSpannerJdbcConnection.class) + .setSavepointSupport(SavepointSupport.ENABLED); + + statement.execute("begin"); + statement.execute("set spanner.transaction_tag='transaction-with-savepoint'"); + + // Fetch a random album. + long id = 0L; + try (ResultSet resultSet = + statement.executeQuery( + "/*@statement_tag='fetch-album-id'*/ select id from albums limit 1")) { + while (resultSet.next()) { + id = resultSet.getLong(1); + } + } + // Set a savepoint that we can roll back to at a later moment in the transaction. + // Note that the savepoint name must be a valid identifier. + Savepoint savepoint = connection.setSavepoint("fetched_album_id"); + + String sql = + "/*@statement_tag='update-album-marketing-budget-by-10-percent'*/ update albums set marketing_budget=marketing_budget * 1.1 where id=?"; + try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + + // Rollback to the savepoint that we set at an earlier stage, and then update the marketing + // budget by 20 percent instead. + connection.rollback(savepoint); + + sql = + "/*@statement_tag='update-album-marketing-budget-by-20-percent'*/ update albums set marketing_budget=marketing_budget * 1.2 where id=?"; + try (PreparedStatement preparedStatement = connection.prepareStatement(sql)) { + preparedStatement.setLong(1, id); + preparedStatement.executeUpdate(); + } + statement.execute("commit"); + + // Reset the state of the connection before returning it to the connection pool. 
+ statement.execute("reset all"); + } catch (SQLException exception) { + span.recordException(exception); + throw new RuntimeException(exception); + } finally { + span.end(); + } + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java new file mode 100644 index 000000000000..cd1664331b52 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java @@ -0,0 +1,374 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.common.base.Suppliers; +import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.context.Scope; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.io.UncheckedIOException; +import java.math.BigDecimal; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.time.LocalDate; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.function.Supplier; +import javax.annotation.Nonnull; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.core.io.Resource; +import org.springframework.jdbc.core.BatchPreparedStatementSetter; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Component; +import org.springframework.util.FileCopyUtils; + +/** This component creates the database schema and seeds it with some random test data. */ +@Component +public class DatabaseSeeder { + + /** Randomly generated names. 
*/ + public static final ImmutableList INITIAL_SINGERS = + ImmutableList.of( + new Singer("Aaliyah", "Smith"), + new Singer("Benjamin", "Jones"), + new Singer("Chloe", "Brown"), + new Singer("David", "Williams"), + new Singer("Elijah", "Johnson"), + new Singer("Emily", "Miller"), + new Singer("Gabriel", "Garcia"), + new Singer("Hannah", "Rodriguez"), + new Singer("Isabella", "Hernandez"), + new Singer("Jacob", "Perez")); + + private static final Random RANDOM = new Random(); + + private final JdbcTemplate jdbcTemplate; + + private final Tracer tracer; + + @Value("classpath:create_schema.sql") + private Resource createSchemaFile; + + @Value("classpath:drop_schema.sql") + private Resource dropSchemaFile; + + /** This value is determined once using a system query, and then cached. */ + private final Supplier isCloudSpannerPG; + + public DatabaseSeeder(JdbcTemplate jdbcTemplate, Tracer tracer) { + this.jdbcTemplate = jdbcTemplate; + this.tracer = tracer; + this.isCloudSpannerPG = + Suppliers.memoize(() -> JdbcConfiguration.isCloudSpannerPG(jdbcTemplate)); + } + + /** Reads a resource file into a string. */ + private static String resourceAsString(Resource resource) { + try (Reader reader = new InputStreamReader(resource.getInputStream(), UTF_8)) { + return FileCopyUtils.copyToString(reader); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + /** + * Returns true if this application is currently running on a Cloud Spanner PostgreSQL database, + * and false if it is running on an open-source PostgreSQL database. + */ + private boolean isCloudSpanner() { + return isCloudSpannerPG.get(); + } + + /** + * Removes all statements that start with a 'skip_on_open_source_pg' comment if the application is + * running on open-source PostgreSQL. This ensures that we can use the same DDL script both on + * Cloud Spanner and on open-source PostgreSQL. It also removes any empty statements in the given + * array. 
+ */ + private String[] updateDdlStatements(String[] statements) { + if (!isCloudSpanner()) { + for (int i = 0; i < statements.length; i++) { + // Replace any line that starts with '/* skip_on_open_source_pg */' with an empty string. + statements[i] = + statements[i].replaceAll("(?m)^\\s*/\\*\\s*skip_on_open_source_pg\\s*\\*/.+$", ""); + } + } + // Remove any empty statements from the script. + return Arrays.stream(statements) + .filter(statement -> !statement.isBlank()) + .toArray(String[]::new); + } + + /** Creates the database schema if it does not yet exist. */ + public void createDatabaseSchemaIfNotExists() { + // We can safely just split the script based on ';', as we know that there are no literals or + // other strings that contain semicolons in the script. + String[] statements = updateDdlStatements(resourceAsString(createSchemaFile).split(";")); + // Execute all the DDL statements as a JDBC batch. That ensures that Cloud Spanner will apply + // all statements in a single DDL batch, which again is a lot more efficient than executing them + // one-by-one. + jdbcTemplate.batchUpdate(statements); + } + + /** Drops the database schema if it exists. */ + public void dropDatabaseSchemaIfExists() { + // We can safely just split the script based on ';', as we know that there are no literals or + // other strings that contain semicolons in the script. + String[] statements = updateDdlStatements(resourceAsString(dropSchemaFile).split(";")); + // Execute all the DDL statements as a JDBC batch. That ensures that Cloud Spanner will apply + // all statements in a single DDL batch, which again is a lot more efficient than executing them + // one-by-one. + jdbcTemplate.batchUpdate(statements); + } + + /** Deletes all data currently in the sample tables. */ + public void deleteTestData() { + Span span = tracer.spanBuilder("deleteTestData").startSpan(); + try (Scope ignore = span.makeCurrent()) { + // Delete all data in one batch. 
+ jdbcTemplate.execute("set spanner.statement_tag='batch_delete_test_data'"); + jdbcTemplate.batchUpdate( + "delete from concerts", + "delete from venues", + "delete from tracks", + "delete from albums", + "delete from singers"); + } catch (Throwable t) { + span.recordException(t); + throw t; + } finally { + span.end(); + } + } + + /** Inserts some initial test data into the database. */ + public void insertTestData() { + Span span = tracer.spanBuilder("insertTestData").startSpan(); + try (Scope ignore = span.makeCurrent()) { + jdbcTemplate.execute("begin"); + jdbcTemplate.execute("set spanner.transaction_tag='insert_test_data'"); + jdbcTemplate.execute("set spanner.statement_tag='insert_singers'"); + jdbcTemplate.batchUpdate( + "insert into singers (first_name, last_name) values (?, ?)", + new BatchPreparedStatementSetter() { + @Override + public void setValues(@Nonnull PreparedStatement preparedStatement, int i) + throws SQLException { + preparedStatement.setString(1, INITIAL_SINGERS.get(i).getFirstName()); + preparedStatement.setString(2, INITIAL_SINGERS.get(i).getLastName()); + } + + @Override + public int getBatchSize() { + return INITIAL_SINGERS.size(); + } + }); + + List singerIds = + jdbcTemplate.query( + "select id from singers", + resultSet -> { + ImmutableList.Builder builder = ImmutableList.builder(); + while (resultSet.next()) { + builder.add(resultSet.getLong(1)); + } + return builder.build(); + }); + jdbcTemplate.execute("set spanner.statement_tag='insert_albums'"); + jdbcTemplate.batchUpdate( + "insert into albums (title, marketing_budget, release_date, cover_picture, singer_id) values (?, ?, ?, ?, ?)", + new BatchPreparedStatementSetter() { + @Override + public void setValues(@Nonnull PreparedStatement preparedStatement, int i) + throws SQLException { + preparedStatement.setString(1, randomTitle()); + preparedStatement.setBigDecimal(2, randomBigDecimal()); + preparedStatement.setObject(3, randomDate()); + preparedStatement.setBytes(4, 
randomBytes()); + preparedStatement.setLong(5, randomElement(singerIds)); + } + + @Override + public int getBatchSize() { + return INITIAL_SINGERS.size() * 20; + } + }); + jdbcTemplate.execute("commit"); + } catch (Throwable t) { + try { + jdbcTemplate.execute("rollback"); + } catch (Exception ignore) { + } + span.recordException(t); + throw t; + } finally { + span.end(); + } + } + + /** Generates a random title for an album or a track. */ + static String randomTitle() { + return randomElement(ADJECTIVES) + " " + randomElement(NOUNS); + } + + /** Returns a random element from the given list. */ + static T randomElement(List list) { + return list.get(RANDOM.nextInt(list.size())); + } + + /** Generates a random {@link BigDecimal}. */ + BigDecimal randomBigDecimal() { + return BigDecimal.valueOf(RANDOM.nextDouble()); + } + + /** Generates a random {@link LocalDate}. */ + static LocalDate randomDate() { + return LocalDate.of(RANDOM.nextInt(200) + 1800, RANDOM.nextInt(12) + 1, RANDOM.nextInt(28) + 1); + } + + /** Generates a random byte array with a length between 4 and 1024 bytes. */ + static byte[] randomBytes() { + int size = RANDOM.nextInt(1020) + 4; + byte[] res = new byte[size]; + RANDOM.nextBytes(res); + return res; + } + + /** Some randomly generated nouns that are used to generate random titles. */ + private static final ImmutableList NOUNS = + ImmutableList.of( + "apple", + "banana", + "cherry", + "dog", + "elephant", + "fish", + "grass", + "house", + "key", + "lion", + "monkey", + "nail", + "orange", + "pen", + "queen", + "rain", + "shoe", + "tree", + "umbrella", + "van", + "whale", + "xylophone", + "zebra"); + + /** Some randomly generated adjectives that are used to generate random titles. 
*/ + private static final ImmutableList ADJECTIVES = + ImmutableList.of( + "able", + "angelic", + "artistic", + "athletic", + "attractive", + "autumnal", + "calm", + "careful", + "cheerful", + "clever", + "colorful", + "confident", + "courageous", + "creative", + "curious", + "daring", + "determined", + "different", + "dreamy", + "efficient", + "elegant", + "energetic", + "enthusiastic", + "exciting", + "expressive", + "faithful", + "fantastic", + "funny", + "gentle", + "gifted", + "great", + "happy", + "helpful", + "honest", + "hopeful", + "imaginative", + "intelligent", + "interesting", + "inventive", + "joyful", + "kind", + "knowledgeable", + "loving", + "loyal", + "magnificent", + "mature", + "mysterious", + "natural", + "nice", + "optimistic", + "peaceful", + "perfect", + "pleasant", + "powerful", + "proud", + "quick", + "relaxed", + "reliable", + "responsible", + "romantic", + "safe", + "sensitive", + "sharp", + "simple", + "sincere", + "skillful", + "smart", + "sociable", + "strong", + "successful", + "sweet", + "talented", + "thankful", + "thoughtful", + "unique", + "upbeat", + "valuable", + "victorious", + "vivacious", + "warm", + "wealthy", + "wise", + "wonderful", + "worthy", + "youthful"); +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java new file mode 100644 index 000000000000..e0310420edd2 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java @@ -0,0 +1,56 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import com.google.cloud.spanner.jdbc.CloudSpannerJdbcConnection; +import io.opentelemetry.api.OpenTelemetry; +import javax.annotation.Nonnull; +import org.springframework.context.annotation.Configuration; +import org.springframework.data.jdbc.repository.config.AbstractJdbcConfiguration; +import org.springframework.data.relational.core.dialect.Dialect; +import org.springframework.data.relational.core.dialect.PostgresDialect; +import org.springframework.jdbc.core.ConnectionCallback; +import org.springframework.jdbc.core.JdbcOperations; +import org.springframework.jdbc.core.namedparam.NamedParameterJdbcOperations; + +@Configuration +public class JdbcConfiguration extends AbstractJdbcConfiguration { + + // OpenTelemetry is added to the constructor here to ensure the OpenTelemetry configuration is + // initialized before this configuration. + public JdbcConfiguration(OpenTelemetry ignore) {} + + /** Override the dialect auto-detection, so it also returns PostgreSQL for Cloud Spanner. */ + @Override + public Dialect jdbcDialect(@Nonnull NamedParameterJdbcOperations operations) { + if (isCloudSpannerPG(operations.getJdbcOperations())) { + return PostgresDialect.INSTANCE; + } + return super.jdbcDialect(operations); + } + + /** Returns true if the current database is a Cloud Spanner PostgreSQL database. 
*/ + public static boolean isCloudSpannerPG(JdbcOperations operations) { + return Boolean.TRUE.equals( + operations.execute( + (ConnectionCallback) + connection -> + connection.isWrapperFor(CloudSpannerJdbcConnection.class) + && com.google.cloud.spanner.Dialect.POSTGRESQL.equals( + connection.unwrap(CloudSpannerJdbcConnection.class).getDialect()))); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/OpenTelemetryConfiguration.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/OpenTelemetryConfiguration.java new file mode 100644 index 000000000000..b833e48b7c50 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/OpenTelemetryConfiguration.java @@ -0,0 +1,117 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.opentelemetry.metric.GoogleCloudMetricExporter; +import com.google.cloud.opentelemetry.metric.MetricConfiguration; +import com.google.cloud.opentelemetry.trace.TraceConfiguration; +import com.google.cloud.opentelemetry.trace.TraceExporter; +import com.google.cloud.spanner.SpannerOptions; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import java.io.IOException; +import java.util.concurrent.ThreadLocalRandom; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class OpenTelemetryConfiguration { + + @Value("${open_telemetry.enabled}") + private boolean enabled; + + @Value("${open_telemetry.project}") + private String project; + + @Bean + public OpenTelemetry openTelemetry() { + if (!enabled) { + return OpenTelemetry.noop(); + } + + // Enable OpenTelemetry tracing in Spanner. + SpannerOptions.enableOpenTelemetryTraces(); + SpannerOptions.enableOpenTelemetryMetrics(); + + if (!hasDefaultCredentials()) { + // Create a no-op OpenTelemetry object if this environment does not have any default + // credentials configured. This could for example be on local test environments that use + // the Spanner emulator. 
+ return OpenTelemetry.noop(); + } + + TraceConfiguration.Builder traceConfigurationBuilder = TraceConfiguration.builder(); + TraceConfiguration traceConfiguration = traceConfigurationBuilder.setProjectId(project).build(); + SpanExporter traceExporter = TraceExporter.createWithConfiguration(traceConfiguration); + + MetricConfiguration.Builder metricConfigurationBuilder = MetricConfiguration.builder(); + MetricConfiguration metricConfiguration = + metricConfigurationBuilder.setProjectId(project).build(); + MetricExporter metricExporter = + GoogleCloudMetricExporter.createWithConfiguration(metricConfiguration); + + SdkMeterProvider sdkMeterProvider = + SdkMeterProvider.builder() + .registerMetricReader(PeriodicMetricReader.builder(metricExporter).build()) + .build(); + + // Create an OpenTelemetry object and register it as the global OpenTelemetry object. This + // will automatically be picked up by the Spanner libraries and used for tracing. + return OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder() + // Set sampling to 'AlwaysOn' in this example. In production, you want to reduce + // this to a smaller fraction to limit the number of traces that are being + // collected. 
+ .setSampler(Sampler.alwaysOn()) + .setResource( + Resource.builder() + .put( + "service.name", + "spanner-jdbc-spring-data-sample-" + + ThreadLocalRandom.current().nextInt()) + .build()) + .addSpanProcessor(BatchSpanProcessor.builder(traceExporter).build()) + .build()) + .setMeterProvider(sdkMeterProvider) + .buildAndRegisterGlobal(); + } + + @Bean + public Tracer tracer(OpenTelemetry openTelemetry) { + return openTelemetry.getTracer("com.google.cloud.spanner.jdbc.sample.spring-data-jdbc"); + } + + private boolean hasDefaultCredentials() { + try { + return GoogleCredentials.getApplicationDefault() != null; + } catch (IOException exception) { + return false; + } + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java new file mode 100644 index 000000000000..5992f262de5f --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java @@ -0,0 +1,80 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import java.time.OffsetDateTime; +import org.springframework.data.annotation.CreatedDate; +import org.springframework.data.annotation.Id; +import org.springframework.data.annotation.LastModifiedDate; +import org.springframework.data.annotation.PersistenceCreator; + +public abstract class AbstractEntity { + + /** This ID is generated using a (bit-reversed) sequence. */ + @Id private Long id; + + @CreatedDate private OffsetDateTime createdAt; + + @LastModifiedDate private OffsetDateTime updatedAt; + + @PersistenceCreator + public AbstractEntity() {} + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractEntity)) { + return false; + } + AbstractEntity other = (AbstractEntity) o; + if (this == other) { + return true; + } + return this.getClass().equals(other.getClass()) + && this.id != null + && other.id != null + && this.id.equals(other.id); + } + + @Override + public int hashCode() { + return this.id == null ? 
0 : this.id.hashCode(); + } + + public Long getId() { + return id; + } + + protected void setId(Long id) { + this.id = id; + } + + public OffsetDateTime getCreatedAt() { + return createdAt; + } + + protected void setCreatedAt(OffsetDateTime createdAt) { + this.createdAt = createdAt; + } + + public OffsetDateTime getUpdatedAt() { + return updatedAt; + } + + protected void setUpdatedAt(OffsetDateTime updatedAt) { + this.updatedAt = updatedAt; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java new file mode 100644 index 000000000000..b6b7ac8b01a4 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java @@ -0,0 +1,88 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import java.math.BigDecimal; +import java.time.LocalDate; +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Table; + +@Table("albums") +public class Album extends AbstractEntity { + + private String title; + + private BigDecimal marketingBudget; + + private LocalDate releaseDate; + + private byte[] coverPicture; + + private Long singerId; + + @PersistenceCreator + public Album() {} + + public Album(String title) { + this.title = title; + } + + @Override + public String toString() { + return getTitle(); + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public BigDecimal getMarketingBudget() { + return marketingBudget; + } + + public void setMarketingBudget(BigDecimal marketingBudget) { + this.marketingBudget = marketingBudget; + } + + public LocalDate getReleaseDate() { + return releaseDate; + } + + public void setReleaseDate(LocalDate releaseDate) { + this.releaseDate = releaseDate; + } + + public byte[] getCoverPicture() { + return coverPicture; + } + + public void setCoverPicture(byte[] coverPicture) { + this.coverPicture = coverPicture; + } + + public Long getSingerId() { + return singerId; + } + + public void setSingerId(Long singerId) { + this.singerId = singerId; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java new file mode 100644 index 000000000000..1075eed3d503 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java @@ -0,0 +1,84 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import java.time.OffsetDateTime; +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Table; + +@Table("concerts") +public class Concert extends AbstractEntity { + + private Long venueId; + + private Long singerId; + + private String name; + + private OffsetDateTime startTime; + + private OffsetDateTime endTime; + + @PersistenceCreator + public Concert() {} + + public Concert(Venue venue, Singer singer, String name) { + this.venueId = venue.getId(); + this.singerId = singer.getId(); + this.name = name; + } + + public Long getVenueId() { + return venueId; + } + + public void setVenueId(Long venueId) { + this.venueId = venueId; + } + + public Long getSingerId() { + return singerId; + } + + public void setSingerId(Long singerId) { + this.singerId = singerId; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public OffsetDateTime getStartTime() { + return startTime; + } + + public void setStartTime(OffsetDateTime startTime) { + this.startTime = startTime; + } + + public OffsetDateTime getEndTime() { + return endTime; + } + + public void setEndTime(OffsetDateTime endTime) { + this.endTime = endTime; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java 
b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java new file mode 100644 index 000000000000..4b68f139232a --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java @@ -0,0 +1,75 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.annotation.ReadOnlyProperty; +import org.springframework.data.relational.core.mapping.Table; + +@Table("singers") +public class Singer extends AbstractEntity { + + private String firstName; + + private String lastName; + + /** Mark fullName as a {@link ReadOnlyProperty}, as it is generated by the database. 
*/ + @ReadOnlyProperty private String fullName; + + private Boolean active; + + @PersistenceCreator + public Singer() {} + + public Singer(String firstName, String lastName) { + this.firstName = firstName; + this.lastName = lastName; + } + + @Override + public String toString() { + return getFullName(); + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + public String getLastName() { + return lastName; + } + + public void setLastName(String lastName) { + this.lastName = lastName; + } + + public String getFullName() { + return fullName; + } + + public Boolean getActive() { + return active; + } + + public void setActive(Boolean active) { + this.active = active; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java new file mode 100644 index 000000000000..c255a08bc85c --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java @@ -0,0 +1,88 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Column; +import org.springframework.data.relational.core.mapping.Table; + +/** + * The "tracks" table is interleaved in "albums". That means that the first part of the primary key + * (the "id" column) references the Album that this Track belongs to. That again means that we do + * not auto-generate the id for this entity. We can achieve this by adding an extra property, + * albumId, that is mapped to the "id" column. We can then manually set an albumId value before + * inserting the record in the database. + */ +@Table("tracks") +public class Track extends AbstractEntity { + + /** + * We need to map this to the "id" column to be able to explicitly set it, instead of letting + * Spring Data generate it. This is necessary, because Track is interleaved in Album. That again + * means that we must use the ID value of the Album for a Track. + */ + @Column("id") + private Long albumId; + + /** This is the second part of the primary key of a Track. 
*/ + private int trackNumber; + + private String title; + + private Double sampleRate; + + @PersistenceCreator + public Track() {} + + public Track(Album album, int trackNumber, String title) { + setAlbumId(album.getId()); + this.trackNumber = trackNumber; + this.title = title; + } + + public Long getAlbumId() { + return albumId; + } + + private void setAlbumId(Long albumId) { + this.albumId = albumId; + } + + public int getTrackNumber() { + return trackNumber; + } + + public void setTrackNumber(int trackNumber) { + this.trackNumber = trackNumber; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public Double getSampleRate() { + return sampleRate; + } + + public void setSampleRate(Double sampleRate) { + this.sampleRate = sampleRate; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java new file mode 100644 index 000000000000..82dc5973d894 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java @@ -0,0 +1,50 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import org.springframework.data.annotation.PersistenceCreator; +import org.springframework.data.relational.core.mapping.Table; + +@Table("venues") +public class Venue extends AbstractEntity { + private String name; + + private String description; + + @PersistenceCreator + public Venue() {} + + public Venue(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/AlbumRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/AlbumRepository.java new file mode 100644 index 000000000000..de90bf7985dc --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/AlbumRepository.java @@ -0,0 +1,40 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.repositories; + +import com.google.cloud.spanner.sample.entities.Album; +import java.util.List; +import java.util.Optional; +import org.springframework.data.jdbc.repository.query.Query; +import org.springframework.data.repository.CrudRepository; +import org.springframework.stereotype.Repository; + +@Repository +public interface AlbumRepository extends CrudRepository { + + /** + * The implementation for this method is automatically generated and will fetch all albums of the + * given singer. + */ + List findAlbumsBySingerId(Long singerId); + + long countAlbumsBySingerId(Long singerId); + + /** Returns the first album in the database. */ + @Query("select * from albums limit 1") + Optional getFirst(); +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/ConcertRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/ConcertRepository.java new file mode 100644 index 000000000000..fb2ce9c5931a --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/ConcertRepository.java @@ -0,0 +1,24 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.repositories; + +import com.google.cloud.spanner.sample.entities.Concert; +import org.springframework.data.repository.CrudRepository; +import org.springframework.stereotype.Repository; + +@Repository +public interface ConcertRepository extends CrudRepository {} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/SingerRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/SingerRepository.java new file mode 100644 index 000000000000..a55d02f054be --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/SingerRepository.java @@ -0,0 +1,34 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.repositories; + +import com.google.cloud.spanner.sample.entities.Singer; +import java.util.List; +import org.springframework.data.repository.CrudRepository; +import org.springframework.stereotype.Repository; + +@Repository +public interface SingerRepository extends CrudRepository { + + /** + * The implementation for this method is automatically generated and will fetch all singers with + * the given last name. 
+ */ + List findSingersByLastName(String lastName); + + List findSingersByLastNameStartingWith(String prefix); +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/TrackRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/TrackRepository.java new file mode 100644 index 000000000000..597df5f7cc04 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/TrackRepository.java @@ -0,0 +1,24 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.repositories; + +import com.google.cloud.spanner.sample.entities.Track; +import org.springframework.data.repository.CrudRepository; +import org.springframework.stereotype.Repository; + +@Repository +public interface TrackRepository extends CrudRepository {} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/VenueRepository.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/VenueRepository.java new file mode 100644 index 000000000000..f029979adbc9 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/repositories/VenueRepository.java @@ -0,0 +1,24 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.repositories; + +import com.google.cloud.spanner.sample.entities.Venue; +import org.springframework.data.repository.CrudRepository; +import org.springframework.stereotype.Repository; + +@Repository +public interface VenueRepository extends CrudRepository {} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java new file mode 100644 index 000000000000..afb496e352b7 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java @@ -0,0 +1,67 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.service; + +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.cloud.spanner.sample.repositories.AlbumRepository; +import com.google.cloud.spanner.sample.repositories.SingerRepository; +import com.google.common.collect.ImmutableList; +import java.util.List; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +@Service +public class SingerService { + private final SingerRepository singerRepository; + + private final AlbumRepository albumRepository; + + public SingerService(SingerRepository singerRepository, AlbumRepository albumRepository) { + this.singerRepository = singerRepository; + this.albumRepository = albumRepository; + } + + /** Creates a singer and a list of albums in a transaction. */ + @Transactional + public Singer createSingerAndAlbums(Singer singer, Album... albums) { + // Saving a singer will return an updated singer entity that has the primary key value set. + singer = singerRepository.save(singer); + for (Album album : albums) { + // Set the singerId that was generated on the Album before saving it. + album.setSingerId(singer.getId()); + albumRepository.save(album); + } + return singer; + } + + /** + * Searches for all singers that have a last name starting with any of the given prefixes. This + * method uses a read-only transaction. Read-only transactions should be preferred to read/write + * transactions whenever possible, as read-only transactions do not take locks. + */ + @Transactional(readOnly = true) + public List<Singer> listSingersWithLastNameStartingWith(String... prefixes) { + ImmutableList.Builder<Singer> result = ImmutableList.builder(); + // This is not the most efficient way to search for this, but the main purpose of this method is + // to show how to use read-only transactions. 
+ for (String prefix : prefixes) { + result.addAll(singerRepository.findSingersByLastNameStartingWith(prefix)); + } + return result.build(); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application-cs.properties b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application-cs.properties new file mode 100644 index 000000000000..289349833aa8 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application-cs.properties @@ -0,0 +1,13 @@ + +# This profile uses a Cloud Spanner PostgreSQL database. + +spanner.project=my-project +spanner.instance=my-instance +spanner.database=spring-data-jdbc + +# Setting this property to true instructs the Spanner JDBC driver to include the SQL statement that +# is executed in the trace. This makes it easier to identify slow queries in your application. +spanner.enable_extended_tracing=true + +spring.datasource.url=jdbc:cloudspanner:/projects/${spanner.project}/instances/${spanner.instance}/databases/${spanner.database}?enableExtendedTracing=${spanner.enable_extended_tracing} +spring.datasource.driver-class-name=com.google.cloud.spanner.jdbc.JdbcDriver diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application-pg.properties b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application-pg.properties new file mode 100644 index 000000000000..894f63ebae0d --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application-pg.properties @@ -0,0 +1,7 @@ + +# This profile uses an open-source PostgreSQL database. 
+ +spring.datasource.url=jdbc:postgresql://localhost:5432/spring-data-jdbc +spring.datasource.driver-class-name=org.postgresql.Driver +spring.datasource.username=postgres +spring.datasource.password=mysecretpassword diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application.properties b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application.properties new file mode 100644 index 000000000000..642b9768d055 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/application.properties @@ -0,0 +1,13 @@ + +# This application can use both a Cloud Spanner PostgreSQL database or an open-source PostgreSQL +# database. Which database is used is determined by the active profile: +# 1. 'cs' means use Cloud Spanner. +# 2. 'pg' means use open-source PostgreSQL. + +# Activate the Cloud Spanner profile by default. +# Change to 'pg' to activate the PostgreSQL profile. +spring.profiles.default=cs + +# Enable/disable OpenTelemetry tracing and export these to Google Cloud Trace. +open_telemetry.enabled=true +open_telemetry.project=${spanner.project} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/create_schema.sql b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/create_schema.sql new file mode 100644 index 000000000000..60552d3ad107 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/create_schema.sql @@ -0,0 +1,68 @@ +/* + This script creates the database schema for this sample application. + All lines that start with /* skip_on_open_source_pg */ are skipped when the application is running on a + normal PostgreSQL database. The same lines are executed when the application is running on a Cloud + Spanner database. The script is executed by the DatabaseSeeder class. 
+*/ + +create sequence if not exists id_generator +/* skip_on_open_source_pg */ bit_reversed_positive +; + +create table if not exists singers ( + id bigint not null primary key default nextval('id_generator'), + first_name varchar, + last_name varchar, + full_name varchar generated always as (CASE WHEN first_name IS NULL THEN last_name + WHEN last_name IS NULL THEN first_name + ELSE first_name || ' ' || last_name END) stored, + active boolean default true, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp +); + +create table if not exists albums ( + id bigint not null primary key default nextval('id_generator'), + title varchar not null, + marketing_budget numeric, + release_date date, + cover_picture bytea, + singer_id bigint not null, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp, + constraint fk_albums_singers foreign key (singer_id) references singers (id) +); + +create table if not exists tracks ( + id bigint not null, + track_number bigint not null, + title varchar not null, + sample_rate float8 not null, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp, + primary key (id, track_number) +) +/* skip_on_open_source_pg */ interleave in parent albums on delete cascade +; + +create table if not exists venues ( + id bigint not null primary key default nextval('id_generator'), + name varchar not null, + description jsonb not null, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp +); + +create table if not exists concerts ( + id bigint not null primary key default nextval('id_generator'), + venue_id bigint not null, + singer_id bigint not null, + name varchar not null, + start_time timestamptz not null, + end_time timestamptz not null, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp, + constraint 
fk_concerts_venues foreign key (venue_id) references venues (id), + constraint fk_concerts_singers foreign key (singer_id) references singers (id), + constraint chk_end_time_after_start_time check (end_time > start_time) +); diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/drop_schema.sql b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/drop_schema.sql new file mode 100644 index 000000000000..23e7b65d3bb1 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/main/resources/drop_schema.sql @@ -0,0 +1,5 @@ +drop table if exists concerts; +drop table if exists venues; +drop table if exists tracks; +drop table if exists albums; +drop table if exists singers; diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java new file mode 100644 index 000000000000..6d1afd6273b0 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.connection.SpannerPool; +import java.util.Properties; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.springframework.boot.SpringApplication; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.utility.DockerImageName; + +@Ignore("The emulator does not return DML statistics when using a RETURNING clause") +@RunWith(JUnit4.class) +public class ApplicationEmulatorTest { + private static GenericContainer emulator; + + private static Properties properties; + + @BeforeClass + public static void startEmulator() { + assumeTrue(DockerClientFactory.instance().isDockerAvailable()); + + emulator = + new GenericContainer<>( + DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator:latest")) + .withExposedPorts(9010) + .waitingFor(Wait.forListeningPorts(9010)); + emulator.start(); + properties = new Properties(); + properties.setProperty("autoConfigEmulator", "true"); + properties.setProperty( + "endpoint", String.format("%s:%d", emulator.getHost(), emulator.getMappedPort(9010))); + } + + @AfterClass + public static void cleanup() { + SpannerPool.closeSpannerPool(); + if (emulator != null) { + emulator.stop(); + } + } + + @Test + public void testRunApplication() { + System.setProperty("open_telemetry.project", "test-project"); + System.setProperty("port", String.valueOf(emulator.getMappedPort(9010))); + SpringApplication.run(Application.class).close(); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java 
b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java new file mode 100644 index 000000000000..c597b75d5e3c --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java @@ -0,0 +1,836 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import static com.google.cloud.spanner.sample.DatabaseSeeder.INITIAL_SINGERS; +import static com.google.cloud.spanner.sample.DatabaseSeeder.randomDate; +import static com.google.cloud.spanner.sample.DatabaseSeeder.randomTitle; +import static junit.framework.TestCase.assertEquals; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.common.collect.Streams; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Value; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.CommitRequest; +import 
com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.LongStream; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.springframework.boot.SpringApplication; + +@RunWith(JUnit4.class) +public class ApplicationTest extends AbstractMockServerTest { + + @BeforeClass + public static void setupQueryResults() { + // Set the database dialect. + mockSpanner.putStatementResult(StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + // Set up a result for the dialect check that is executed by the JdbcConfiguration class. + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of( + "select 1 " + + "from information_schema.database_options " + + "where schema_name='public' " + + "and option_name='database_dialect' " + + "and option_value='POSTGRESQL'"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build())); + // Add a DDL response to the server. + addDdlResponseToSpannerAdmin(); + + // Set up results for the 'delete all test data' operations. 
+ mockSpanner.putStatementResult( + StatementResult.update(Statement.of("delete from concerts"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from venues"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from tracks"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from albums"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from singers"), 0L)); + + // Set up results for inserting test data. + for (Singer singer : INITIAL_SINGERS) { + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder("insert into singers (first_name, last_name) values ($1, $2)") + .bind("p1") + .to(singer.getFirstName()) + .bind("p2") + .to(singer.getLastName()) + .build(), + 1L)); + } + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("select id from singers"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addAllRows( + LongStream.rangeClosed(1L, INITIAL_SINGERS.size()) + .mapToObj( + id -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(id))) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.update( + Statement.of( + "INSERT INTO \"albums\" (\"cover_picture\", \"created_at\", \"marketing_budget\", \"release_date\", \"singer_id\", \"title\", \"updated_at\") VALUES ($1, $2, $3, $4, $5, $6, $7)"), + 1L)); + mockSpanner.putPartialStatementResult( + StatementResult.update( + Statement.of( + "insert into albums (title, marketing_budget, release_date, cover_picture, singer_id) values ($1, $2, $3, $4, $5)"), + 1L)); + + // Set up results for the queries 
that the application runs. + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of( + "SELECT \"singers\".\"id\" AS \"id\", \"singers\".\"active\" AS \"active\", " + + "\"singers\".\"last_name\" AS \"last_name\", \"singers\".\"full_name\" AS \"full_name\", " + + "\"singers\".\"updated_at\" AS \"updated_at\", \"singers\".\"created_at\" AS \"created_at\", " + + "\"singers\".\"first_name\" AS \"first_name\" FROM \"singers\""), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addAllRows( + Streams.mapWithIndex( + INITIAL_SINGERS.stream(), + (singer, index) -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(index + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setStringValue( + singer.getFirstName() + " " + singer.getLastName()) 
+ .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getFirstName()) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of("SELECT COUNT(*) FROM \"albums\" WHERE \"albums\".\"singer_id\" = $1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("10").build()) + .build()) + .build())); + for (long singerId : LongStream.rangeClosed(1L, INITIAL_SINGERS.size()).toArray()) { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder( + "SELECT \"albums\".\"id\" AS \"id\", \"albums\".\"title\" AS \"title\", " + + "\"albums\".\"singer_id\" AS \"singer_id\", \"albums\".\"updated_at\" AS \"updated_at\", " + + "\"albums\".\"created_at\" AS \"created_at\", \"albums\".\"release_date\" AS \"release_date\", " + + "\"albums\".\"cover_picture\" AS \"cover_picture\", \"albums\".\"marketing_budget\" AS \"marketing_budget\" " + + "FROM \"albums\" WHERE \"albums\".\"singer_id\" = $1") + .bind("p1") + .to(Long.reverse(singerId)) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType( + 
Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType( + Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addAllRows( + IntStream.rangeClosed(1, 10) + .mapToObj( + albumId -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(albumId * singerId))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(singerId)))) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(randomDate().toString()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + } + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder( + "INSERT INTO \"singers\" (\"active\", \"created_at\", \"first_name\", \"last_name\", \"updated_at\") VALUES ($1, $2, $3, $4, $5)\n" + + "RETURNING *") + .bind("p1") + .to((Boolean) null) + 
.bind("p2") + .to((Timestamp) null) + .bind("p3") + .to("Amethyst") + .bind("p4") + .to("Jiang") + .bind("p5") + .to((Timestamp) null) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(INITIAL_SINGERS.size() + 2))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues(Value.newBuilder().setStringValue("Amethyst").build()) + .addValues(Value.newBuilder().setStringValue("Amethyst Jiang").build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setStringValue("Jiang").build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT 
INTO \"albums\" (\"cover_picture\", \"created_at\", \"marketing_budget\", \"release_date\", \"singer_id\", \"title\", \"updated_at\") VALUES ($1, $2, $3, $4, $5, $6, $7)\n" + + "RETURNING *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setStringValue(String.valueOf(1L))) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomDate().toString()).build()) + 
.addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("select * from albums limit 1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setStringValue(String.valueOf(1L))) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + 
.addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomDate().toString()).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT INTO \"tracks\" (\"created_at\", \"id\", \"sample_rate\", \"title\", \"track_number\", \"updated_at\") VALUES ($1, $2, $3, $4, $5, $6)\n" + + "RETURNING *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("sample_rate") + .setType( + Type.newBuilder().setCode(TypeCode.FLOAT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("track_number") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setStringValue("1").build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setStringValue(randomTitle()).build()) + .addValues(Value.newBuilder().setStringValue("1").build()) + 
.addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + for (String prefix : new String[] {"J%", "A%", "B%", "C%"}) { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder( + "SELECT \"singers\".\"id\" AS \"id\", \"singers\".\"active\" AS \"active\", " + + "\"singers\".\"last_name\" AS \"last_name\", \"singers\".\"full_name\" AS \"full_name\", " + + "\"singers\".\"updated_at\" AS \"updated_at\", \"singers\".\"created_at\" AS \"created_at\", " + + "\"singers\".\"first_name\" AS \"first_name\" " + + "FROM \"singers\" WHERE \"singers\".\"last_name\" LIKE $1") + .bind("p1") + .to(prefix) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addAllRows( + Streams.mapWithIndex( + INITIAL_SINGERS.stream() + .filter( + singer -> + singer.getLastName().startsWith(prefix.substring(0, 1))), + (singer, 
index) -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(index + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setStringValue( + singer.getFirstName() + + " " + + singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getFirstName()) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + } + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("select id from singers limit 1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder("update singers set active=not active where id=$1") + .bind("p1") + .to(1L) + .build(), + 1L)); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of(" select id from albums limit 1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder( + " 
update albums set marketing_budget=marketing_budget * 1.1 where id=$1") + .bind("p1") + .to(1L) + .build(), + 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder( + " update albums set marketing_budget=marketing_budget * 1.2 where id=$1") + .bind("p1") + .to(1L) + .build(), + 1L)); + } + + @Test + public void testRunApplication() { + System.setProperty("open_telemetry.project", "test-project"); + System.setProperty("port", String.valueOf(getPort())); + System.setProperty("emulator", "false"); + SpringApplication.run(Application.class).close(); + + assertEquals( + 42, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> !request.getSql().equals("SELECT 1")) + .count()); + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(7, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + private static void addDdlResponseToSpannerAdmin() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance())) + .build()); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application-cs.properties b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application-cs.properties new file mode 100644 index 000000000000..11df10ec2253 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application-cs.properties @@ -0,0 +1,9 @@ + +# This profile uses a Cloud Spanner PostgreSQL database. 
+ +spanner.project=my-project +spanner.instance=my-instance +spanner.database=spring-data-jdbc + +spring.datasource.url=jdbc:cloudspanner://localhost:${port}/projects/${spanner.project}/instances/${spanner.instance}/databases/${spanner.database}?usePlainText=true;autoConfigEmulator=${emulator};dialect=postgresql +spring.datasource.driver-class-name=com.google.cloud.spanner.jdbc.JdbcDriver diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application-pg.properties b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application-pg.properties new file mode 100644 index 000000000000..894f63ebae0d --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application-pg.properties @@ -0,0 +1,7 @@ + +# This profile uses an open-source PostgreSQL database. + +spring.datasource.url=jdbc:postgresql://localhost:5432/spring-data-jdbc +spring.datasource.driver-class-name=org.postgresql.Driver +spring.datasource.username=postgres +spring.datasource.password=mysecretpassword diff --git a/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application.properties b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application.properties new file mode 100644 index 000000000000..a42f6047c9b0 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-jdbc/postgresql/src/test/resources/application.properties @@ -0,0 +1,11 @@ + +# This application can use both a Cloud Spanner PostgreSQL database or an open-source PostgreSQL +# database. Which database is used is determined by the active profile: +# 1. 'cs' means use Cloud Spanner. +# 2. 'pg' means use open-source PostgreSQL. + +# Activate the Cloud Spanner profile by default. +# Change to 'pg' to activate the PostgreSQL profile. 
+spring.profiles.default=cs + +open_telemetry.enabled=false diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/README.md b/java-spanner-jdbc/samples/spring-data-mybatis/README.md new file mode 100644 index 000000000000..04e1b0950b8f --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/README.md @@ -0,0 +1,9 @@ +# Spring Data MyBatis + +This directory contains two sample applications for using Spring Data MyBatis +with the Spanner JDBC driver. + +- [GoogleSQL](googlesql): This sample uses the Spanner GoogleSQL dialect. +- [PostgreSQL](postgresql): This sample uses the Spanner PostgreSQL dialect and the Spanner JDBC + driver. It does not use PGAdapter. The sample application can also be configured to run on open + source PostgreSQL, and shows how a portable application can be developed using this setup. diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/README.md b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/README.md new file mode 100644 index 000000000000..7badfb6980ff --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/README.md @@ -0,0 +1,52 @@ +# Spring Data MyBatis Sample Application with Spanner GoogleSQL + +This sample application shows how to develop applications using Spring Data MyBatis in +combination with Spanner GoogleSQL. + +This sample shows: + +1. How to use Spring Data MyBatis with a Spanner GoogleSQL database. +2. How to use bit-reversed identity columns to automatically generate primary key values for entities. +3. How to set the transaction isolation level that is used by the Spanner JDBC driver. +4. How to use the Spanner Emulator for development in combination with Spring Data. + +## MyBatis Spring +[MyBatis Spring](http://mybatis.org/spring/) integrates MyBatis with the popular Java Spring +framework. This allows MyBatis to participate in Spring transactions and to automatically inject +MyBatis mappers into other beans. + +### Running the Application + +1. 
The sample by default starts an instance of the Spanner Emulator together with the application and + runs the application against the emulator. +2. To run the sample on a real Spanner database, modify + [application.properties](src/main/resources/application.properties) and set the + `spanner.emulator` property to `false`. Modify the `spanner.project`, `spanner.instance`, and + `spanner.database` properties to point to an existing Spanner database. + The database must use the GoogleSQL dialect. +3. Run the application with `mvn spring-boot:run`. + +### Main Application Components + +The main application components are: +* [DatabaseSeeder.java](src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java): This + class is responsible for creating the database schema and inserting some initial test data. The + schema is created from the [create_schema.sql](src/main/resources/create_schema.sql) file. The + `DatabaseSeeder` class loads this file into memory and executes it on the active database using + standard JDBC APIs. +* [EmulatorInitializer.java](src/main/java/com/google/cloud/spanner/sample/EmulatorInitializer.java): + This ApplicationListener automatically starts the Spanner emulator as a Docker container if the + sample has been configured to run on the emulator. You can disable this with the `spanner.emulator` + property in `application.properties`. +* [AbstractEntity.java](src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java): + This is the shared base class for all entities in this sample application. It defines a number of + standard attributes, such as the identifier (primary key). The primary key is automatically + generated using a (bit-reversed) identity column. [Bit-reversed sequential values](https://cloud.google.com/spanner/docs/schema-design#bit_reverse_primary_key) + are considered a good choice for primary keys in Spanner. 
+* [Application.java](src/main/java/com/google/cloud/spanner/sample/Application.java): The starter + class of the application. It contains a command-line runner that executes a selection of queries + and updates on the database. +* [SingerService](src/main/java/com/google/cloud/spanner/sample/service/SingerService.java) and + [AlbumService](src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java) are + standard Spring service beans that contain business logic that can be executed as transactions. + This includes both read/write and read-only transactions. diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/pom.xml b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/pom.xml new file mode 100644 index 000000000000..5777b429a3f2 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/pom.xml @@ -0,0 +1,147 @@ + + + 4.0.0 + + org.example + cloud-spanner-spring-data-mybatis-googlesql-example + 1.0-SNAPSHOT + + Sample application showing how to use Spring Data MyBatis with Spanner GoogleSQL. 
+ + + org.springframework.boot + spring-boot-starter-parent + 3.5.11 + + + + 17 + 17 + 17 + UTF-8 + + + + + + org.springframework.data + spring-data-bom + 2025.0.5 + import + pom + + + com.google.cloud + google-cloud-spanner-bom + 6.111.1 + import + pom + + + com.google.cloud + grpc-gcp + + + com.google.cloud + libraries-bom + 26.76.0 + import + pom + + + org.testcontainers + testcontainers-bom + 2.0.3 + import + pom + + + + + + + org.mybatis.spring.boot + mybatis-spring-boot-starter + 3.0.5 + + + org.mybatis.dynamic-sql + mybatis-dynamic-sql + 1.5.2 + + + + + com.google.cloud + google-cloud-spanner-jdbc + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + + + org.testcontainers + testcontainers + + + + com.google.collections + google-collections + 1.0 + + + + + com.google.cloud + google-cloud-spanner + 6.111.1 + test-jar + test + + + com.google.api + gax-grpc + testlib + test + + + + net.bytebuddy + byte-buddy + 1.18.5 + test + + + net.bytebuddy + byte-buddy-agent + 1.18.5 + test + + + junit + junit + 4.13.2 + + + + + + + com.spotify.fmt + fmt-maven-plugin + 2.29 + + + + format + + + + + + + diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/Application.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/Application.java new file mode 100644 index 000000000000..cf9ab71d34b8 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/Application.java @@ -0,0 +1,161 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.cloud.spanner.sample.entities.Track; +import com.google.cloud.spanner.sample.mappers.AlbumMapper; +import com.google.cloud.spanner.sample.mappers.SingerMapper; +import com.google.cloud.spanner.sample.service.AlbumService; +import com.google.cloud.spanner.sample.service.SingerService; +import java.util.concurrent.ThreadLocalRandom; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application implements CommandLineRunner { + private static final Logger logger = LoggerFactory.getLogger(Application.class); + + public static void main(String[] args) { + // Start the Spanner emulator in a Docker container if the `spanner.auto_start_emulator` + // property has been set to true. If not, then this is a no-op. 
+ EmulatorInitializer emulatorInitializer = new EmulatorInitializer(); + try { + SpringApplication application = new SpringApplication(Application.class); + application.addListeners(emulatorInitializer); + application.run(args).close(); + } finally { + SpannerPool.closeSpannerPool(); + emulatorInitializer.stopEmulator(); + } + } + + private final DatabaseSeeder databaseSeeder; + + private final SingerService singerService; + + private final AlbumService albumService; + + private final SingerMapper singerMapper; + + private final AlbumMapper albumMapper; + + public Application( + SingerService singerService, + AlbumService albumService, + DatabaseSeeder databaseSeeder, + SingerMapper singerMapper, + AlbumMapper albumMapper) { + this.databaseSeeder = databaseSeeder; + this.singerService = singerService; + this.albumService = albumService; + this.singerMapper = singerMapper; + this.albumMapper = albumMapper; + } + + @Override + public void run(String... args) { + // Set the system property 'drop_schema' to true to drop any existing database + // schema when the application is executed. + if (Boolean.parseBoolean(System.getProperty("drop_schema", "false"))) { + logger.info("Dropping existing schema if it exists"); + databaseSeeder.dropDatabaseSchemaIfExists(); + } + + logger.info("Creating database schema if it does not already exist"); + databaseSeeder.createDatabaseSchemaIfNotExists(); + logger.info("Deleting existing test data"); + databaseSeeder.deleteTestData(); + logger.info("Inserting fresh test data"); + databaseSeeder.insertTestData(); + + Iterable allSingers = singerMapper.findAll(); + for (Singer singer : allSingers) { + logger.info( + "Found singer: {} with {} albums", + singer, + albumMapper.countAlbumsBySingerId(singer.getId())); + for (Album album : albumMapper.findAlbumsBySingerId(singer.getId())) { + logger.info("\tAlbum: {}, released at {}", album, album.getReleaseDate()); + } + } + + // Create a new singer and three albums in a transaction. 
+ Singer insertedSinger = + singerService.createSingerAndAlbums( + new Singer("Amethyst", "Jiang"), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle())); + logger.info( + "Inserted singer {} {} {}", + insertedSinger.getId(), + insertedSinger.getFirstName(), + insertedSinger.getLastName()); + + // Create a new Album and some Tracks in a read/write transaction. + // Track is an interleaved table. + Album album = new Album(DatabaseSeeder.randomTitle()); + album.setSingerId(insertedSinger.getId()); + albumService.createAlbumAndTracks( + album, + new Track(album, 1, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 2, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 3, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 4, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 5, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 6, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 7, DatabaseSeeder.randomTitle(), 3.14d)); + logger.info("Inserted album {}", album.getTitle()); + + // List all singers that have a last name starting with an 'J'. + logger.info("All singers with a last name starting with an 'J':"); + for (Singer singer : singerMapper.findSingersByLastNameStartingWith("J")) { + logger.info("\t{}", singer.getFullName()); + } + + // The singerService.listSingersWithLastNameStartingWith(..) method uses a read-only + // transaction. You should prefer read-only transactions to read/write transactions whenever + // possible, as read-only transactions do not take locks. + logger.info("All singers with a last name starting with an 'A', 'B', or 'C'."); + for (Singer singer : singerService.listSingersWithLastNameStartingWith("A", "B", "C")) { + logger.info("\t{}", singer.getFullName()); + } + + // Execute an insert-or-update for a Singer record. 
+ // For this, we either get a random Singer from the database, or create a new Singer entity + // and assign it a random ID. + logger.info("Executing an insert-or-update statement for a Singer record"); + Singer singer; + if (ThreadLocalRandom.current().nextBoolean()) { + singer = singerMapper.getRandom(); + } else { + singer = new Singer(); + singer.setId(ThreadLocalRandom.current().nextLong()); + } + singer.setFirstName("Beatriz"); + singer.setLastName("Russel"); + singer.setActive(true); + // This executes an INSERT OR UPDATE statement. + singerMapper.insertOrUpdate(singer); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java new file mode 100644 index 000000000000..73898784ef13 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java @@ -0,0 +1,316 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.common.collect.ImmutableList; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Reader; +import java.io.UncheckedIOException; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.time.LocalDate; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import javax.annotation.Nonnull; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.core.io.Resource; +import org.springframework.jdbc.core.BatchPreparedStatementSetter; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Component; +import org.springframework.util.FileCopyUtils; + +/** This component creates the database schema and seeds it with some random test data. */ +@Component +public class DatabaseSeeder { + + /** Randomly generated names. */ + public static final ImmutableList INITIAL_SINGERS = + ImmutableList.of( + new Singer("Aaliyah", "Smith"), + new Singer("Benjamin", "Jones"), + new Singer("Chloe", "Brown"), + new Singer("David", "Williams"), + new Singer("Elijah", "Johnson"), + new Singer("Emily", "Miller"), + new Singer("Gabriel", "Garcia"), + new Singer("Hannah", "Rodriguez"), + new Singer("Isabella", "Hernandez"), + new Singer("Jacob", "Perez")); + + private static final Random RANDOM = new Random(); + + private final JdbcTemplate jdbcTemplate; + + @Value("classpath:create_schema.sql") + private Resource createSchemaFile; + + @Value("classpath:drop_schema.sql") + private Resource dropSchemaFile; + + public DatabaseSeeder(JdbcTemplate jdbcTemplate) { + this.jdbcTemplate = jdbcTemplate; + } + + /** Reads a resource file into a string. 
*/ + private static String resourceAsString(Resource resource) { + try (Reader reader = new InputStreamReader(resource.getInputStream(), UTF_8)) { + return FileCopyUtils.copyToString(reader); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + /** Removes all empty statements in the DDL script. */ + private String[] updateDdlStatements(String[] statements) { + // Remove any empty statements from the script. + return Arrays.stream(statements) + .filter(statement -> !statement.isBlank()) + .toArray(String[]::new); + } + + /** Creates the database schema if it does not yet exist. */ + public void createDatabaseSchemaIfNotExists() { + // We can safely just split the script based on ';', as we know that there are no literals or + // other strings that contain semicolons in the script. + String[] statements = updateDdlStatements(resourceAsString(createSchemaFile).split(";")); + // Execute all the DDL statements as a JDBC batch. That ensures that Spanner will apply all + // statements in a single DDL batch, which again is a lot more efficient than executing them + // one-by-one. + jdbcTemplate.batchUpdate(statements); + } + + /** Drops the database schema if it exists. */ + public void dropDatabaseSchemaIfExists() { + // We can safely just split the script based on ';', as we know that there are no literals or + // other strings that contain semicolons in the script. + String[] statements = updateDdlStatements(resourceAsString(dropSchemaFile).split(";")); + // Execute all the DDL statements as a JDBC batch. That ensures that Spanner will apply all + // statements in a single DDL batch, which again is a lot more efficient than executing them + // one-by-one. + jdbcTemplate.batchUpdate(statements); + } + + /** Deletes all data currently in the sample tables. */ + public void deleteTestData() { + // Delete all data in one batch. 
+ jdbcTemplate.batchUpdate( + "delete from concerts where true", + "delete from venues where true", + "delete from tracks where true", + "delete from albums where true", + "delete from singers where true"); + } + + /** Inserts some initial test data into the database. */ + public void insertTestData() { + jdbcTemplate.batchUpdate( + "insert into singers (first_name, last_name) values (?, ?)", + new BatchPreparedStatementSetter() { + @Override + public void setValues(@Nonnull PreparedStatement preparedStatement, int i) + throws SQLException { + preparedStatement.setString(1, INITIAL_SINGERS.get(i).getFirstName()); + preparedStatement.setString(2, INITIAL_SINGERS.get(i).getLastName()); + } + + @Override + public int getBatchSize() { + return INITIAL_SINGERS.size(); + } + }); + + List singerIds = + jdbcTemplate.query( + "select id from singers", + resultSet -> { + ImmutableList.Builder builder = ImmutableList.builder(); + while (resultSet.next()) { + builder.add(resultSet.getLong(1)); + } + return builder.build(); + }); + jdbcTemplate.batchUpdate( + "insert into albums (title, marketing_budget, release_date, cover_picture, singer_id) values (?, ?, ?, ?, ?)", + new BatchPreparedStatementSetter() { + @Override + public void setValues(@Nonnull PreparedStatement preparedStatement, int i) + throws SQLException { + preparedStatement.setString(1, randomTitle()); + preparedStatement.setBigDecimal(2, randomBigDecimal()); + preparedStatement.setObject(3, randomDate()); + preparedStatement.setBytes(4, randomBytes()); + preparedStatement.setLong(5, randomElement(singerIds)); + } + + @Override + public int getBatchSize() { + return INITIAL_SINGERS.size() * 20; + } + }); + } + + /** Generates a random title for an album or a track. */ + static String randomTitle() { + return randomElement(ADJECTIVES) + " " + randomElement(NOUNS); + } + + /** Returns a random element from the given list. 
*/ + static T randomElement(List list) { + return list.get(RANDOM.nextInt(list.size())); + } + + /** Generates a random {@link BigDecimal}. */ + BigDecimal randomBigDecimal() { + return BigDecimal.valueOf(RANDOM.nextDouble()).setScale(9, RoundingMode.HALF_UP); + } + + /** Generates a random {@link LocalDate}. */ + static LocalDate randomDate() { + return LocalDate.of(RANDOM.nextInt(200) + 1800, RANDOM.nextInt(12) + 1, RANDOM.nextInt(28) + 1); + } + + /** Generates a random byte array with a length between 4 and 1024 bytes. */ + static byte[] randomBytes() { + int size = RANDOM.nextInt(1020) + 4; + byte[] res = new byte[size]; + RANDOM.nextBytes(res); + return res; + } + + /** Some randomly generated nouns that are used to generate random titles. */ + private static final ImmutableList NOUNS = + ImmutableList.of( + "apple", + "banana", + "cherry", + "dog", + "elephant", + "fish", + "grass", + "house", + "key", + "lion", + "monkey", + "nail", + "orange", + "pen", + "queen", + "rain", + "shoe", + "tree", + "umbrella", + "van", + "whale", + "xylophone", + "zebra"); + + /** Some randomly generated adjectives that are used to generate random titles. 
*/ + private static final ImmutableList ADJECTIVES = + ImmutableList.of( + "able", + "angelic", + "artistic", + "athletic", + "attractive", + "autumnal", + "calm", + "careful", + "cheerful", + "clever", + "colorful", + "confident", + "courageous", + "creative", + "curious", + "daring", + "determined", + "different", + "dreamy", + "efficient", + "elegant", + "energetic", + "enthusiastic", + "exciting", + "expressive", + "faithful", + "fantastic", + "funny", + "gentle", + "gifted", + "great", + "happy", + "helpful", + "honest", + "hopeful", + "imaginative", + "intelligent", + "interesting", + "inventive", + "joyful", + "kind", + "knowledgeable", + "loving", + "loyal", + "magnificent", + "mature", + "mysterious", + "natural", + "nice", + "optimistic", + "peaceful", + "perfect", + "pleasant", + "powerful", + "proud", + "quick", + "relaxed", + "reliable", + "responsible", + "romantic", + "safe", + "sensitive", + "sharp", + "simple", + "sincere", + "skillful", + "smart", + "sociable", + "strong", + "successful", + "sweet", + "talented", + "thankful", + "thoughtful", + "unique", + "upbeat", + "valuable", + "victorious", + "vivacious", + "warm", + "wealthy", + "wise", + "wonderful", + "worthy", + "youthful"); +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/EmulatorInitializer.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/EmulatorInitializer.java new file mode 100644 index 000000000000..6c6130be6928 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/EmulatorInitializer.java @@ -0,0 +1,57 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import org.springframework.boot.context.event.ApplicationEnvironmentPreparedEvent; +import org.springframework.context.ApplicationListener; +import org.springframework.core.env.ConfigurableEnvironment; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.images.PullPolicy; +import org.testcontainers.utility.DockerImageName; + +public class EmulatorInitializer + implements ApplicationListener { + private GenericContainer emulator; + + @Override + public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) { + ConfigurableEnvironment environment = event.getEnvironment(); + boolean useEmulator = + Boolean.TRUE.equals(environment.getProperty("spanner.emulator", Boolean.class)); + boolean autoStartEmulator = + Boolean.TRUE.equals(environment.getProperty("spanner.auto_start_emulator", Boolean.class)); + if (!(useEmulator && autoStartEmulator)) { + return; + } + + emulator = + new GenericContainer<>(DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator")); + emulator.withImagePullPolicy(PullPolicy.alwaysPull()); + emulator.addExposedPort(9010); + emulator.setWaitStrategy(Wait.forListeningPorts(9010)); + emulator.start(); + + System.setProperty("spanner.endpoint", "//localhost:" + emulator.getMappedPort(9010)); + } + + public void stopEmulator() { + if (this.emulator != null) { + this.emulator.stop(); + } + } +} diff --git 
a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java new file mode 100644 index 000000000000..dcf64a22d6a5 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java @@ -0,0 +1,73 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import java.time.OffsetDateTime; + +public abstract class AbstractEntity { + + /** This ID is generated using a (bit-reversed) identity column. */ + private Long id; + + private OffsetDateTime createdAt; + + private OffsetDateTime updatedAt; + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractEntity)) { + return false; + } + AbstractEntity other = (AbstractEntity) o; + if (this == other) { + return true; + } + return this.getClass().equals(other.getClass()) + && this.id != null + && other.id != null + && this.id.equals(other.id); + } + + @Override + public int hashCode() { + return this.id == null ? 
0 : this.id.hashCode(); + } + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public OffsetDateTime getCreatedAt() { + return createdAt; + } + + protected void setCreatedAt(OffsetDateTime createdAt) { + this.createdAt = createdAt; + } + + public OffsetDateTime getUpdatedAt() { + return updatedAt; + } + + protected void setUpdatedAt(OffsetDateTime updatedAt) { + this.updatedAt = updatedAt; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java new file mode 100644 index 000000000000..9ea238506da2 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java @@ -0,0 +1,84 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +import java.math.BigDecimal; +import java.time.LocalDate; + +public class Album extends AbstractEntity { + + private String title; + + private BigDecimal marketingBudget; + + private LocalDate releaseDate; + + private byte[] coverPicture; + + private Long singerId; + + public Album() {} + + public Album(String title) { + this.title = title; + } + + @Override + public String toString() { + return getTitle(); + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public BigDecimal getMarketingBudget() { + return marketingBudget; + } + + public void setMarketingBudget(BigDecimal marketingBudget) { + this.marketingBudget = marketingBudget; + } + + public LocalDate getReleaseDate() { + return releaseDate; + } + + public void setReleaseDate(LocalDate releaseDate) { + this.releaseDate = releaseDate; + } + + public byte[] getCoverPicture() { + return coverPicture; + } + + public void setCoverPicture(byte[] coverPicture) { + this.coverPicture = coverPicture; + } + + public Long getSingerId() { + return singerId; + } + + public void setSingerId(Long singerId) { + this.singerId = singerId; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java new file mode 100644 index 000000000000..ac13102afd98 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Concert.java @@ -0,0 +1,78 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import java.time.OffsetDateTime; + +public class Concert extends AbstractEntity { + + private Long venueId; + + private Long singerId; + + private String name; + + private OffsetDateTime startTime; + + private OffsetDateTime endTime; + + public Concert(Venue venue, Singer singer, String name) { + this.venueId = venue.getId(); + this.singerId = singer.getId(); + this.name = name; + } + + public Long getVenueId() { + return venueId; + } + + public void setVenueId(Long venueId) { + this.venueId = venueId; + } + + public Long getSingerId() { + return singerId; + } + + public void setSingerId(Long singerId) { + this.singerId = singerId; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public OffsetDateTime getStartTime() { + return startTime; + } + + public void setStartTime(OffsetDateTime startTime) { + this.startTime = startTime; + } + + public OffsetDateTime getEndTime() { + return endTime; + } + + public void setEndTime(OffsetDateTime endTime) { + this.endTime = endTime; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java new file mode 100644 index 000000000000..b3f6d1c4f97d --- /dev/null +++ 
b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java @@ -0,0 +1,73 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +public class Singer extends AbstractEntity { + + private String firstName; + + private String lastName; + + /** The full name is generated by the database using a generated column. */ + private String fullName; + + private Boolean active; + + public Singer() {} + + public Singer(String firstName, String lastName) { + this.firstName = firstName; + this.lastName = lastName; + } + + @Override + public String toString() { + return getFullName(); + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + public String getLastName() { + return lastName; + } + + public void setLastName(String lastName) { + this.lastName = lastName; + } + + public String getFullName() { + return fullName; + } + + public void setFullName(String fullName) { + this.fullName = fullName; + } + + public Boolean getActive() { + return active; + } + + public void setActive(Boolean active) { + this.active = active; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java 
b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java new file mode 100644 index 000000000000..8191c696c757 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java @@ -0,0 +1,66 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +/** + * The "tracks" table is interleaved in "albums". That means that the first part of the primary key + * (the "id" column) references the Album that this Track belongs to. That again means that we do + * not auto-generate the id for this entity. + */ +public class Track extends AbstractEntity { + + /** + * This is the second part of the primary key of a Track. The first part, the 'id' column is + * defined in the {@link AbstractEntity} super class. 
+ */ + private int trackNumber; + + private String title; + + private Double sampleRate; + + public Track(Album album, int trackNumber, String title, Double sampleRate) { + setId(album.getId()); + this.trackNumber = trackNumber; + this.title = title; + this.sampleRate = sampleRate; + } + + public int getTrackNumber() { + return trackNumber; + } + + public void setTrackNumber(int trackNumber) { + this.trackNumber = trackNumber; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public Double getSampleRate() { + return sampleRate; + } + + public void setSampleRate(Double sampleRate) { + this.sampleRate = sampleRate; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java new file mode 100644 index 000000000000..f5eb4443d4a6 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java @@ -0,0 +1,43 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +public class Venue extends AbstractEntity { + private String name; + + private String description; + + public Venue(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/AlbumMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/AlbumMapper.java new file mode 100644 index 000000000000..f39b08a338dc --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/AlbumMapper.java @@ -0,0 +1,48 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.mappers; + +import com.google.cloud.spanner.sample.entities.Album; +import java.util.List; +import java.util.Optional; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Options; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; + +@Mapper +public interface AlbumMapper { + + @Select("SELECT * FROM albums WHERE id = #{albumId}") + Album get(@Param("albumId") long albumId); + + @Select("SELECT * FROM albums LIMIT 1") + Optional getFirst(); + + @Select("SELECT COUNT(1) FROM albums WHERE singer_id = #{singerId}") + long countAlbumsBySingerId(@Param("singerId") long singerId); + + @Select("SELECT * FROM albums WHERE singer_id = #{singerId}") + List findAlbumsBySingerId(@Param("singerId") long singerId); + + @Insert( + "INSERT INTO albums (title, marketing_budget, release_date, cover_picture, singer_id) " + + "VALUES (#{title}, #{marketingBudget}, #{releaseDate}, #{coverPicture}, #{singerId})") + @Options(useGeneratedKeys = true, keyProperty = "id") + int insert(Album album); +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/ConcertMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/ConcertMapper.java new file mode 100644 index 000000000000..1b52c603fd8d --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/ConcertMapper.java @@ -0,0 +1,29 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.sample.mappers;
+
+import com.google.cloud.spanner.sample.entities.Concert;
+import org.apache.ibatis.annotations.Mapper;
+import org.apache.ibatis.annotations.Param;
+import org.apache.ibatis.annotations.Select;
+
+@Mapper
+public interface ConcertMapper {
+
+  @Select("SELECT * FROM concerts WHERE id = #{concertId}")
+  Concert get(@Param("concertId") long concertId);
+}
diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/SingerMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/SingerMapper.java
new file mode 100644
index 000000000000..65ddb72edc2e
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/SingerMapper.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.spanner.sample.mappers; + +import com.google.cloud.spanner.sample.entities.Singer; +import java.util.List; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Options; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; +import org.apache.ibatis.annotations.Update; + +@Mapper +public interface SingerMapper { + + @Select("SELECT * FROM singers WHERE id = #{singerId}") + Singer get(@Param("singerId") long singerId); + + @Select("SELECT * FROM singers TABLESAMPLE RESERVOIR (1 ROWS)") + Singer getRandom(); + + @Select("SELECT * FROM singers ORDER BY last_name, first_name, id") + List findAll(); + + @Select("SELECT * FROM singers WHERE starts_with(last_name, #{lastName})") + List findSingersByLastNameStartingWith(@Param("lastName") String lastName); + + /** + * Inserts a new singer record and returns both the generated primary key value and the generated + * full name. + */ + @Insert( + "INSERT INTO singers (first_name, last_name, active) " + + "VALUES (#{firstName}, #{lastName}, #{active})") + @Options(useGeneratedKeys = true, keyProperty = "id,fullName") + int insert(Singer singer); + + /** + * Executes an insert-or-update statement for a Singer record. Note that the id must have been set + * manually on the Singer entity before calling this method. The statement only returns the + * 'fullName' property, because the 'id' is already known. + */ + @Insert( + "INSERT OR UPDATE singers (id, first_name, last_name, active) " + + "VALUES (#{id}, #{firstName}, #{lastName}, #{active})") + @Options(useGeneratedKeys = true, keyProperty = "fullName") + int insertOrUpdate(Singer singer); + + /** Updates an existing singer and returns the generated full name. 
+   */
+  @Update(
+      "UPDATE singers SET "
+          + "first_name=#{firstName}, "
+          + "last_name=#{lastName}, "
+          + "active=#{active} "
+          + "WHERE id=#{id}")
+  @Options(useGeneratedKeys = true, keyProperty = "fullName")
+  int update(Singer singer);
+}
diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/TrackMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/TrackMapper.java
new file mode 100644
index 000000000000..729c56fa63d2
--- /dev/null
+++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/TrackMapper.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.spanner.sample.mappers; + +import com.google.cloud.spanner.sample.entities.Track; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Options; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; + +@Mapper +public interface TrackMapper { + + @Select("SELECT * FROM tracks WHERE id = #{albumId} AND track_number = #{trackNumber}") + Track get(@Param("albumId") long albumId, @Param("trackNumber") long trackNumber); + + @Insert( + "INSERT INTO tracks (id, track_number, title, sample_rate) " + + "VALUES (#{id}, #{trackNumber}, #{title}, #{sampleRate})") + @Options(useGeneratedKeys = true, keyProperty = "id") + int insert(Track track); +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/VenueMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/VenueMapper.java new file mode 100644 index 000000000000..be220f867fe1 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/mappers/VenueMapper.java @@ -0,0 +1,29 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.mappers; + +import com.google.cloud.spanner.sample.entities.Venue; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; + +@Mapper +public interface VenueMapper { + + @Select("SELECT * FROM venues WHERE id = #{venueId}") + Venue get(@Param("venueId") long venueId); +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java new file mode 100644 index 000000000000..1a7e125f09e6 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java @@ -0,0 +1,49 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.service; + +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Track; +import com.google.cloud.spanner.sample.mappers.AlbumMapper; +import com.google.cloud.spanner.sample.mappers.TrackMapper; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +@Service +public class AlbumService { + private final AlbumMapper albumMapper; + + private final TrackMapper trackMapper; + + public AlbumService(AlbumMapper albumMapper, TrackMapper trackMapper) { + this.albumMapper = albumMapper; + this.trackMapper = trackMapper; + } + + /** Creates an album and a set of tracks in a read/write transaction. */ + @Transactional + public Album createAlbumAndTracks(Album album, Track... tracks) { + // Saving an album will update the album entity with the generated primary key. + albumMapper.insert(album); + for (Track track : tracks) { + // Set the id that was generated on the Album before saving it. + track.setId(album.getId()); + trackMapper.insert(track); + } + return album; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java new file mode 100644 index 000000000000..c56893a1b75e --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java @@ -0,0 +1,67 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.service; + +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.cloud.spanner.sample.mappers.AlbumMapper; +import com.google.cloud.spanner.sample.mappers.SingerMapper; +import com.google.common.collect.ImmutableList; +import java.util.List; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +@Service +public class SingerService { + private final SingerMapper singerRepository; + + private final AlbumMapper albumRepository; + + public SingerService(SingerMapper singerRepository, AlbumMapper albumRepository) { + this.singerRepository = singerRepository; + this.albumRepository = albumRepository; + } + + /** Creates a singer and a list of albums in a read/write transaction. */ + @Transactional + public Singer createSingerAndAlbums(Singer singer, Album... albums) { + // Saving a singer will update the singer entity with the generated primary key. + singerRepository.insert(singer); + for (Album album : albums) { + // Set the singerId that was generated on the Album before saving it. + album.setSingerId(singer.getId()); + albumRepository.insert(album); + } + return singer; + } + + /** + * Searches for all singers that have a last name starting with any of the given prefixes. This + * method uses a read-only transaction. Read-only transactions should be preferred to read/write + * transactions whenever possible, as read-only transactions do not take locks. 
+ */ + @Transactional(readOnly = true) + public List listSingersWithLastNameStartingWith(String... prefixes) { + ImmutableList.Builder result = ImmutableList.builder(); + // This is not the most efficient way to search for this, but the main purpose of this method is + // to show how to use read-only transactions. + for (String prefix : prefixes) { + result.addAll(singerRepository.findSingersByLastNameStartingWith(prefix)); + } + return result.build(); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/application.properties b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/application.properties new file mode 100644 index 000000000000..e3eb847a68bd --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/application.properties @@ -0,0 +1,36 @@ + +# Map column names with an underscore to property names in camel case. +# E.g. column 'full_name' maps to Java property 'fullName'. +mybatis.configuration.map-underscore-to-camel-case=true + +# The sample by default uses the Spanner emulator. +# Disable this flag to run the sample on a real Spanner instance. +spanner.emulator=true + +# The sample by default starts an emulator instance in Docker. +# Disable this flag to run the sample on an Emulator instance that +# you start manually, for example if you don't have Docker installed +# on your local machine. Keep the 'spanner.emulator=true' line above +# to connect to the emulator that you have started. +spanner.auto_start_emulator=true + +# Update these properties to match your project, instance, and database. +spanner.project=my-project +spanner.instance=my-instance +spanner.database=mybatis-sample + +# Sets the isolation level that will be used by default for read/write transactions. +# Spanner supports the isolation levels SERIALIZABLE and REPEATABLE READ. 
+spanner.default_isolation_level=SERIALIZABLE + +spring.datasource.url=jdbc:cloudspanner:${spanner.endpoint}/projects/${spanner.project}/instances/${spanner.instance}/databases/${spanner.database};default_isolation_level=${spanner.default_isolation_level};autoConfigEmulator=${spanner.emulator};${spanner.additional_properties} +spring.datasource.driver-class-name=com.google.cloud.spanner.jdbc.JdbcDriver + + +# These properties are only used for testing. + +# This property is automatically set to point to the Spanner emulator that is automatically +# started together with the application. It remains empty if the application is executed +# against a real Spanner instance. +spanner.endpoint= +spanner.additional_properties= diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/create_schema.sql b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/create_schema.sql new file mode 100644 index 000000000000..f54ef649222a --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/create_schema.sql @@ -0,0 +1,59 @@ + +-- This script creates the database schema for this sample application. +-- The script is executed by the DatabaseSeeder class. 
+ +CREATE TABLE IF NOT EXISTS singers ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + first_name STRING(MAX), + last_name STRING(MAX), + full_name STRING(MAX) AS (CASE WHEN first_name IS NULL THEN last_name + WHEN last_name IS NULL THEN first_name + ELSE first_name || ' ' || last_name END) STORED, + active BOOL DEFAULT (TRUE), + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), +); + +CREATE TABLE IF NOT EXISTS albums ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + title STRING(MAX) NOT NULL, + marketing_budget NUMERIC, + release_date DATE, + cover_picture BYTES(MAX), + singer_id INT64 NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + CONSTRAINT fk_albums_singers FOREIGN KEY (singer_id) REFERENCES singers (id) +); + +CREATE TABLE IF NOT EXISTS tracks ( + id INT64 NOT NULL, + track_number INT64 NOT NULL, + title STRING(MAX) NOT NULL, + sample_rate FLOAT64 NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), +) PRIMARY KEY (id, track_number), INTERLEAVE IN PARENT albums ON DELETE CASCADE +; + +CREATE TABLE IF NOT EXISTS venues ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + name STRING(MAX) NOT NULL, + description JSON NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), +); + +CREATE TABLE IF NOT EXISTS concerts ( + id INT64 NOT NULL GENERATED BY DEFAULT AS IDENTITY (BIT_REVERSED_POSITIVE) PRIMARY KEY, + venue_id INT64 NOT NULL, + singer_id INT64 NOT NULL, + name STRING(MAX) NOT NULL, + start_time TIMESTAMP NOT NULL, + end_time TIMESTAMP NOT NULL, + created_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + updated_at TIMESTAMP DEFAULT (CURRENT_TIMESTAMP), + CONSTRAINT fk_concerts_venues FOREIGN KEY 
(venue_id) REFERENCES venues (id), + CONSTRAINT fk_concerts_singers FOREIGN KEY (singer_id) REFERENCES singers (id), + CONSTRAINT chk_end_time_after_start_time CHECK (end_time > start_time) +); diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/drop_schema.sql b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/drop_schema.sql new file mode 100644 index 000000000000..23e7b65d3bb1 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/main/resources/drop_schema.sql @@ -0,0 +1,5 @@ +drop table if exists concerts; +drop table if exists venues; +drop table if exists tracks; +drop table if exists albums; +drop table if exists singers; diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java new file mode 100644 index 000000000000..abdfdcdef7de --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java @@ -0,0 +1,42 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import static org.junit.Assume.assumeTrue; + +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.testcontainers.DockerClientFactory; + +@RunWith(JUnit4.class) +public class ApplicationEmulatorTest { + + @BeforeClass + public static void checkDocker() { + assumeTrue( + "Docker is required for this test", DockerClientFactory.instance().isDockerAvailable()); + } + + @Test + public void testRunApplicationOnEmulator() { + System.setProperty("spanner.emulator", "true"); + System.setProperty("spanner.auto_start_emulator", "true"); + Application.main(new String[] {}); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java new file mode 100644 index 000000000000..348f31281a43 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/googlesql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java @@ -0,0 +1,990 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample; + +import static com.google.cloud.spanner.sample.DatabaseSeeder.INITIAL_SINGERS; +import static com.google.cloud.spanner.sample.DatabaseSeeder.randomDate; +import static com.google.cloud.spanner.sample.DatabaseSeeder.randomTitle; +import static junit.framework.TestCase.assertEquals; +import static junit.framework.TestCase.assertTrue; +import static org.junit.Assert.assertNotEquals; + +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.common.collect.Streams; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Value; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.LongStream; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ApplicationTest extends AbstractMockServerTest { + + @BeforeClass + public static 
void setupQueryResults() { + // Add a DDL response to the server. + addDdlResponseToSpannerAdmin(); + + // Set up results for the 'delete all test data' operations. + mockSpanner.putStatementResult( + StatementResult.update(Statement.of("delete from concerts where true"), 0L)); + mockSpanner.putStatementResult( + StatementResult.update(Statement.of("delete from venues where true"), 0L)); + mockSpanner.putStatementResult( + StatementResult.update(Statement.of("delete from tracks where true"), 0L)); + mockSpanner.putStatementResult( + StatementResult.update(Statement.of("delete from albums where true"), 0L)); + mockSpanner.putStatementResult( + StatementResult.update(Statement.of("delete from singers where true"), 0L)); + + // Set up results for inserting test data. + for (Singer singer : INITIAL_SINGERS) { + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder("insert into singers (first_name, last_name) values (@p1, @p2)") + .bind("p1") + .to(singer.getFirstName()) + .bind("p2") + .to(singer.getLastName()) + .build(), + 1L)); + } + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("select id from singers"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addAllRows( + LongStream.rangeClosed(1L, INITIAL_SINGERS.size()) + .mapToObj( + id -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(id))) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.update( + Statement.of( + "insert into albums (title, marketing_budget, release_date, cover_picture, singer_id) values (@p1, @p2, @p3, @p4, @p5)"), + 1L)); + + // Set up results for the queries that the application runs. 
+ mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT * FROM singers ORDER BY last_name, first_name, id"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addAllRows( + Streams.mapWithIndex( + INITIAL_SINGERS.stream(), + (singer, index) -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(index + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setStringValue( + singer.getFirstName() + " " + singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getFirstName()) + .build()) + .build()) + 
.collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of("SELECT COUNT(1) FROM albums WHERE singer_id = @p1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("10").build()) + .build()) + .build())); + for (long singerId : LongStream.rangeClosed(1L, INITIAL_SINGERS.size()).toArray()) { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder("SELECT * FROM albums WHERE singer_id = @p1") + .bind("p1") + .to(Long.reverse(singerId)) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType( + Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + 
Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addAllRows( + IntStream.rangeClosed(1, 10) + .mapToObj( + albumId -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(albumId * singerId))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(singerId)))) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(randomDate().toString()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + } + int singerIndex = ThreadLocalRandom.current().nextInt(INITIAL_SINGERS.size()); + Singer randomSinger = INITIAL_SINGERS.get(singerIndex); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT * FROM singers TABLESAMPLE RESERVOIR (1 ROWS)"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + 
Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(singerIndex + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder().setStringValue(randomSinger.getLastName()).build()) + .addValues( + Value.newBuilder() + .setStringValue( + randomSinger.getFirstName() + " " + randomSinger.getLastName()) + .build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomSinger.getFirstName()).build()) + .build()) + .build())); + + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder( + "INSERT INTO singers (first_name, last_name, active) VALUES (@p1, @p2, @p3)\n" + + "THEN RETURN *") + .bind("p1") + .to("Amethyst") + .bind("p2") + .to("Jiang") + .bind("p3") + .to((com.google.cloud.spanner.Value) null) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + 
.setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(INITIAL_SINGERS.size() + 2))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues(Value.newBuilder().setStringValue("Amethyst").build()) + .addValues(Value.newBuilder().setStringValue("Amethyst Jiang").build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setStringValue("Jiang").build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT INTO albums (title, marketing_budget, release_date, cover_picture, singer_id) VALUES (@p1, @p2, @p3, @p4, @p5)\n" + + "THEN RETURN *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + 
Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setStringValue(String.valueOf(1L))) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomDate().toString()).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT INTO tracks (id, track_number, title, sample_rate) VALUES (@p1, @p2, @p3, @p4)\n" + + "THEN RETURN *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("track_number") + 
.setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("sample_rate") + .setType( + Type.newBuilder().setCode(TypeCode.FLOAT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue("1").build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setNumberValue(3.14d)) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("select * from albums limit 1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + 
.setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setStringValue(String.valueOf(1L))) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomDate().toString()).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + for (String prefix : new String[] {"J", "A", "B", "C"}) { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder("SELECT * FROM singers WHERE starts_with(last_name, @p1)") + .bind("p1") + .to(prefix) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + 
Field.newBuilder() + .setName("last_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addAllRows( + Streams.mapWithIndex( + INITIAL_SINGERS.stream() + .filter( + singer -> + singer.getLastName().startsWith(prefix.substring(0, 1))), + (singer, index) -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(index + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setStringValue( + singer.getFirstName() + + " " + + singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getFirstName()) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT OR UPDATE singers (id, first_name, last_name, active) VALUES (@p1, @p2, @p3, @p4)\n" + + "THEN RETURN *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + 
Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(ThreadLocalRandom.current().nextLong())) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues(Value.newBuilder().setStringValue("Russel").build()) + .addValues(Value.newBuilder().setStringValue("Beatriz Russel").build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setStringValue("Beatriz").build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + } + } + + @Test + public void testRunApplication() { + System.setProperty("spanner.emulator", "false"); + System.setProperty("spanner.endpoint", "//localhost:" + getPort()); + System.setProperty("spanner.additional_properties", "usePlainText=true"); + Application.main(new String[] {}); + + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(6, 
mockSpanner.countRequestsOfType(CommitRequest.class)); + + // Verify that the service methods use transactions. + String insertSingerSql = + "INSERT INTO singers (first_name, last_name, active) VALUES (@p1, @p2, @p3)\nTHEN RETURN *"; + assertEquals( + 1, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertSingerSql)) + .count()); + ExecuteSqlRequest insertSingerRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertSingerSql)) + .findFirst() + .orElseThrow(); + assertTrue(insertSingerRequest.hasTransaction()); + assertTrue(insertSingerRequest.getTransaction().hasBegin()); + assertTrue(insertSingerRequest.getTransaction().getBegin().hasReadWrite()); + String insertAlbumSql = + "INSERT INTO albums (title, marketing_budget, release_date, cover_picture, singer_id) " + + "VALUES (@p1, @p2, @p3, @p4, @p5)\nTHEN RETURN *"; + assertEquals( + 4, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertAlbumSql)) + .count()); + // The first 3 requests belong to the transaction that is executed together with the 'INSERT + // INTO singers' statement. + List insertAlbumRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertAlbumSql)) + .toList() + .subList(0, 3); + ExecuteSqlRequest firstInsertAlbumRequest = insertAlbumRequests.get(0); + for (ExecuteSqlRequest request : insertAlbumRequests) { + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasId()); + assertEquals( + firstInsertAlbumRequest.getTransaction().getId(), request.getTransaction().getId()); + } + // Verify that the transaction is committed. 
+ assertEquals( + 1, + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request + .getTransactionId() + .equals(firstInsertAlbumRequest.getTransaction().getId())) + .count()); + + // The last 'INSERT INTO albums' request belong in a transaction with 8 'INSERT INTO tracks' + // requests. + ExecuteSqlRequest lastInsertAlbumRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertAlbumSql)) + .toList() + .get(3); + assertNotEquals( + lastInsertAlbumRequest.getTransaction().getId(), + firstInsertAlbumRequest.getTransaction().getId()); + assertTrue(lastInsertAlbumRequest.hasTransaction()); + assertTrue(lastInsertAlbumRequest.getTransaction().hasBegin()); + assertTrue(lastInsertAlbumRequest.getTransaction().getBegin().hasReadWrite()); + String insertTrackSql = + "INSERT INTO tracks (id, track_number, title, sample_rate) " + + "VALUES (@p1, @p2, @p3, @p4)\nTHEN RETURN *"; + assertEquals( + 7, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertTrackSql)) + .count()); + List insertTrackRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertTrackSql)) + .toList(); + for (ExecuteSqlRequest request : insertTrackRequests) { + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasId()); + assertEquals( + insertTrackRequests.get(0).getTransaction().getId(), request.getTransaction().getId()); + } + // Verify that the transaction is committed. + assertEquals( + 1, + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request + .getTransactionId() + .equals(insertTrackRequests.get(0).getTransaction().getId())) + .count()); + + // Verify that the SingerService#listSingersWithLastNameStartingWith(..) method uses a read-only + // transaction. 
+ assertEquals( + 1, + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).stream() + .filter(request -> request.getOptions().hasReadOnly()) + .count()); + String selectSingersSql = "SELECT * FROM singers WHERE starts_with(last_name, @p1)"; + assertEquals( + 4, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(selectSingersSql)) + .count()); + List selectSingersRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(selectSingersSql)) + .toList() + .subList(1, 4); + ExecuteSqlRequest firstSelectSingersRequest = selectSingersRequests.get(0); + for (ExecuteSqlRequest request : selectSingersRequests) { + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasId()); + } + // Verify that the read-only transaction is not committed. + assertEquals( + 0, + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request + .getTransactionId() + .equals(firstSelectSingersRequest.getTransaction().getId())) + .count()); + } + + private static void addDdlResponseToSpannerAdmin() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance())) + .build()); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/README.md b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/README.md new file mode 100644 index 000000000000..61dc46f48eeb --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/README.md @@ -0,0 +1,100 @@ +# Spring Data MyBatis Sample Application with Cloud Spanner PostgreSQL + +This sample application shows how to develop portable applications using Spring Data MyBatis in +combination with Cloud Spanner PostgreSQL. 
This application can be configured to run on either a +[Cloud Spanner PostgreSQL](https://cloud.google.com/spanner/docs/postgresql-interface) database or +an open-source PostgreSQL database. The only change that is needed to switch between the two is +changing the active Spring profile that is used by the application. + +The application uses the Cloud Spanner JDBC driver to connect to Cloud Spanner PostgreSQL, and it +uses the PostgreSQL JDBC driver to connect to open-source PostgreSQL. Spring Data MyBatis works with +both drivers and offers a single consistent API to the application developer, regardless of the +actual database or JDBC driver being used. + +This sample shows: + +1. How to use Spring Data MyBatis with Cloud Spanner PostgreSQL. +2. How to develop a portable application that runs on both Google Cloud Spanner PostgreSQL and + open-source PostgreSQL with the same code base. +3. How to use bit-reversed sequences to automatically generate primary key values for entities. +4. How to use the Spanner Emulator for development in combination with Spring Data. + +__NOTE__: This application does __not require PGAdapter__. Instead, it connects to Cloud Spanner +PostgreSQL using the Cloud Spanner JDBC driver. + +## Cloud Spanner PostgreSQL + +Cloud Spanner PostgreSQL provides language support by expressing Spanner database functionality +through a subset of open-source PostgreSQL language constructs, with extensions added to support +Spanner functionality like interleaved tables and hinting. + +The PostgreSQL interface makes the capabilities of Spanner —__fully managed, unlimited scale, strong +consistency, high performance, and up to 99.999% global availability__— accessible using the +PostgreSQL dialect. Unlike other services that manage actual PostgreSQL database instances, Spanner +uses PostgreSQL-compatible syntax to expose its existing scale-out capabilities. 
This provides +familiarity for developers and portability for applications, but not 100% PostgreSQL compatibility. +The SQL syntax that Spanner supports is semantically equivalent PostgreSQL, meaning schemas +and queries written against the PostgreSQL interface can be easily ported to another PostgreSQL +environment. + +This sample showcases this portability with an application that works on both Cloud Spanner PostgreSQL +and open-source PostgreSQL with the same code base. + +## MyBatis Spring +[MyBatis Spring](http://mybatis.org/spring/) integrates MyBatis with the popular Java Spring +framework. This allows MyBatis to participate in Spring transactions and to automatically inject +MyBatis mappers into other beans. + +## Sample Application + +This sample shows how to create a portable application using Spring Data MyBatis and the Cloud Spanner +PostgreSQL dialect. The application works on both Cloud Spanner PostgreSQL and open-source +PostgreSQL. You can switch between the two by changing the active Spring profile: +* Profile `cs` runs the application on Cloud Spanner PostgreSQL. +* Profile `pg` runs the application on open-source PostgreSQL. + +The default profile is `cs`. You can change the default profile by modifying the +[application.properties](src/main/resources/application.properties) file. + +### Running the Application + +1. Choose the database system that you want to use by choosing a profile. The default profile is + `cs`, which runs the application on Cloud Spanner PostgreSQL. +2. The sample by default starts an instance of the Spanner Emulator together with the application and + runs the application against the emulator. +3. Modify the default profile in the [application.properties](src/main/resources/application.properties) + file to run the sample on an open-source PostgreSQL database. +4. 
Modify either [application-cs.properties](src/main/resources/application-cs.properties) or + [application-pg.properties](src/main/resources/application-pg.properties) to point to an existing + database. If you use Cloud Spanner, the database that the configuration file references must be a + database that uses the PostgreSQL dialect. +5. Run the application with `mvn spring-boot:run`. + +### Main Application Components + +The main application components are: +* [DatabaseSeeder.java](src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java): This + class is responsible for creating the database schema and inserting some initial test data. The + schema is created from the [create_schema.sql](src/main/resources/create_schema.sql) file. The + `DatabaseSeeder` class loads this file into memory and executes it on the active database using + standard JDBC APIs. The class also removes Cloud Spanner-specific extensions to the PostgreSQL + dialect when the application runs on open-source PostgreSQL. +* [JdbcConfiguration.java](src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java): + This utility class is used to determine whether the application is running on Cloud Spanner + PostgreSQL or open-source PostgreSQL. This can be used if you have specific features that should + only be executed on one of the two systems. +* [EmulatorInitializer.java](src/main/java/com/google/cloud/spanner/sample/EmulatorInitializer.java): + This ApplicationListener automatically starts the Spanner emulator as a Docker container if the + sample has been configured to run on the emulator. +* [AbstractEntity.java](src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java): + This is the shared base class for all entities in this sample application. It defines a number of + standard attributes, such as the identifier (primary key). The primary key is automatically + generated using a (bit-reversed) sequence. 
[Bit-reversed sequential values](https://cloud.google.com/spanner/docs/schema-design#bit_reverse_primary_key) + are considered a good choice for primary keys on Cloud Spanner. +* [Application.java](src/main/java/com/google/cloud/spanner/sample/Application.java): The starter + class of the application. It contains a command-line runner that executes a selection of queries + and updates on the database. +* [SingerService](src/main/java/com/google/cloud/spanner/sample/service/SingerService.java) and + [AlbumService](src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java) are + standard Spring service beans that contain business logic that can be executed as transactions. + This includes both read/write and read-only transactions. diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/pom.xml b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/pom.xml new file mode 100644 index 000000000000..5836f36ecdf1 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/pom.xml @@ -0,0 +1,140 @@ + + + 4.0.0 + + org.example + cloud-spanner-spring-data-mybatis-postgresql-example + 1.0-SNAPSHOT + + Sample application showing how to use Spring Data MyBatis with Cloud Spanner PostgreSQL. 
+ + + org.springframework.boot + spring-boot-starter-parent + 3.5.11 + + + + 17 + 17 + 17 + UTF-8 + + + + + + org.springframework.data + spring-data-bom + 2025.0.5 + import + pom + + + com.google.cloud + libraries-bom + 26.76.0 + import + pom + + + org.testcontainers + testcontainers-bom + 2.0.3 + import + pom + + + + + + + org.mybatis.spring.boot + mybatis-spring-boot-starter + 3.0.5 + + + org.mybatis.dynamic-sql + mybatis-dynamic-sql + 1.5.2 + + + + + com.google.cloud + google-cloud-spanner-jdbc + + + com.google.api.grpc + proto-google-cloud-spanner-executor-v1 + + + + + org.postgresql + postgresql + 42.7.10 + + + org.testcontainers + testcontainers + + + + com.google.collections + google-collections + 1.0 + + + + + com.google.cloud + google-cloud-spanner + test-jar + test + + + com.google.api + gax-grpc + testlib + test + + + + net.bytebuddy + byte-buddy + 1.18.5 + test + + + net.bytebuddy + byte-buddy-agent + 1.18.5 + test + + + junit + junit + 4.13.2 + + + + + + + com.spotify.fmt + fmt-maven-plugin + 2.29 + + + + format + + + + + + + diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/Application.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/Application.java new file mode 100644 index 000000000000..22d1ba2b2e19 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/Application.java @@ -0,0 +1,160 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.cloud.spanner.sample.entities.Track; +import com.google.cloud.spanner.sample.mappers.AlbumMapper; +import com.google.cloud.spanner.sample.mappers.SingerMapper; +import com.google.cloud.spanner.sample.service.AlbumService; +import com.google.cloud.spanner.sample.service.SingerService; +import java.util.concurrent.ThreadLocalRandom; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.CommandLineRunner; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class Application implements CommandLineRunner { + private static final Logger logger = LoggerFactory.getLogger(Application.class); + + public static void main(String[] args) { + EmulatorInitializer emulatorInitializer = new EmulatorInitializer(); + try { + SpringApplication application = new SpringApplication(Application.class); + application.addListeners(emulatorInitializer); + application.run(args).close(); + } finally { + SpannerPool.closeSpannerPool(); + emulatorInitializer.stopEmulator(); + } + } + + private final DatabaseSeeder databaseSeeder; + + private final SingerService singerService; + + private final AlbumService albumService; + + private final SingerMapper singerMapper; + + private final AlbumMapper 
albumMapper; + + public Application( + SingerService singerService, + AlbumService albumService, + DatabaseSeeder databaseSeeder, + SingerMapper singerMapper, + AlbumMapper albumMapper) { + this.databaseSeeder = databaseSeeder; + this.singerService = singerService; + this.albumService = albumService; + this.singerMapper = singerMapper; + this.albumMapper = albumMapper; + } + + @Override + public void run(String... args) { + + // Set the system property 'drop_schema' to true to drop any existing database + // schema when the application is executed. + if (Boolean.parseBoolean(System.getProperty("drop_schema", "false"))) { + logger.info("Dropping existing schema if it exists"); + databaseSeeder.dropDatabaseSchemaIfExists(); + } + + logger.info("Creating database schema if it does not already exist"); + databaseSeeder.createDatabaseSchemaIfNotExists(); + logger.info("Deleting existing test data"); + databaseSeeder.deleteTestData(); + logger.info("Inserting fresh test data"); + databaseSeeder.insertTestData(); + + Iterable allSingers = singerMapper.findAll(); + for (Singer singer : allSingers) { + logger.info( + "Found singer: {} with {} albums", + singer, + albumMapper.countAlbumsBySingerId(singer.getId())); + for (Album album : albumMapper.findAlbumsBySingerId(singer.getId())) { + logger.info("\tAlbum: {}, released at {}", album, album.getReleaseDate()); + } + } + + // Create a new singer and three albums in a transaction. + Singer insertedSinger = + singerService.createSingerAndAlbums( + new Singer("Amethyst", "Jiang"), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle()), + new Album(DatabaseSeeder.randomTitle())); + logger.info( + "Inserted singer {} {} {}", + insertedSinger.getId(), + insertedSinger.getFirstName(), + insertedSinger.getLastName()); + + // Create a new Album and some Tracks in a read/write transaction. + // Track is an interleaved table. 
+ Album album = new Album(DatabaseSeeder.randomTitle()); + album.setSingerId(insertedSinger.getId()); + albumService.createAlbumAndTracks( + album, + new Track(album, 1, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 2, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 3, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 4, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 5, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 6, DatabaseSeeder.randomTitle(), 3.14d), + new Track(album, 7, DatabaseSeeder.randomTitle(), 3.14d)); + logger.info("Inserted album {}", album.getTitle()); + + // List all singers that have a last name starting with an 'J'. + logger.info("All singers with a last name starting with an 'J':"); + for (Singer singer : singerMapper.findSingersByLastNameStartingWith("J")) { + logger.info("\t{}", singer.getFullName()); + } + + // The singerService.listSingersWithLastNameStartingWith(..) method uses a read-only + // transaction. You should prefer read-only transactions to read/write transactions whenever + // possible, as read-only transactions do not take locks. + logger.info("All singers with a last name starting with an 'A', 'B', or 'C'."); + for (Singer singer : singerService.listSingersWithLastNameStartingWith("A", "B", "C")) { + logger.info("\t{}", singer.getFullName()); + } + + // Execute an insert-or-update for a Singer record. + // For this, we either get a random Singer from the database, or create a new Singer entity + // and assign it a random ID. + logger.info("Executing an insert-or-update statement for a Singer record"); + Singer singer; + if (ThreadLocalRandom.current().nextBoolean()) { + singer = singerMapper.getRandom(); + } else { + singer = new Singer(); + singer.setId(ThreadLocalRandom.current().nextLong()); + } + singer.setFirstName("Beatriz"); + singer.setLastName("Russel"); + singer.setActive(true); + // This executes an INSERT ... ON CONFLICT DO UPDATE statement. 
+ singerMapper.insertOrUpdate(singer); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java new file mode 100644 index 000000000000..eabd04c3b1ba --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/DatabaseSeeder.java @@ -0,0 +1,342 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.sample;

import static java.nio.charset.StandardCharsets.UTF_8;

import com.google.cloud.spanner.sample.entities.Singer;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UncheckedIOException;
import java.math.BigDecimal;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.time.LocalDate;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.function.Supplier;
import javax.annotation.Nonnull;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;
import org.springframework.util.FileCopyUtils;

/** This component creates the database schema and seeds it with some random test data. */
@Component
public class DatabaseSeeder {

  /** Randomly generated names. */
  public static final ImmutableList<Singer> INITIAL_SINGERS =
      ImmutableList.of(
          new Singer("Aaliyah", "Smith"),
          new Singer("Benjamin", "Jones"),
          new Singer("Chloe", "Brown"),
          new Singer("David", "Williams"),
          new Singer("Elijah", "Johnson"),
          new Singer("Emily", "Miller"),
          new Singer("Gabriel", "Garcia"),
          new Singer("Hannah", "Rodriguez"),
          new Singer("Isabella", "Hernandez"),
          new Singer("Jacob", "Perez"));

  private static final Random RANDOM = new Random();

  private final JdbcTemplate jdbcTemplate;

  // The DDL scripts are loaded from the classpath and executed verbatim (after optionally
  // stripping Cloud Spanner-specific statements, see updateDdlStatements).
  @Value("classpath:create_schema.sql")
  private Resource createSchemaFile;

  @Value("classpath:drop_schema.sql")
  private Resource dropSchemaFile;

  /** This value is determined once using a system query, and then cached. */
  private final Supplier<Boolean> isCloudSpannerPG;

  public DatabaseSeeder(JdbcTemplate jdbcTemplate) {
    this.jdbcTemplate = jdbcTemplate;
    this.isCloudSpannerPG =
        Suppliers.memoize(() -> JdbcConfiguration.isCloudSpannerPG(jdbcTemplate));
  }

  /** Reads a resource file into a string. */
  private static String resourceAsString(Resource resource) {
    try (Reader reader = new InputStreamReader(resource.getInputStream(), UTF_8)) {
      return FileCopyUtils.copyToString(reader);
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  /**
   * Returns true if this application is currently running on a Cloud Spanner PostgreSQL database,
   * and false if it is running on an open-source PostgreSQL database.
   */
  private boolean isCloudSpanner() {
    return isCloudSpannerPG.get();
  }

  /**
   * Removes all statements that start with a 'skip_on_open_source_pg' comment if the application is
   * running on open-source PostgreSQL. This ensures that we can use the same DDL script both on
   * Cloud Spanner and on open-source PostgreSQL. It also removes any empty statements in the given
   * array.
   */
  private String[] updateDdlStatements(String[] statements) {
    if (!isCloudSpanner()) {
      for (int i = 0; i < statements.length; i++) {
        // Replace any line that starts with '/* skip_on_open_source_pg */' with an empty string.
        statements[i] =
            statements[i].replaceAll("(?m)^\\s*/\\*\\s*skip_on_open_source_pg\\s*\\*/.+$", "");
      }
    }
    // Remove any empty statements from the script.
    return Arrays.stream(statements)
        .filter(statement -> !statement.isBlank())
        .toArray(String[]::new);
  }

  /** Creates the database schema if it does not yet exist. */
  public void createDatabaseSchemaIfNotExists() {
    // We can safely just split the script based on ';', as we know that there are no literals or
    // other strings that contain semicolons in the script.
    String[] statements = updateDdlStatements(resourceAsString(createSchemaFile).split(";"));
    // Execute all the DDL statements as a JDBC batch. That ensures that Cloud Spanner will apply
    // all statements in a single DDL batch, which again is a lot more efficient than executing them
    // one-by-one.
    jdbcTemplate.batchUpdate(statements);
  }

  /** Drops the database schema if it exists. */
  public void dropDatabaseSchemaIfExists() {
    // We can safely just split the script based on ';', as we know that there are no literals or
    // other strings that contain semicolons in the script.
    String[] statements = updateDdlStatements(resourceAsString(dropSchemaFile).split(";"));
    // Execute all the DDL statements as a JDBC batch. That ensures that Cloud Spanner will apply
    // all statements in a single DDL batch, which again is a lot more efficient than executing them
    // one-by-one.
    jdbcTemplate.batchUpdate(statements);
  }

  /** Deletes all data currently in the sample tables. */
  public void deleteTestData() {
    // Delete all data in one batch. The order matters: child/referencing tables are deleted
    // before the tables that they reference.
    jdbcTemplate.batchUpdate(
        "delete from concerts",
        "delete from venues",
        "delete from tracks",
        "delete from albums",
        "delete from singers");
  }

  /** Inserts some initial test data into the database. */
  public void insertTestData() {
    jdbcTemplate.batchUpdate(
        "insert into singers (first_name, last_name) values (?, ?)",
        new BatchPreparedStatementSetter() {
          @Override
          public void setValues(@Nonnull PreparedStatement preparedStatement, int i)
              throws SQLException {
            preparedStatement.setString(1, INITIAL_SINGERS.get(i).getFirstName());
            preparedStatement.setString(2, INITIAL_SINGERS.get(i).getLastName());
          }

          @Override
          public int getBatchSize() {
            return INITIAL_SINGERS.size();
          }
        });

    // Fetch the generated singer ids so the albums can reference them.
    List<Long> singerIds =
        jdbcTemplate.query(
            "select id from singers",
            resultSet -> {
              ImmutableList.Builder<Long> builder = ImmutableList.builder();
              while (resultSet.next()) {
                builder.add(resultSet.getLong(1));
              }
              return builder.build();
            });
    // Insert on average 20 albums per singer, each assigned to a random singer.
    jdbcTemplate.batchUpdate(
        "insert into albums (title, marketing_budget, release_date, cover_picture, singer_id) values (?, ?, ?, ?, ?)",
        new BatchPreparedStatementSetter() {
          @Override
          public void setValues(@Nonnull PreparedStatement preparedStatement, int i)
              throws SQLException {
            preparedStatement.setString(1, randomTitle());
            preparedStatement.setBigDecimal(2, randomBigDecimal());
            preparedStatement.setObject(3, randomDate());
            preparedStatement.setBytes(4, randomBytes());
            preparedStatement.setLong(5, randomElement(singerIds));
          }

          @Override
          public int getBatchSize() {
            return INITIAL_SINGERS.size() * 20;
          }
        });
  }

  /** Generates a random title for an album or a track. */
  static String randomTitle() {
    return randomElement(ADJECTIVES) + " " + randomElement(NOUNS);
  }

  /** Returns a random element from the given list. */
  static <T> T randomElement(List<T> list) {
    return list.get(RANDOM.nextInt(list.size()));
  }

  /** Generates a random {@link BigDecimal} in the range [0, 1). */
  static BigDecimal randomBigDecimal() {
    return BigDecimal.valueOf(RANDOM.nextDouble());
  }

  /** Generates a random {@link LocalDate} between the years 1800 and 1999. */
  static LocalDate randomDate() {
    return LocalDate.of(RANDOM.nextInt(200) + 1800, RANDOM.nextInt(12) + 1, RANDOM.nextInt(28) + 1);
  }

  /** Generates a random byte array with a length between 4 and 1024 bytes. */
  static byte[] randomBytes() {
    int size = RANDOM.nextInt(1020) + 4;
    byte[] res = new byte[size];
    RANDOM.nextBytes(res);
    return res;
  }

  /** Some randomly generated nouns that are used to generate random titles. */
  private static final ImmutableList<String> NOUNS =
      ImmutableList.of(
          "apple", "banana", "cherry", "dog", "elephant", "fish", "grass", "house", "key", "lion",
          "monkey", "nail", "orange", "pen", "queen", "rain", "shoe", "tree", "umbrella", "van",
          "whale", "xylophone", "zebra");

  /** Some randomly generated adjectives that are used to generate random titles. */
  private static final ImmutableList<String> ADJECTIVES =
      ImmutableList.of(
          "able", "angelic", "artistic", "athletic", "attractive", "autumnal", "calm", "careful",
          "cheerful", "clever", "colorful", "confident", "courageous", "creative", "curious",
          "daring", "determined", "different", "dreamy", "efficient", "elegant", "energetic",
          "enthusiastic", "exciting", "expressive", "faithful", "fantastic", "funny", "gentle",
          "gifted", "great", "happy", "helpful", "honest", "hopeful", "imaginative", "intelligent",
          "interesting", "inventive", "joyful", "kind", "knowledgeable", "loving", "loyal",
          "magnificent", "mature", "mysterious", "natural", "nice", "optimistic", "peaceful",
          "perfect", "pleasant", "powerful", "proud", "quick", "relaxed", "reliable", "responsible",
          "romantic", "safe", "sensitive", "sharp", "simple", "sincere", "skillful", "smart",
          "sociable", "strong", "successful", "sweet", "talented", "thankful", "thoughtful",
          "unique", "upbeat", "valuable", "victorious", "vivacious", "warm", "wealthy", "wise",
          "wonderful", "worthy", "youthful");
}
+ */ + +package com.google.cloud.spanner.sample; + +import org.springframework.boot.context.event.ApplicationEnvironmentPreparedEvent; +import org.springframework.context.ApplicationListener; +import org.springframework.core.env.ConfigurableEnvironment; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.images.PullPolicy; +import org.testcontainers.utility.DockerImageName; + +public class EmulatorInitializer + implements ApplicationListener { + private GenericContainer emulator; + + @Override + public void onApplicationEvent(ApplicationEnvironmentPreparedEvent event) { + ConfigurableEnvironment environment = event.getEnvironment(); + boolean useEmulator = + Boolean.TRUE.equals(environment.getProperty("spanner.emulator", Boolean.class)); + if (!useEmulator) { + return; + } + + emulator = + new GenericContainer<>(DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator")); + emulator.withImagePullPolicy(PullPolicy.alwaysPull()); + emulator.addExposedPort(9010); + emulator.setWaitStrategy(Wait.forListeningPorts(9010)); + emulator.start(); + + System.setProperty("spanner.endpoint", "//localhost:" + emulator.getMappedPort(9010)); + } + + public void stopEmulator() { + if (this.emulator != null) { + this.emulator.stop(); + } + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java new file mode 100644 index 000000000000..7480d80c0cbb --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/JdbcConfiguration.java @@ -0,0 +1,37 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import com.google.cloud.spanner.jdbc.CloudSpannerJdbcConnection; +import org.springframework.context.annotation.Configuration; +import org.springframework.jdbc.core.ConnectionCallback; +import org.springframework.jdbc.core.JdbcOperations; + +@Configuration +public class JdbcConfiguration { + + /** Returns true if the current database is a Cloud Spanner PostgreSQL database. */ + public static boolean isCloudSpannerPG(JdbcOperations operations) { + return Boolean.TRUE.equals( + operations.execute( + (ConnectionCallback) + connection -> + connection.isWrapperFor(CloudSpannerJdbcConnection.class) + && com.google.cloud.spanner.Dialect.POSTGRESQL.equals( + connection.unwrap(CloudSpannerJdbcConnection.class).getDialect()))); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java new file mode 100644 index 000000000000..2dbffd0e76b5 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/AbstractEntity.java @@ -0,0 +1,73 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +import java.time.OffsetDateTime; + +public abstract class AbstractEntity { + + /** This ID is generated using a (bit-reversed) sequence. */ + private Long id; + + private OffsetDateTime createdAt; + + private OffsetDateTime updatedAt; + + @Override + public boolean equals(Object o) { + if (!(o instanceof AbstractEntity)) { + return false; + } + AbstractEntity other = (AbstractEntity) o; + if (this == other) { + return true; + } + return this.getClass().equals(other.getClass()) + && this.id != null + && other.id != null + && this.id.equals(other.id); + } + + @Override + public int hashCode() { + return this.id == null ? 
0 : this.id.hashCode(); + } + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public OffsetDateTime getCreatedAt() { + return createdAt; + } + + protected void setCreatedAt(OffsetDateTime createdAt) { + this.createdAt = createdAt; + } + + public OffsetDateTime getUpdatedAt() { + return updatedAt; + } + + protected void setUpdatedAt(OffsetDateTime updatedAt) { + this.updatedAt = updatedAt; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java new file mode 100644 index 000000000000..57df330bfca2 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Album.java @@ -0,0 +1,84 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.sample.entities;

import java.math.BigDecimal;
import java.time.LocalDate;

/** Entity for the 'albums' table. Each album references its singer through {@code singerId}. */
public class Album extends AbstractEntity {

  private String title;

  private BigDecimal marketingBudget;

  private LocalDate releaseDate;

  // NOTE(review): the cover picture byte array is exposed without a defensive copy —
  // presumably intentional for MyBatis mapping; confirm before sharing instances.
  private byte[] coverPicture;

  private Long singerId;

  /** No-arg constructor, used by the mapping framework. */
  public Album() {}

  public Album(String title) {
    this.title = title;
  }

  @Override
  public String toString() {
    return getTitle();
  }

  public String getTitle() {
    return title;
  }

  public void setTitle(String title) {
    this.title = title;
  }

  public BigDecimal getMarketingBudget() {
    return marketingBudget;
  }

  public void setMarketingBudget(BigDecimal marketingBudget) {
    this.marketingBudget = marketingBudget;
  }

  public LocalDate getReleaseDate() {
    return releaseDate;
  }

  public void setReleaseDate(LocalDate releaseDate) {
    this.releaseDate = releaseDate;
  }

  public byte[] getCoverPicture() {
    return coverPicture;
  }

  public void setCoverPicture(byte[] coverPicture) {
    this.coverPicture = coverPicture;
  }

  public Long getSingerId() {
    return singerId;
  }

  public void setSingerId(Long singerId) {
    this.singerId = singerId;
  }
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.sample.entities;

import java.time.OffsetDateTime;

/**
 * Entity for the 'concerts' table. A concert links a {@link Singer} to a {@link Venue} and has a
 * start and end time.
 */
public class Concert extends AbstractEntity {

  private Long venueId;

  private Long singerId;

  private String name;

  private OffsetDateTime startTime;

  private OffsetDateTime endTime;

  /** Creates a concert for the given venue and singer. Both must already have an id. */
  public Concert(Venue venue, Singer singer, String name) {
    this.venueId = venue.getId();
    this.singerId = singer.getId();
    this.name = name;
  }

  public Long getVenueId() {
    return venueId;
  }

  public void setVenueId(Long venueId) {
    this.venueId = venueId;
  }

  public Long getSingerId() {
    return singerId;
  }

  public void setSingerId(Long singerId) {
    this.singerId = singerId;
  }

  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  public OffsetDateTime getStartTime() {
    return startTime;
  }

  public void setStartTime(OffsetDateTime startTime) {
    this.startTime = startTime;
  }

  public OffsetDateTime getEndTime() {
    return endTime;
  }

  public void setEndTime(OffsetDateTime endTime) {
    this.endTime = endTime;
  }
}
b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Singer.java @@ -0,0 +1,73 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +public class Singer extends AbstractEntity { + + private String firstName; + + private String lastName; + + /** The full name is generated by the database using a generated column. */ + private String fullName; + + private Boolean active; + + public Singer() {} + + public Singer(String firstName, String lastName) { + this.firstName = firstName; + this.lastName = lastName; + } + + @Override + public String toString() { + return getFullName(); + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + public String getLastName() { + return lastName; + } + + public void setLastName(String lastName) { + this.lastName = lastName; + } + + public String getFullName() { + return fullName; + } + + public void setFullName(String fullName) { + this.fullName = fullName; + } + + public Boolean getActive() { + return active; + } + + public void setActive(Boolean active) { + this.active = active; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java 
b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java new file mode 100644 index 000000000000..51fb756c90fc --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Track.java @@ -0,0 +1,66 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.entities; + +/** + * The "tracks" table is interleaved in "albums". That means that the first part of the primary key + * (the "id" column) references the Album that this Track belongs to. That again means that we do + * not auto-generate the id for this entity. + */ +public class Track extends AbstractEntity { + + /** + * This is the second part of the primary key of a Track. The first part, the 'id' column is + * defined in the {@link AbstractEntity} super class. 
+ */ + private int trackNumber; + + private String title; + + private Double sampleRate; + + public Track(Album album, int trackNumber, String title, Double sampleRate) { + setId(album.getId()); + this.trackNumber = trackNumber; + this.title = title; + this.sampleRate = sampleRate; + } + + public int getTrackNumber() { + return trackNumber; + } + + public void setTrackNumber(int trackNumber) { + this.trackNumber = trackNumber; + } + + public String getTitle() { + return title; + } + + public void setTitle(String title) { + this.title = title; + } + + public Double getSampleRate() { + return sampleRate; + } + + public void setSampleRate(Double sampleRate) { + this.sampleRate = sampleRate; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java new file mode 100644 index 000000000000..ff7ee5049a5d --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/entities/Venue.java @@ -0,0 +1,43 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.entities; + +public class Venue extends AbstractEntity { + private String name; + + private String description; + + public Venue(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/AlbumMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/AlbumMapper.java new file mode 100644 index 000000000000..85a05f28e652 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/AlbumMapper.java @@ -0,0 +1,48 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.sample.mappers;

import com.google.cloud.spanner.sample.entities.Album;
import java.util.List;
import java.util.Optional;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Options;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;

/** MyBatis mapper for the "albums" table. */
@Mapper
public interface AlbumMapper {

  /** Returns the album with the given id, or null if it does not exist. */
  @Select("SELECT * FROM albums WHERE id = #{albumId}")
  Album get(@Param("albumId") long albumId);

  // Parameterized as Optional<Album> (raw types lose compile-time safety; the original source
  // had lost the type arguments).
  /** Returns any single album, if at least one exists. */
  @Select("SELECT * FROM albums LIMIT 1")
  Optional<Album> getFirst();

  /** Returns the number of albums of the given singer. */
  @Select("SELECT COUNT(1) FROM albums WHERE singer_id = #{singerId}")
  long countAlbumsBySingerId(@Param("singerId") long singerId);

  /** Returns all albums of the given singer. */
  @Select("SELECT * FROM albums WHERE singer_id = #{singerId}")
  List<Album> findAlbumsBySingerId(@Param("singerId") long singerId);

  /**
   * Inserts a new album. The database generates the primary key; MyBatis writes it back into the
   * 'id' property of the given Album.
   */
  @Insert(
      "INSERT INTO albums (title, marketing_budget, release_date, cover_picture, singer_id) "
          + "VALUES (#{title}, #{marketingBudget}, #{releaseDate}, #{coverPicture}, #{singerId})")
  @Options(useGeneratedKeys = true, keyProperty = "id")
  int insert(Album album);
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample.mappers; + +import com.google.cloud.spanner.sample.entities.Venue; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; + +@Mapper +public interface ConcertMapper { + + @Select("SELECT * FROM concerts WHERE id = #{concertId}") + Venue get(@Param("concertId") long concertId); +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/SingerMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/SingerMapper.java new file mode 100644 index 000000000000..e7a0c5f98f99 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/SingerMapper.java @@ -0,0 +1,80 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.sample.mappers;

import com.google.cloud.spanner.sample.entities.Singer;
import java.util.List;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Options;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Select;
import org.apache.ibatis.annotations.Update;

/** MyBatis mapper for the "singers" table. */
@Mapper
public interface SingerMapper {

  /** Returns the singer with the given id, or null if it does not exist. */
  @Select("SELECT * FROM singers WHERE id = #{singerId}")
  Singer get(@Param("singerId") long singerId);

  /** Returns a pseudo-random singer by ordering on a hash of the last name. */
  @Select("SELECT * FROM singers ORDER BY sha256(last_name::bytea) LIMIT 1")
  Singer getRandom();

  /** Returns all singers, ordered by name. */
  @Select("SELECT * FROM singers ORDER BY last_name, first_name, id")
  List<Singer> findAll();

  /** Returns all singers whose last name starts with the given prefix. */
  @Select("SELECT * FROM singers WHERE starts_with(last_name, #{lastName})")
  List<Singer> findSingersByLastNameStartingWith(@Param("lastName") String lastName);

  /**
   * Inserts a new singer record and returns both the generated primary key value and the generated
   * full name.
   */
  @Insert(
      "INSERT INTO singers (first_name, last_name, active) "
          + "VALUES (#{firstName}, #{lastName}, #{active})")
  @Options(useGeneratedKeys = true, keyProperty = "id,fullName")
  int insert(Singer singer);

  /**
   * Executes an insert-or-update statement for a Singer record. Note that the id must have been set
   * manually on the Singer entity before calling this method, and that Spanner requires that all
   * columns for the INSERT statement must also be included in the UPDATE statement, including the
   * 'id' column. The statement only returns the 'fullName' property, because the 'id' is already
   * known.
   */
  @Insert(
      "INSERT INTO singers (id, first_name, last_name, active) "
          + "VALUES (#{id}, #{firstName}, #{lastName}, #{active}) "
          + "ON CONFLICT (id) DO UPDATE SET "
          + "id=excluded.id, "
          + "first_name=excluded.first_name, "
          + "last_name=excluded.last_name, "
          + "active=excluded.active")
  @Options(useGeneratedKeys = true, keyProperty = "fullName")
  int insertOrUpdate(Singer singer);

  /**
   * Updates an existing singer and returns the generated full name.
   *
   * <p>Fix: MyBatis {@code #{...}} placeholders reference Java property names of the Singer
   * parameter, which are camelCase ({@code firstName}, {@code lastName}) — not the column names.
   * The previous {@code #{first_name}}/{@code #{last_name}} would fail, as Singer has no such
   * properties (the map-underscore-to-camel-case setting only applies to result columns).
   */
  @Update(
      "UPDATE singers SET "
          + "first_name=#{firstName}, "
          + "last_name=#{lastName}, "
          + "active=#{active} "
          + "WHERE id=#{id}")
  @Options(useGeneratedKeys = true, keyProperty = "fullName")
  int update(Singer singer);
}
+ */ + +package com.google.cloud.spanner.sample.mappers; + +import com.google.cloud.spanner.sample.entities.Track; +import org.apache.ibatis.annotations.Insert; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Options; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; + +@Mapper +public interface TrackMapper { + + @Select("SELECT * FROM tracks WHERE id = #{albumId} AND track_number = #{trackNumber}") + Track get(@Param("albumId") long albumId, @Param("trackNumber") long trackNumber); + + @Insert( + "INSERT INTO tracks (id, track_number, title, sample_rate) " + + "VALUES (#{id}, #{trackNumber}, #{title}, #{sampleRate})") + @Options(useGeneratedKeys = true, keyProperty = "id") + int insert(Track track); +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/VenueMapper.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/VenueMapper.java new file mode 100644 index 000000000000..ab81c45cd54c --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/mappers/VenueMapper.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.mappers; + +import com.google.cloud.spanner.sample.entities.Venue; +import org.apache.ibatis.annotations.Mapper; +import org.apache.ibatis.annotations.Param; +import org.apache.ibatis.annotations.Select; + +@Mapper +public interface VenueMapper { + + @Select("SELECT * FROM venues WHERE id = #{venueId}") + Venue get(@Param("venueId") long venueId); +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java new file mode 100644 index 000000000000..4e326a47cc9a --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/service/AlbumService.java @@ -0,0 +1,49 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.sample.service; + +import com.google.cloud.spanner.sample.entities.Album; +import com.google.cloud.spanner.sample.entities.Track; +import com.google.cloud.spanner.sample.mappers.AlbumMapper; +import com.google.cloud.spanner.sample.mappers.TrackMapper; +import org.springframework.stereotype.Service; +import org.springframework.transaction.annotation.Transactional; + +@Service +public class AlbumService { + private final AlbumMapper albumMapper; + + private final TrackMapper trackMapper; + + public AlbumService(AlbumMapper albumMapper, TrackMapper trackMapper) { + this.albumMapper = albumMapper; + this.trackMapper = trackMapper; + } + + /** Creates an album and a set of tracks in a read/write transaction. */ + @Transactional + public Album createAlbumAndTracks(Album album, Track... tracks) { + // Saving an album will update the album entity with the generated primary key. + albumMapper.insert(album); + for (Track track : tracks) { + // Set the id that was generated on the Album before saving it. + track.setId(album.getId()); + trackMapper.insert(track); + } + return album; + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java new file mode 100644 index 000000000000..6298bb63ebd2 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/java/com/google/cloud/spanner/sample/service/SingerService.java @@ -0,0 +1,67 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.sample.service;

import com.google.cloud.spanner.sample.entities.Album;
import com.google.cloud.spanner.sample.entities.Singer;
import com.google.cloud.spanner.sample.mappers.AlbumMapper;
import com.google.cloud.spanner.sample.mappers.SingerMapper;
import com.google.common.collect.ImmutableList;
import java.util.List;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

/** Service with transactional operations that span singers and their albums. */
@Service
public class SingerService {

  private final SingerMapper singerRepository;

  private final AlbumMapper albumRepository;

  /** Constructor-injected by Spring. */
  public SingerService(SingerMapper singerRepository, AlbumMapper albumRepository) {
    this.singerRepository = singerRepository;
    this.albumRepository = albumRepository;
  }

  /** Creates a singer and a list of albums in a read/write transaction. */
  @Transactional
  public Singer createSingerAndAlbums(Singer singer, Album... albums) {
    // Saving a singer will update the singer entity with the generated primary key.
    singerRepository.insert(singer);
    for (Album album : albums) {
      // Set the singerId that was generated on the Album before saving it.
      album.setSingerId(singer.getId());
      albumRepository.insert(album);
    }
    return singer;
  }

  /**
   * Searches for all singers that have a last name starting with any of the given prefixes. This
   * method uses a read-only transaction. Read-only transactions should be preferred to read/write
   * transactions whenever possible, as read-only transactions do not take locks.
   *
   * <p>Fix: restored the generic type arguments ({@code List<Singer>},
   * {@code ImmutableList.Builder<Singer>}); the raw types lost compile-time safety.
   */
  @Transactional(readOnly = true)
  public List<Singer> listSingersWithLastNameStartingWith(String... prefixes) {
    ImmutableList.Builder<Singer> result = ImmutableList.builder();
    // This is not the most efficient way to search for this, but the main purpose of this method
    // is to show how to use read-only transactions.
    for (String prefix : prefixes) {
      result.addAll(singerRepository.findSingersByLastNameStartingWith(prefix));
    }
    return result.build();
  }
}
+spanner.project=my-project +spanner.instance=my-instance +spanner.database=mybatis-sample + +spring.datasource.url=jdbc:cloudspanner:${spanner.endpoint}/projects/${spanner.project}/instances/${spanner.instance}/databases/${spanner.database};dialect=POSTGRESQL;autoConfigEmulator=${spanner.emulator};${spanner.additional_properties} +spring.datasource.driver-class-name=com.google.cloud.spanner.jdbc.JdbcDriver diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/application-pg.properties b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/application-pg.properties new file mode 100644 index 000000000000..0605cd3ab438 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/application-pg.properties @@ -0,0 +1,7 @@ + +# This profile uses an open-source PostgreSQL database. + +spring.datasource.url=jdbc:postgresql://localhost:5432/mybatis-sample +spring.datasource.driver-class-name=org.postgresql.Driver +spring.datasource.username=postgres +spring.datasource.password=mysecretpassword diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/application.properties b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/application.properties new file mode 100644 index 000000000000..a6900a8ef0c6 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/application.properties @@ -0,0 +1,13 @@ + +# This application can use both a Cloud Spanner PostgreSQL database or an open-source PostgreSQL +# database. Which database is used is determined by the active profile: +# 1. 'cs' means use Cloud Spanner. +# 2. 'pg' means use open-source PostgreSQL. + +# Activate the Cloud Spanner profile by default. +# Change to 'pg' to activate the PostgreSQL profile. +spring.profiles.default=cs + +# Map column names with an underscore to property names in camel case. +# E.g. 
column 'full_name' maps to Java property 'fullName'. +mybatis.configuration.map-underscore-to-camel-case=true diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/create_schema.sql b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/create_schema.sql new file mode 100644 index 000000000000..60552d3ad107 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/create_schema.sql @@ -0,0 +1,68 @@ +/* + This script creates the database schema for this sample application. + All lines that start with /* skip_on_open_source_pg */ are skipped when the application is running on a + normal PostgreSQL database. The same lines are executed when the application is running on a Cloud + Spanner database. The script is executed by the DatabaseSeeder class. +*/ + +create sequence if not exists id_generator +/* skip_on_open_source_pg */ bit_reversed_positive +; + +create table if not exists singers ( + id bigint not null primary key default nextval('id_generator'), + first_name varchar, + last_name varchar, + full_name varchar generated always as (CASE WHEN first_name IS NULL THEN last_name + WHEN last_name IS NULL THEN first_name + ELSE first_name || ' ' || last_name END) stored, + active boolean default true, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp +); + +create table if not exists albums ( + id bigint not null primary key default nextval('id_generator'), + title varchar not null, + marketing_budget numeric, + release_date date, + cover_picture bytea, + singer_id bigint not null, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp, + constraint fk_albums_singers foreign key (singer_id) references singers (id) +); + +create table if not exists tracks ( + id bigint not null, + track_number bigint not null, + title varchar not null, + sample_rate float8 not null, + 
created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp, + primary key (id, track_number) +) +/* skip_on_open_source_pg */ interleave in parent albums on delete cascade +; + +create table if not exists venues ( + id bigint not null primary key default nextval('id_generator'), + name varchar not null, + description jsonb not null, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp +); + +create table if not exists concerts ( + id bigint not null primary key default nextval('id_generator'), + venue_id bigint not null, + singer_id bigint not null, + name varchar not null, + start_time timestamptz not null, + end_time timestamptz not null, + created_at timestamptz default current_timestamp, + updated_at timestamptz default current_timestamp, + constraint fk_concerts_venues foreign key (venue_id) references venues (id), + constraint fk_concerts_singers foreign key (singer_id) references singers (id), + constraint chk_end_time_after_start_time check (end_time > start_time) +); diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/drop_schema.sql b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/drop_schema.sql new file mode 100644 index 000000000000..23e7b65d3bb1 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/main/resources/drop_schema.sql @@ -0,0 +1,5 @@ +drop table if exists concerts; +drop table if exists venues; +drop table if exists tracks; +drop table if exists albums; +drop table if exists singers; diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java new file mode 100644 index 000000000000..9c6c900a3949 --- /dev/null +++ 
b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationEmulatorTest.java @@ -0,0 +1,31 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ApplicationEmulatorTest { + + @Test + public void testRunApplicationOnEmulator() { + System.setProperty("spanner.emulator", "true"); + Application.main(new String[] {}); + } +} diff --git a/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java new file mode 100644 index 000000000000..f0cc17f2f783 --- /dev/null +++ b/java-spanner-jdbc/samples/spring-data-mybatis/postgresql/src/test/java/com/google/cloud/spanner/sample/ApplicationTest.java @@ -0,0 +1,1026 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.sample; + +import static com.google.cloud.spanner.sample.DatabaseSeeder.INITIAL_SINGERS; +import static com.google.cloud.spanner.sample.DatabaseSeeder.randomDate; +import static com.google.cloud.spanner.sample.DatabaseSeeder.randomTitle; +import static junit.framework.TestCase.assertEquals; +import static junit.framework.TestCase.assertTrue; +import static org.junit.Assert.assertNotEquals; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.sample.entities.Singer; +import com.google.common.collect.Streams; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Value; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeAnnotationCode; 
+import com.google.spanner.v1.TypeCode; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.LongStream; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class ApplicationTest extends AbstractMockServerTest { + + @BeforeClass + public static void setupQueryResults() { + // Set the database dialect. + mockSpanner.putStatementResult(StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + // Set up a result for the dialect check that is executed by the JdbcConfiguration class. + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of( + "select 1 " + + "from information_schema.database_options " + + "where schema_name='public' " + + "and option_name='database_dialect' " + + "and option_value='POSTGRESQL'"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build())); + // Add a DDL response to the server. + addDdlResponseToSpannerAdmin(); + + // Set up results for the 'delete all test data' operations. 
+ mockSpanner.putStatementResult( + StatementResult.update(Statement.of("delete from concerts"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from venues"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from tracks"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from albums"), 0L)); + mockSpanner.putStatementResult(StatementResult.update(Statement.of("delete from singers"), 0L)); + + // Set up results for inserting test data. + for (Singer singer : INITIAL_SINGERS) { + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder("insert into singers (first_name, last_name) values ($1, $2)") + .bind("p1") + .to(singer.getFirstName()) + .bind("p2") + .to(singer.getLastName()) + .build(), + 1L)); + } + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("select id from singers"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addAllRows( + LongStream.rangeClosed(1L, INITIAL_SINGERS.size()) + .mapToObj( + id -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(id))) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.update( + Statement.of( + "insert into albums (title, marketing_budget, release_date, cover_picture, singer_id) values ($1, $2, $3, $4, $5)"), + 1L)); + + // Set up results for the queries that the application runs. 
+ mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT * FROM singers ORDER BY last_name, first_name, id"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addAllRows( + Streams.mapWithIndex( + INITIAL_SINGERS.stream(), + (singer, index) -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(index + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setStringValue( + singer.getFirstName() + " " + singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getFirstName()) + .build()) + .build()) + 
.collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of("SELECT COUNT(1) FROM albums WHERE singer_id = $1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("c") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("10").build()) + .build()) + .build())); + for (long singerId : LongStream.rangeClosed(1L, INITIAL_SINGERS.size()).toArray()) { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder("SELECT * FROM albums WHERE singer_id = $1") + .bind("p1") + .to(Long.reverse(singerId)) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType( + Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + 
Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addAllRows( + IntStream.rangeClosed(1, 10) + .mapToObj( + albumId -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(albumId * singerId))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(singerId)))) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(randomDate().toString()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + } + int singerIndex = ThreadLocalRandom.current().nextInt(INITIAL_SINGERS.size()); + Singer randomSinger = INITIAL_SINGERS.get(singerIndex); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("SELECT * FROM singers ORDER BY sha256(last_name::bytea) LIMIT 1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + 
.setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(singerIndex + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder().setStringValue(randomSinger.getLastName()).build()) + .addValues( + Value.newBuilder() + .setStringValue( + randomSinger.getFirstName() + " " + randomSinger.getLastName()) + .build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomSinger.getFirstName()).build()) + .build()) + .build())); + + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder( + "INSERT INTO singers (first_name, last_name, active) VALUES ($1, $2, $3)\n" + + "RETURNING *") + .bind("p1") + .to("Amethyst") + .bind("p2") + .to("Jiang") + .bind("p3") + .to((com.google.cloud.spanner.Value) null) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + 
.setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(INITIAL_SINGERS.size() + 2))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues(Value.newBuilder().setStringValue("Amethyst").build()) + .addValues(Value.newBuilder().setStringValue("Amethyst Jiang").build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setStringValue("Jiang").build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT INTO albums (title, marketing_budget, release_date, cover_picture, singer_id) VALUES ($1, $2, $3, $4, $5)\n" + + "RETURNING *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + 
Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setStringValue(String.valueOf(1L))) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomDate().toString()).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT INTO tracks (id, track_number, title, sample_rate) VALUES ($1, $2, $3, $4)\n" + + "RETURNING *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("track_number") + 
.setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("sample_rate") + .setType( + Type.newBuilder().setCode(TypeCode.FLOAT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue("1").build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setNumberValue(3.14d)) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of("select * from albums limit 1"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("title") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("singer_id") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + 
.setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("release_date") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("cover_picture") + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("marketing_budget") + .setType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue(String.valueOf(Long.reverse(1L))) + .build()) + .addValues(Value.newBuilder().setStringValue(randomTitle())) + .addValues(Value.newBuilder().setStringValue(String.valueOf(1L))) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues( + Value.newBuilder().setStringValue(randomDate().toString()).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + for (String prefix : new String[] {"J", "A", "B", "C"}) { + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder("SELECT * FROM singers WHERE starts_with(last_name, $1)") + .bind("p1") + .to(prefix) + .build(), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + 
Field.newBuilder() + .setName("last_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addAllRows( + Streams.mapWithIndex( + INITIAL_SINGERS.stream() + .filter( + singer -> + singer.getLastName().startsWith(prefix.substring(0, 1))), + (singer, index) -> + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(Long.reverse(index + 1))) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setStringValue( + singer.getFirstName() + + " " + + singer.getLastName()) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setNullValue(NullValue.NULL_VALUE) + .build()) + .addValues( + Value.newBuilder() + .setStringValue(singer.getFirstName()) + .build()) + .build()) + .collect(Collectors.toList())) + .build())); + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "INSERT INTO singers (id, first_name, last_name, active) VALUES ($1, $2, $3, $4) ON CONFLICT (id) DO UPDATE SET id=excluded.id, first_name=excluded.first_name, last_name=excluded.last_name, active=excluded.active\n" + + "RETURNING *"), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + 
.setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("id") + .setType( + Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("active") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("last_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("full_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("updated_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("created_at") + .setType( + Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("first_name") + .setType( + Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues( + Value.newBuilder() + .setStringValue( + String.valueOf(ThreadLocalRandom.current().nextLong())) + .build()) + .addValues(Value.newBuilder().setBoolValue(true).build()) + .addValues(Value.newBuilder().setStringValue("Russel").build()) + .addValues(Value.newBuilder().setStringValue("Beatriz Russel").build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build()) + .addValues(Value.newBuilder().setStringValue("Beatriz").build()) + .build()) + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + } + } + + @Test + public void testRunApplication() { + System.setProperty("spanner.emulator", "false"); + System.setProperty("spanner.endpoint", "//localhost:" + getPort()); + System.setProperty("spanner.additional_properties", "usePlainText=true"); + Application.main(new String[] {}); + + assertEquals( + 
39, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter( + request -> + !request.getSql().equals("SELECT 1") + && !request + .getSql() + .equals( + "SELECT * FROM singers ORDER BY sha256(last_name::bytea) LIMIT 1")) + .count()); + assertEquals(3, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(6, mockSpanner.countRequestsOfType(CommitRequest.class)); + + // Verify that the service methods use transactions. + String insertSingerSql = + "INSERT INTO singers (first_name, last_name, active) VALUES ($1, $2, $3)\nRETURNING *"; + assertEquals( + 1, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertSingerSql)) + .count()); + ExecuteSqlRequest insertSingerRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertSingerSql)) + .findFirst() + .orElseThrow(); + assertTrue(insertSingerRequest.hasTransaction()); + assertTrue(insertSingerRequest.getTransaction().hasBegin()); + assertTrue(insertSingerRequest.getTransaction().getBegin().hasReadWrite()); + String insertAlbumSql = + "INSERT INTO albums (title, marketing_budget, release_date, cover_picture, singer_id) " + + "VALUES ($1, $2, $3, $4, $5)\nRETURNING *"; + assertEquals( + 4, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertAlbumSql)) + .count()); + // The first 3 requests belong to the transaction that is executed together with the 'INSERT + // INTO singers' statement. 
+ List insertAlbumRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertAlbumSql)) + .toList() + .subList(0, 3); + ExecuteSqlRequest firstInsertAlbumRequest = insertAlbumRequests.get(0); + for (ExecuteSqlRequest request : insertAlbumRequests) { + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasId()); + assertEquals( + firstInsertAlbumRequest.getTransaction().getId(), request.getTransaction().getId()); + } + // Verify that the transaction is committed. + assertEquals( + 1, + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request + .getTransactionId() + .equals(firstInsertAlbumRequest.getTransaction().getId())) + .count()); + + // The last 'INSERT INTO albums' request belongs in a transaction with 7 'INSERT INTO tracks' + // requests. + ExecuteSqlRequest lastInsertAlbumRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertAlbumSql)) + .toList() + .get(3); + assertNotEquals( + lastInsertAlbumRequest.getTransaction().getId(), + firstInsertAlbumRequest.getTransaction().getId()); + assertTrue(lastInsertAlbumRequest.hasTransaction()); + assertTrue(lastInsertAlbumRequest.getTransaction().hasBegin()); + assertTrue(lastInsertAlbumRequest.getTransaction().getBegin().hasReadWrite()); + String insertTrackSql = + "INSERT INTO tracks (id, track_number, title, sample_rate) " + + "VALUES ($1, $2, $3, $4)\nRETURNING *"; + assertEquals( + 7, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertTrackSql)) + .count()); + List insertTrackRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(insertTrackSql)) + .toList(); + for (ExecuteSqlRequest request : insertTrackRequests) { + assertTrue(request.hasTransaction()); + 
assertTrue(request.getTransaction().hasId()); + assertEquals( + insertTrackRequests.get(0).getTransaction().getId(), request.getTransaction().getId()); + } + // Verify that the transaction is committed. + assertEquals( + 1, + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request + .getTransactionId() + .equals(insertTrackRequests.get(0).getTransaction().getId())) + .count()); + + // Verify that the SingerService#listSingersWithLastNameStartingWith(..) method uses a read-only + // transaction. + assertEquals( + 1, + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).stream() + .filter(request -> request.getOptions().hasReadOnly()) + .count()); + String selectSingersSql = "SELECT * FROM singers WHERE starts_with(last_name, $1)"; + assertEquals( + 4, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(selectSingersSql)) + .count()); + List selectSingersRequests = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(request -> request.getSql().equals(selectSingersSql)) + .toList() + .subList(1, 4); + ExecuteSqlRequest firstSelectSingersRequest = selectSingersRequests.get(0); + for (ExecuteSqlRequest request : selectSingersRequests) { + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasId()); + } + // Verify that the read-only transaction is not committed. 
+ assertEquals( + 0, + mockSpanner.getRequestsOfType(CommitRequest.class).stream() + .filter( + request -> + request + .getTransactionId() + .equals(firstSelectSingersRequest.getTransaction().getId())) + .count()); + } + + private static void addDdlResponseToSpannerAdmin() { + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance())) + .build()); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/JdbcDataTypeConverter.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/JdbcDataTypeConverter.java new file mode 100644 index 000000000000..62d52c6c36e8 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/JdbcDataTypeConverter.java @@ -0,0 +1,29 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; + +@InternalApi +public class JdbcDataTypeConverter { + + /** Converts a protobuf type to a Spanner type. 
*/ + @InternalApi + public static Type toSpannerType(com.google.spanner.v1.Type proto) { + return Type.fromProto(proto); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java new file mode 100644 index 000000000000..71fb105ee1fb --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/SessionPoolOptionsHelper.java @@ -0,0 +1,34 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.InternalApi; + +/** + * This class is only here to access a package-private method in the Spanner client library and will + * be removed in the future. 
+ */ +@InternalApi +public class SessionPoolOptionsHelper { + private SessionPoolOptionsHelper() {} + + @InternalApi + public static SessionPoolOptions.Builder useMultiplexedSessions( + SessionPoolOptions.Builder builder) { + return builder.setUseMultiplexedSession(true); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/connection/ConnectionOptionsHelper.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/connection/ConnectionOptionsHelper.java new file mode 100644 index 000000000000..75aef4947a60 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/connection/ConnectionOptionsHelper.java @@ -0,0 +1,41 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.connection; + +import com.google.api.core.InternalApi; +import com.google.cloud.spanner.connection.StatementExecutor.StatementExecutorType; + +@InternalApi +public class ConnectionOptionsHelper { + + @InternalApi + public static ConnectionOptions.Builder useDirectExecutorIfNotUseVirtualThreads( + String uri, ConnectionOptions.Builder builder) { + ConnectionState connectionState = new ConnectionState(ConnectionProperties.parseValues(uri)); + if (!connectionState.getValue(ConnectionProperties.USE_VIRTUAL_THREADS).getValue()) { + return builder.setStatementExecutorType(StatementExecutorType.DIRECT_EXECUTOR); + } + return builder; + } + + @InternalApi + public static boolean usesDirectExecutor(ConnectionOptions options) { + return options.getStatementExecutorType() == StatementExecutorType.DIRECT_EXECUTOR; + } + + private ConnectionOptionsHelper() {} +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/AbstractJdbcConnection.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/AbstractJdbcConnection.java new file mode 100644 index 000000000000..00d0823f00cf --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/AbstractJdbcConnection.java @@ -0,0 +1,288 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package com.google.cloud.spanner.jdbc;

import com.google.cloud.spanner.DatabaseClient;
import com.google.cloud.spanner.DatabaseId;
import com.google.cloud.spanner.Dialect;
import com.google.cloud.spanner.Spanner;
import com.google.cloud.spanner.SpannerException;
import com.google.cloud.spanner.connection.AbstractStatementParser;
import com.google.cloud.spanner.connection.ConnectionOptions;
import com.google.cloud.spanner.connection.ConnectionOptionsHelper;
import com.google.common.annotations.VisibleForTesting;
import com.google.rpc.Code;
import java.sql.CallableStatement;
import java.sql.ResultSet;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Struct;
import java.util.Properties;
import java.util.concurrent.Executor;

/**
 * Base class for Cloud Spanner JDBC connections.
 *
 * <p>Wraps a {@link com.google.cloud.spanner.connection.Connection} and implements the JDBC
 * methods that are either simple delegates to that connection or that Cloud Spanner does not
 * support. Unsupported features throw via {@code checkClosedAndThrowUnsupported}, which is
 * inherited from {@link AbstractJdbcWrapper}.
 */
abstract class AbstractJdbcConnection extends AbstractJdbcWrapper
    implements CloudSpannerJdbcConnection {
  // Error messages for JDBC features that this driver does not support.
  private static final String CALLABLE_STATEMENTS_UNSUPPORTED =
      "Callable statements are not supported";
  private static final String ONLY_SERIALIZABLE_OR_REPEATABLE_READ =
      "Only isolation levels TRANSACTION_SERIALIZABLE and TRANSACTION_REPEATABLE_READ are supported";
  private static final String ONLY_CLOSE_ALLOWED =
      "Only holdability CLOSE_CURSORS_AT_COMMIT is supported";
  private static final String SQLXML_UNSUPPORTED = "SQLXML is not supported";
  private static final String STRUCTS_UNSUPPORTED = "Structs are not supported";
  private static final String ABORT_UNSUPPORTED = "Abort is not supported";
  private static final String NETWORK_TIMEOUT_UNSUPPORTED = "Network timeout is not supported";
  // Format string for the SQLWarning that is pushed when an unsupported client info property is
  // set. Package-private so tests and JdbcDatabaseMetaData can reference the same text.
  static final String CLIENT_INFO_NOT_SUPPORTED =
      "Cloud Spanner does not support ClientInfo property %s";

  private final String connectionUrl;
  private final ConnectionOptions options;
  // The underlying Cloud Spanner connection that all real work is delegated to.
  private final com.google.cloud.spanner.connection.Connection spanner;

  // Client info properties, keyed by upper-case property name. Defaults come from
  // JdbcDatabaseMetaData.getDefaultClientInfoProperties().
  private final Properties clientInfo;
  private final boolean usesDirectExecutor;
  // Lazily initialized in getParser(); depends on the dialect of the underlying connection.
  private AbstractStatementParser parser;

  // Warnings are kept as a linked chain per the JDBC SQLWarning contract:
  // firstWarning is the head that getWarnings() returns, lastWarning is the tail
  // that pushWarning() appends to.
  private SQLWarning firstWarning = null;
  private SQLWarning lastWarning = null;

  /**
   * Creates a connection that wraps the {@link com.google.cloud.spanner.connection.Connection}
   * obtained from the given options.
   */
  AbstractJdbcConnection(String connectionUrl, ConnectionOptions options) throws SQLException {
    this.connectionUrl = connectionUrl;
    this.options = options;
    this.spanner = options.getConnection();
    this.clientInfo = new Properties(JdbcDatabaseMetaData.getDefaultClientInfoProperties());
    this.usesDirectExecutor = ConnectionOptionsHelper.usesDirectExecutor(options);
  }

  @Override
  public DatabaseId getDatabaseId() {
    return this.options.getDatabaseId();
  }

  @Override
  public DatabaseClient getDatabaseClient() {
    return getSpannerConnection().getDatabaseClient();
  }

  /** Return the corresponding {@link com.google.cloud.spanner.connection.Connection} */
  com.google.cloud.spanner.connection.Connection getSpannerConnection() {
    return spanner;
  }

  @Override
  public String getConnectionUrl() {
    return connectionUrl;
  }

  /** Returns the options that this connection was created from. */
  ConnectionOptions getConnectionOptions() {
    return options;
  }

  @Override
  public Spanner getSpanner() {
    return this.spanner.getSpanner();
  }

  /** True if statements are executed on the calling thread instead of a background executor. */
  boolean usesDirectExecutor() {
    return this.usesDirectExecutor;
  }

  @Override
  public Dialect getDialect() {
    return spanner.getDialect();
  }

  /**
   * Returns the statement parser for the dialect of this connection, creating it on first use.
   * Any {@link SpannerException} thrown while determining the dialect is translated to a
   * {@link SQLException}.
   */
  protected AbstractStatementParser getParser() throws SQLException {
    if (parser == null) {
      try {
        parser = AbstractStatementParser.getInstance(spanner.getDialect());
      } catch (SpannerException e) {
        throw JdbcSqlExceptionFactory.of(e);
      }
    }
    return parser;
  }

  @Override
  public CallableStatement prepareCall(String sql) throws SQLException {
    return checkClosedAndThrowUnsupported(CALLABLE_STATEMENTS_UNSUPPORTED);
  }

  @Override
  public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency)
      throws SQLException {
    return checkClosedAndThrowUnsupported(CALLABLE_STATEMENTS_UNSUPPORTED);
  }

  @Override
  public CallableStatement prepareCall(
      String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability)
      throws SQLException {
    return checkClosedAndThrowUnsupported(CALLABLE_STATEMENTS_UNSUPPORTED);
  }

  /**
   * Sets the default isolation level. Any of the four standard JDBC levels is accepted as a valid
   * argument, but only SERIALIZABLE and REPEATABLE_READ are actually supported; the others fail
   * the feature-support check.
   */
  @Override
  public void setTransactionIsolation(int level) throws SQLException {
    checkClosed();
    JdbcPreconditions.checkArgument(
        level == TRANSACTION_SERIALIZABLE
            || level == TRANSACTION_REPEATABLE_READ
            || level == TRANSACTION_READ_UNCOMMITTED
            || level == TRANSACTION_READ_COMMITTED,
        "Not a transaction isolation level");
    JdbcPreconditions.checkSqlFeatureSupported(
        JdbcDatabaseMetaData.supportsIsolationLevel(level), ONLY_SERIALIZABLE_OR_REPEATABLE_READ);
    spanner.setDefaultIsolationLevel(IsolationLevelConverter.convertToSpanner(level));
  }

  @Override
  public int getTransactionIsolation() throws SQLException {
    checkClosed();
    //noinspection MagicConstant
    return IsolationLevelConverter.convertToJdbc(spanner.getDefaultIsolationLevel());
  }

  /**
   * Validates the holdability value; only CLOSE_CURSORS_AT_COMMIT passes the feature-support
   * check, so there is no state to store.
   */
  @Override
  public void setHoldability(int holdability) throws SQLException {
    checkClosed();
    JdbcPreconditions.checkArgument(
        holdability == ResultSet.HOLD_CURSORS_OVER_COMMIT
            || holdability == ResultSet.CLOSE_CURSORS_AT_COMMIT,
        "Not a holdability value");
    JdbcPreconditions.checkSqlFeatureSupported(
        holdability == ResultSet.CLOSE_CURSORS_AT_COMMIT, ONLY_CLOSE_ALLOWED);
  }

  @Override
  public int getHoldability() throws SQLException {
    checkClosed();
    return ResultSet.CLOSE_CURSORS_AT_COMMIT;
  }

  @Override
  public SQLWarning getWarnings() throws SQLException {
    checkClosed();
    return firstWarning;
  }

  @Override
  public void clearWarnings() throws SQLException {
    checkClosed();
    firstWarning = null;
    lastWarning = null;
  }

  @Override
  public SQLXML createSQLXML() throws SQLException {
    return checkClosedAndThrowUnsupported(SQLXML_UNSUPPORTED);
  }

  /**
   * Sets a client info property. Per the JDBC contract this throws
   * {@link SQLClientInfoException} (not a plain SQLException) on any failure, so errors from
   * {@code checkClosed()} are translated. Property names are case-insensitive (normalized to
   * upper case); setting an unsupported property adds a {@link SQLWarning} instead of failing.
   */
  @Override
  public void setClientInfo(String name, String value) throws SQLClientInfoException {
    Properties supported;
    try {
      checkClosed();
      supported = JdbcDatabaseMetaData.getDefaultClientInfoProperties();
    } catch (SQLException e) {
      if (e instanceof JdbcSqlException) {
        throw JdbcSqlExceptionFactory.clientInfoException(
            e.getMessage(), ((JdbcSqlException) e).getCode());
      } else {
        throw JdbcSqlExceptionFactory.clientInfoException(e.getMessage(), Code.UNKNOWN);
      }
    }
    if (value == null) {
      throw JdbcSqlExceptionFactory.clientInfoException(
          "Null-value is not allowed for client info.", Code.INVALID_ARGUMENT);
    }
    if (value.length() > JdbcDatabaseMetaData.MAX_CLIENT_INFO_VALUE_LENGTH) {
      throw JdbcSqlExceptionFactory.clientInfoException(
          String.format(
              "Max length of value is %d characters.",
              JdbcDatabaseMetaData.MAX_CLIENT_INFO_VALUE_LENGTH),
          Code.INVALID_ARGUMENT);
    }
    name = name.toUpperCase();
    if (supported.containsKey(name)) {
      clientInfo.setProperty(name, value);
    } else {
      pushWarning(new SQLWarning(String.format(CLIENT_INFO_NOT_SUPPORTED, name)));
    }
  }

  /**
   * Replaces all client info properties: clears the current set and then applies each entry via
   * {@link #setClientInfo(String, String)}, so per-property validation and warnings apply.
   */
  @Override
  public void setClientInfo(Properties properties) throws SQLClientInfoException {
    try {
      checkClosed();
    } catch (SQLException e) {
      if (e instanceof JdbcSqlException) {
        throw JdbcSqlExceptionFactory.clientInfoException(
            e.getMessage(), ((JdbcSqlException) e).getCode());
      } else {
        throw JdbcSqlExceptionFactory.clientInfoException(e.getMessage(), Code.UNKNOWN);
      }
    }
    clientInfo.clear();
    for (String property : properties.stringPropertyNames()) {
      setClientInfo(property, properties.getProperty(property));
    }
  }

  @Override
  public String getClientInfo(String name) throws SQLException {
    checkClosed();
    // Names are stored upper-case; normalize the lookup key the same way.
    return clientInfo.getProperty(name.toUpperCase());
  }

  @Override
  public Properties getClientInfo() throws SQLException {
    checkClosed();
    // Return a copy so callers cannot mutate the connection's internal state.
    return (Properties) clientInfo.clone();
  }

  @Override
  public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
    return checkClosedAndThrowUnsupported(STRUCTS_UNSUPPORTED);
  }

  @Override
  public void abort(Executor executor) throws SQLException {
    checkClosedAndThrowUnsupported(ABORT_UNSUPPORTED);
  }

  @Override
  public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
    checkClosedAndThrowUnsupported(NETWORK_TIMEOUT_UNSUPPORTED);
  }

  @Override
  public int getNetworkTimeout() throws SQLException {
    return checkClosedAndThrowUnsupported(NETWORK_TIMEOUT_UNSUPPORTED);
  }

  /** Appends a warning to the chain that {@link #getWarnings()} exposes. */
  @VisibleForTesting
  void pushWarning(SQLWarning warning) {
    if (lastWarning == null) {
      firstWarning = warning;
    } else {
      lastWarning.setNextWarning(warning);
    }
    lastWarning = warning;
  }
}
package com.google.cloud.spanner.jdbc;

import com.google.rpc.Code;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLType;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.Calendar;

/**
 * Base class for Cloud Spanner {@link PreparedStatement}s.
 *
 * <p>All {@code setXxx} methods record the value and its JDBC type in a
 * {@link JdbcParameterStore}; the actual conversion to Cloud Spanner values happens in that store
 * when the statement is executed. The String-accepting {@code execute*} methods throw, as
 * required by the JDBC specification for prepared statements.
 */
abstract class AbstractJdbcPreparedStatement extends JdbcStatement implements PreparedStatement {
  private static final String METHOD_NOT_ON_PREPARED_STATEMENT =
      "This method may not be called on a PreparedStatement";
  // Holds all parameter values that have been set on this statement.
  private final JdbcParameterStore parameters;

  AbstractJdbcPreparedStatement(JdbcConnection connection) throws SQLException {
    super(connection);
    parameters = new JdbcParameterStore(connection.getDialect());
  }

  /** Returns the store containing the parameter values that have been set on this statement. */
  JdbcParameterStore getParameters() {
    return parameters;
  }

  /**
   * Always throws INVALID_ARGUMENT after verifying the statement is open. The type parameter
   * exists only so the call can be used directly in return statements of any return type.
   * (Fix: restored the missing {@code <T>} declaration; without it {@code T} is unresolved.)
   */
  private <T> T checkClosedAndThrowNotOnPreparedStatement() throws SQLException {
    checkClosed();
    throw JdbcSqlExceptionFactory.of(METHOD_NOT_ON_PREPARED_STATEMENT, Code.INVALID_ARGUMENT);
  }

  @Override
  public ResultSet executeQuery(String sql) throws SQLException {
    return checkClosedAndThrowNotOnPreparedStatement();
  }

  @Override
  public int executeUpdate(String sql) throws SQLException {
    return checkClosedAndThrowNotOnPreparedStatement();
  }

  @Override
  public boolean execute(String sql) throws SQLException {
    return checkClosedAndThrowNotOnPreparedStatement();
  }

  @Override
  public void addBatch(String sql) throws SQLException {
    checkClosedAndThrowNotOnPreparedStatement();
  }

  @Override
  public void setNull(int parameterIndex, int sqlType) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, /* value= */ null, sqlType, /* scaleOrLength= */ null);
  }

  @Override
  public void setBoolean(int parameterIndex, boolean value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.BOOLEAN);
  }

  @Override
  public void setByte(int parameterIndex, byte value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.TINYINT);
  }

  @Override
  public void setShort(int parameterIndex, short value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.SMALLINT);
  }

  @Override
  public void setInt(int parameterIndex, int value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.INTEGER);
  }

  @Override
  public void setLong(int parameterIndex, long value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.BIGINT);
  }

  @Override
  public void setFloat(int parameterIndex, float value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.REAL);
  }

  @Override
  public void setDouble(int parameterIndex, double value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.DOUBLE);
  }

  @Override
  public void setBigDecimal(int parameterIndex, BigDecimal value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.NUMERIC);
  }

  @Override
  public void setString(int parameterIndex, String value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.NVARCHAR);
  }

  @Override
  public void setBytes(int parameterIndex, byte[] value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.BINARY);
  }

  @Override
  public void setDate(int parameterIndex, Date value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.DATE);
  }

  @Override
  public void setTime(int parameterIndex, Time value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.TIME);
  }

  @Override
  public void setTimestamp(int parameterIndex, Timestamp value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.TIMESTAMP);
  }

  @Override
  public void setAsciiStream(int parameterIndex, InputStream value, int length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.VARCHAR, length);
  }

  @SuppressWarnings("DeprecatedIsStillUsed")
  @Override
  @Deprecated
  public void setUnicodeStream(int parameterIndex, InputStream value, int length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.NVARCHAR, length);
  }

  @Override
  public void setBinaryStream(int parameterIndex, InputStream value, int length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.BINARY, length);
  }

  @Override
  public void clearParameters() throws SQLException {
    checkClosed();
    parameters.clearParameters();
  }

  @Override
  public void setObject(int parameterIndex, Object value, int targetSqlType) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, targetSqlType, null);
  }

  @Override
  public void setObject(int parameterIndex, Object value) throws SQLException {
    checkClosed();
    // No explicit type given; a null SQLType lets the parameter store derive it from the value.
    parameters.setParameter(parameterIndex, value, (SQLType) null);
  }

  @Override
  public void setCharacterStream(int parameterIndex, Reader reader, int length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, reader, Types.NVARCHAR, length);
  }

  @Override
  public void setRef(int parameterIndex, Ref value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.REF);
  }

  @Override
  public void setBlob(int parameterIndex, Blob value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.BLOB);
  }

  @Override
  public void setClob(int parameterIndex, Clob value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.CLOB);
  }

  @Override
  public void setArray(int parameterIndex, Array value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.ARRAY);
  }

  @Override
  public void setDate(int parameterIndex, Date value, Calendar cal) throws SQLException {
    checkClosed();
    // NOTE(review): the Calendar argument is ignored here, unlike
    // setTimestamp(int, Timestamp, Calendar) below — confirm this is intended.
    parameters.setParameter(parameterIndex, value, Types.DATE);
  }

  @Override
  public void setTime(int parameterIndex, Time value, Calendar cal) throws SQLException {
    checkClosed();
    // NOTE(review): the Calendar argument is ignored here as well — confirm this is intended.
    parameters.setParameter(parameterIndex, value, Types.TIME);
  }

  @Override
  public void setTimestamp(int parameterIndex, Timestamp value, Calendar cal) throws SQLException {
    checkClosed();
    // Re-interpret the timestamp in the given calendar's time zone when one is supplied.
    parameters.setParameter(
        parameterIndex,
        cal == null ? value : JdbcTypeConverter.setTimestampInCalendar(value, cal),
        Types.TIMESTAMP);
  }

  @Override
  public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, null, sqlType, null);
  }

  @Override
  public void setURL(int parameterIndex, URL value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.NVARCHAR);
  }

  @Override
  public void setRowId(int parameterIndex, RowId value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.ROWID);
  }

  @Override
  public void setNString(int parameterIndex, String value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.NVARCHAR);
  }

  @Override
  public void setNCharacterStream(int parameterIndex, Reader value, long length)
      throws SQLException {
    checkClosed();
    // The length hint is dropped; presumably the reader is consumed in full by the parameter
    // store — TODO confirm against JdbcParameterStore.
    parameters.setParameter(parameterIndex, value, Types.NVARCHAR);
  }

  @Override
  public void setNClob(int parameterIndex, NClob value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.NCLOB);
  }

  @Override
  public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, reader, Types.CLOB);
  }

  @Override
  public void setBlob(int parameterIndex, InputStream inputStream, long length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, inputStream, Types.BLOB);
  }

  @Override
  public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, reader, Types.NCLOB);
  }

  @Override
  public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, xmlObject, Types.SQLXML);
  }

  @Override
  public void setObject(int parameterIndex, Object value, int targetSqlType, int scaleOrLength)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, targetSqlType, scaleOrLength);
  }

  @Override
  public void setObject(int parameterIndex, Object value, SQLType targetSqlType)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, targetSqlType.getVendorTypeNumber());
  }

  @Override
  public void setObject(int parameterIndex, Object value, SQLType targetSqlType, int scaleOrLength)
      throws SQLException {
    checkClosed();
    parameters.setParameter(
        parameterIndex, value, targetSqlType.getVendorTypeNumber(), scaleOrLength);
  }

  @Override
  public void setAsciiStream(int parameterIndex, InputStream value, long length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.VARCHAR);
  }

  @Override
  public void setBinaryStream(int parameterIndex, InputStream value, long length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.BINARY);
  }

  @Override
  public void setCharacterStream(int parameterIndex, Reader reader, long length)
      throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, reader, Types.NVARCHAR);
  }

  @Override
  public void setAsciiStream(int parameterIndex, InputStream value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.VARCHAR);
  }

  @Override
  public void setBinaryStream(int parameterIndex, InputStream value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.BINARY);
  }

  @Override
  public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, reader, Types.NVARCHAR);
  }

  @Override
  public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, value, Types.NVARCHAR);
  }

  @Override
  public void setClob(int parameterIndex, Reader reader) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, reader, Types.CLOB);
  }

  @Override
  public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
    checkClosed();
    parameters.setParameter(parameterIndex, inputStream, Types.BLOB);
  }

  @Override
  public void setNClob(int parameterIndex, Reader reader) throws SQLException {
    checkClosed();
    // Fix: was Types.NVARCHAR, which is inconsistent with setNClob(int, NClob) and
    // setNClob(int, Reader, long) — both register the parameter as Types.NCLOB.
    parameters.setParameter(parameterIndex, reader, Types.NCLOB);
  }
}
+ */ + +package com.google.cloud.spanner.jdbc; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLType; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; + +/** Base class for Cloud Spanner {@link ResultSet}s. */ +abstract class AbstractJdbcResultSet extends AbstractJdbcWrapper implements ResultSet { + /** The underlying Cloud Spanner {@link com.google.cloud.spanner.ResultSet}. */ + final com.google.cloud.spanner.ResultSet spanner; + + /** Current fetch size hint for this result set. */ + private int fetchSize; + + AbstractJdbcResultSet(com.google.cloud.spanner.ResultSet spanner) { + this.spanner = spanner; + } + + @Override + public SQLWarning getWarnings() { + return null; + } + + @Override + public void clearWarnings() {} + + @Override + public String getCursorName() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean isLast() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void beforeFirst() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void afterLast() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean first() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean last() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean absolute(int row) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean relative(int rows) throws SQLException { + 
throw new SQLFeatureNotSupportedException(); + } + + @Override + public boolean previous() throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + JdbcPreconditions.checkArgument(direction == FETCH_FORWARD, direction); + } + + @Override + public int getFetchDirection() { + return FETCH_FORWARD; + } + + @Override + public void setFetchSize(int rows) { + this.fetchSize = rows; + } + + @Override + public int getFetchSize() { + return fetchSize; + } + + @Override + public int getType() { + return TYPE_FORWARD_ONLY; + } + + @Override + public int getConcurrency() { + return CONCUR_READ_ONLY; + } + + @Override + public boolean rowUpdated() { + return false; + } + + @Override + public boolean rowInserted() { + return false; + } + + @Override + public boolean rowDeleted() { + return false; + } + + @Override + public void updateNull(int columnIndex) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBoolean(int columnIndex, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateByte(int columnIndex, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateShort(int columnIndex, short x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateInt(int columnIndex, int x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateLong(int columnIndex, long x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateFloat(int columnIndex, float x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDouble(int columnIndex, double x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } 
+ + @Override + public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateString(int columnIndex, String x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBytes(int columnIndex, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDate(int columnIndex, Date x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTime(int columnIndex, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateAsciiStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBinaryStream(int columnIndex, InputStream x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateCharacterStream(int columnIndex, Reader x, int length) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x, int scaleOrLength) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x, SQLType type, int scaleOrLength) + throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateObject(int columnIndex, Object x, SQLType type) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void 
updateNull(String columnLabel) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBoolean(String columnLabel, boolean x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateByte(String columnLabel, byte x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateShort(String columnLabel, short x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateInt(String columnLabel, int x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateLong(String columnLabel, long x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateFloat(String columnLabel, float x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDouble(String columnLabel, double x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBigDecimal(String columnLabel, BigDecimal x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateString(String columnLabel, String x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateBytes(String columnLabel, byte[] x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateDate(String columnLabel, Date x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTime(String columnLabel, Time x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void updateTimestamp(String columnLabel, Timestamp x) throws SQLException { + throw new SQLFeatureNotSupportedException(); + } + + @Override + public void 
updateAsciiStream(String columnLabel, InputStream x, int length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  // NOTE(review): every method from here to the end of the class unconditionally throws
  // SQLFeatureNotSupportedException. This result set is read-only and forward-only, so none of
  // the JDBC updateXxx(...) mutators, row insert/update/delete operations, or the
  // Ref/RowId/SQLXML accessors are supported.

  @Override
  public void updateBinaryStream(String columnLabel, InputStream x, int length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateCharacterStream(String columnLabel, Reader reader, int length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateObject(String columnLabel, Object x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateObject(String columnLabel, Object x, SQLType type) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateObject(String columnLabel, Object x, SQLType type, int scaleOrLength)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  // Row mutation / scrollable-cursor positioning: unsupported on a read-only, forward-only
  // result set.

  @Override
  public void insertRow() throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateRow() throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void deleteRow() throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void refreshRow() throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void cancelRowUpdates() throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void moveToInsertRow() throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void moveToCurrentRow() throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  // REF, BLOB/CLOB objects, ARRAY updates, ROWID and SQLXML: not supported.

  @Override
  public Ref getRef(int columnIndex) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public Ref getRef(String columnLabel) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateRef(int columnIndex, Ref x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateRef(String columnLabel, Ref x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBlob(int columnIndex, Blob x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBlob(String columnLabel, Blob x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateClob(int columnIndex, Clob x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateClob(String columnLabel, Clob x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateArray(int columnIndex, Array x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateArray(String columnLabel, Array x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public RowId getRowId(int columnIndex) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public RowId getRowId(String columnLabel) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateRowId(int columnIndex, RowId x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateRowId(String columnLabel, RowId x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNString(int columnIndex, String nString) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNString(String columnLabel, String nString) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNClob(int columnIndex, NClob nClob) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNClob(String columnLabel, NClob nClob) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public SQLXML getSQLXML(int columnIndex) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public SQLXML getSQLXML(String columnLabel) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  // Stream-based updaters, long-length variants.

  @Override
  public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNCharacterStream(String columnLabel, Reader reader, long length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateAsciiStream(String columnLabel, InputStream x, long length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBinaryStream(String columnLabel, InputStream x, long length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateCharacterStream(String columnLabel, Reader reader, long length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBlob(int columnIndex, InputStream inputStream, long length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBlob(String columnLabel, InputStream inputStream, long length)
      throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateClob(int columnIndex, Reader reader, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateClob(String columnLabel, Reader reader, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  // Stream-based updaters, unbounded-length variants.

  @Override
  public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateCharacterStream(int columnIndex, Reader x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateClob(int columnIndex, Reader reader) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateClob(String columnLabel, Reader reader) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNClob(int columnIndex, Reader reader) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }

  @Override
  public void updateNClob(String columnLabel, Reader reader) throws SQLException {
    throw new SQLFeatureNotSupportedException();
  }
}

/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.jdbc;

import com.google.cloud.spanner.ErrorCode;
import com.google.cloud.spanner.Options;
import com.google.cloud.spanner.Options.QueryOption;
import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode;
import com.google.cloud.spanner.SpannerException;
import com.google.cloud.spanner.connection.AbstractStatementParser;
import com.google.cloud.spanner.connection.Connection;
import com.google.cloud.spanner.connection.StatementResult;
import com.google.cloud.spanner.connection.StatementResult.ClientSideStatementType;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import com.google.rpc.Code;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.Statement;
import java.time.Duration;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.function.Supplier;
import javax.annotation.Nonnull;

/** Base class for Cloud Spanner JDBC {@link Statement}s */
abstract class AbstractJdbcStatement extends AbstractJdbcWrapper implements Statement {
  private static final String CURSORS_NOT_SUPPORTED = "Cursors are not supported";
  private static final String ONLY_FETCH_FORWARD_SUPPORTED = "Only fetch_forward is supported";
  final AbstractStatementParser parser;
  // Serializes statement execution and enables cancel-by-interrupt. Only created when the
  // connection uses a direct executor (see constructor); null otherwise.
  private final Lock executingLock;
  // The thread that is currently executing a statement. Only maintained when executingLock is
  // non-null; read by cancel() without holding the lock (best-effort, see cancel()).
  private volatile Thread executingThread;
  // Set by cancel() just before interrupting executingThread; used to recognize a CANCELLED
  // SpannerException that was caused by this statement's own cancel() call.
  private final AtomicBoolean cancelled = new AtomicBoolean();
  private boolean closed;
  private boolean closeOnCompletion;
  private boolean poolable;
  private final JdbcConnection connection;
  // Duration.ZERO means 'no query timeout has been set on this statement'.
  private Duration queryTimeout = Duration.ZERO;

  AbstractJdbcStatement(JdbcConnection connection) throws SQLException {
    this.connection = connection;
    this.parser = connection.getParser();
    if (connection.usesDirectExecutor()) {
      this.executingLock = new ReentrantLock();
    } else {
      this.executingLock = null;
    }
  }

  @Override
  public JdbcConnection getConnection() {
    return connection;
  }

  /**
   * Returns the given options extended with a prefetchChunks option when a positive fetch size
   * has been set on this statement.
   */
  private Options.QueryOption[] getQueryOptions(QueryOption... options) throws SQLException {
    QueryOption[] res = options == null ? new QueryOption[0] : options;
    if (getFetchSize() > 0) {
      res = Arrays.copyOf(res, res.length + 1);
      res[res.length - 1] = Options.prefetchChunks(getFetchSize());
    }
    return res;
  }

  /** The {@link TimeUnit}s that are supported for timeout and staleness durations */
  private static final TimeUnit[] SUPPORTED_UNITS =
      new TimeUnit[] {
        TimeUnit.SECONDS, TimeUnit.MILLISECONDS, TimeUnit.MICROSECONDS, TimeUnit.NANOSECONDS
      };

  /**
   * Get the {@link TimeUnit} with the least precision that is able to represent the timeout of this
   * statement.
   */
  private TimeUnit getAppropriateTimeUnit() {
    int index = 0;
    if (connection.getSpannerConnection().hasStatementTimeout()) {
      for (TimeUnit unit : SUPPORTED_UNITS) {
        long duration = connection.getSpannerConnection().getStatementTimeout(unit);
        if (index + 1 < SUPPORTED_UNITS.length) {
          // If converting to the next (1000x finer) unit is exact, this unit represents the
          // timeout without loss of precision.
          if (duration > 0L
              && duration * 1000
                  == connection
                      .getSpannerConnection()
                      .getStatementTimeout(SUPPORTED_UNITS[index + 1])) {
            return unit;
          }
        } else {
          // last unit, we have to use this one
          return unit;
        }
        index++;
      }
      throw new IllegalStateException("Unsupported duration");
    }
    return null;
  }

  /**
   * Local class to temporarily hold the statement timeout of the Spanner {@link Connection}. The
   * Spanner connection API sets the timeout on the connection and applies it to all statements that
   * are executed on the {@link Connection}. JDBC specifies a timeout per statement, so we need to
   * temporarily hold on to the timeout specified for the connection while using the timeout
   * specified for a JDBC statement, and then after executing the JDBC statement setting the timeout
   * on the Spanner {@link Connection} again.
   */
  static class StatementTimeout {
    private final long timeout;
    private final TimeUnit unit;

    private static StatementTimeout of(long timeout, TimeUnit unit) {
      return new StatementTimeout(timeout, unit);
    }

    private StatementTimeout(long timeout, TimeUnit unit) {
      this.timeout = timeout;
      this.unit = unit;
    }
  }

  /** Functional interface that throws {@link SQLException}. */
  interface JdbcFunction<T, R> {
    R apply(T t) throws SQLException;
  }

  /** Runs the given function with the timeout that has been set on this statement. */
  protected <T> T runWithStatementTimeout(JdbcFunction<Connection, T> function)
      throws SQLException {
    checkClosed();
    StatementTimeout originalTimeout = setTemporaryStatementTimeout();
    try {
      return function.apply(getConnection().getSpannerConnection());
    } catch (SpannerException spannerException) {
      throw JdbcSqlExceptionFactory.of(spannerException);
    } finally {
      resetStatementTimeout(originalTimeout);
    }
  }

  /**
   * Sets the statement timeout of the Spanner {@link Connection} to the query timeout of this JDBC
   * {@link Statement} and returns the original timeout of the Spanner {@link Connection} so it can
   * be reset after the execution of a statement
   */
  StatementTimeout setTemporaryStatementTimeout() throws SQLException {
    StatementTimeout originalTimeout = null;
    if (!getQueryTimeoutDuration().isZero()) {
      if (connection.getSpannerConnection().hasStatementTimeout()) {
        TimeUnit unit = getAppropriateTimeUnit();
        originalTimeout =
            StatementTimeout.of(connection.getSpannerConnection().getStatementTimeout(unit), unit);
      }
      Duration queryTimeout = getQueryTimeoutDuration();
      // Use milliseconds when the timeout has a sub-second component, otherwise whole seconds.
      // NOTE(review): a timeout below 1ms would yield toMillis() == 0 here — presumably such
      // values never occur via the JDBC API (setQueryTimeout takes whole seconds); confirm for
      // the @VisibleForTesting Duration setter.
      if (queryTimeout.getNano() > 0) {
        connection
            .getSpannerConnection()
            .setStatementTimeout(queryTimeout.toMillis(), TimeUnit.MILLISECONDS);
      } else {
        connection
            .getSpannerConnection()
            .setStatementTimeout(queryTimeout.getSeconds(), TimeUnit.SECONDS);
      }
    }
    return originalTimeout;
  }

  /**
   * Resets the statement timeout of the Spanner {@link Connection} after a JDBC {@link Statement}
   * has been executed.
   */
  void resetStatementTimeout(StatementTimeout originalTimeout) throws SQLException {
    if (!getQueryTimeoutDuration().isZero()) {
      if (originalTimeout == null) {
        connection.getSpannerConnection().clearStatementTimeout();
      } else {
        connection
            .getSpannerConnection()
            .setStatementTimeout(originalTimeout.timeout, originalTimeout.unit);
      }
    }
  }

  /**
   * Executes a SQL statement on the connection of this {@link Statement} as a query using the given
   * {@link QueryAnalyzeMode}.
   *
   * @param statement the SQL statement to executed
   * @param analyzeMode the {@link QueryAnalyzeMode} to use
   * @return the result of the SQL statement as a {@link ResultSet}
   * @throws SQLException if a database error occurs.
   */
  ResultSet analyzeQuery(com.google.cloud.spanner.Statement statement, QueryAnalyzeMode analyzeMode)
      throws SQLException {
    return executeQuery(statement, analyzeMode);
  }

  /**
   * Executes a SQL statement on the connection of this {@link Statement} as a query.
   *
   * @param statement The SQL statement to executed.
   * @param options {@link QueryOption}s that should be applied to the query.
   * @return the result of the SQL statement as a {@link ResultSet}.
   * @throws SQLException if a database error occurs.
   */
  ResultSet executeQuery(com.google.cloud.spanner.Statement statement, QueryOption... options)
      throws SQLException {
    return executeQuery(statement, null, options);
  }

  private ResultSet executeQuery(
      com.google.cloud.spanner.Statement statement,
      QueryAnalyzeMode analyzeMode,
      QueryOption... options)
      throws SQLException {
    Options.QueryOption[] queryOptions = getQueryOptions(options);
    return doWithStatementTimeout(
        () -> {
          com.google.cloud.spanner.ResultSet resultSet;
          if (analyzeMode == null) {
            resultSet = connection.getSpannerConnection().executeQuery(statement, queryOptions);
          } else {
            // NOTE(review): queryOptions (including any fetch size) are not passed to
            // analyzeQuery — verify this is intentional.
            resultSet = connection.getSpannerConnection().analyzeQuery(statement, analyzeMode);
          }
          return JdbcResultSet.of(this, resultSet);
        });
  }

  private <T> T doWithStatementTimeout(Supplier<T> runnable) throws SQLException {
    return doWithStatementTimeout(runnable, ignore -> Boolean.TRUE);
  }

  /**
   * Runs the given supplier with this statement's temporary timeout applied to the underlying
   * Spanner connection, holding the executing lock (when present) so cancel() can interrupt the
   * executing thread, and recording the execution latency.
   */
  private <T> T doWithStatementTimeout(
      Supplier<T> runnable, Function<T, Boolean> shouldResetTimeout) throws SQLException {
    StatementTimeout originalTimeout = setTemporaryStatementTimeout();
    T result = null;
    if (this.executingLock != null) {
      this.executingLock.lock();
      this.executingThread = Thread.currentThread();
    }
    try {
      Stopwatch stopwatch = Stopwatch.createStarted();
      result = runnable.get();
      Duration executionDuration = stopwatch.elapsed();
      connection.recordClientLibLatencyMetric(executionDuration.toMillis());
      return result;
    } catch (SpannerException spannerException) {
      if (this.cancelled.get()
          && spannerException.getErrorCode() == ErrorCode.CANCELLED
          && this.executingLock != null) {
        // Clear the interrupted flag of the thread.
        //noinspection ResultOfMethodCallIgnored
        Thread.interrupted();
      }
      throw JdbcSqlExceptionFactory.of(spannerException);
    } finally {
      if (this.executingLock != null) {
        this.executingThread = null;
        this.cancelled.set(false);
        this.executingLock.unlock();
      }
      if (shouldResetTimeout.apply(result)) {
        resetStatementTimeout(originalTimeout);
      }
    }
  }

  /**
   * Do a checked cast from long to int. Throws a {@link SQLException} with code {@link
   * Code#OUT_OF_RANGE} if the update count is too big to fit in an int.
   */
  int checkedCast(long updateCount) throws SQLException {
    if (updateCount > Integer.MAX_VALUE) {
      throw JdbcSqlExceptionFactory.of(
          "update count too large for executeUpdate: " + updateCount, Code.OUT_OF_RANGE);
    }
    return (int) updateCount;
  }

  /**
   * Executes a SQL statement on the connection of this {@link Statement}. The SQL statement can be
   * any supported SQL statement, including client side statements such as SET AUTOCOMMIT ON|OFF.
   *
   * @param statement The SQL statement to execute.
   * @return a {@link StatementResult} containing either a {@link ResultSet}, an update count or
   *     nothing depending on the type of SQL statement.
   * @throws SQLException if a database error occurs.
   */
  StatementResult execute(com.google.cloud.spanner.Statement statement) throws SQLException {
    // Do not reset the timeout when the statement itself was SET STATEMENT_TIMEOUT: that would
    // undo the timeout the user just set.
    StatementResult statementResult =
        doWithStatementTimeout(
            () -> connection.getSpannerConnection().execute(statement),
            result -> !resultIsSetStatementTimeout(result));
    if (resultIsShowStatementTimeout(statementResult)) {
      // We can safely re-run it without first resetting the timeout to the original value, as that
      // has already been done by the 'doWithStatementTimeout' function.
      return rerunShowStatementTimeout(statement);
    }
    return statementResult;
  }

  /**
   * The Spanner Connection API sets the statement timeout on a {@link Connection}. JDBC on the
   * other hand sets this on the {@link Statement} object. This means that when a JDBC statement has
   * a query timeout set, we need to temporarily set the statement timeout on the underlying Spanner
   * {@link Connection}, then execute the actual statement, and then reset the timeout on the
   * Spanner connection. But if the executed statement was a SHOW STATEMENT_TIMEOUT or SET
   * STATEMENT_TIMEOUT, then we need to handle it differently:
   *
   * <ul>
   *   <li>SHOW STATEMENT_TIMEOUT: Reset the statement timeout on the {@link Connection} to the
   *       original value and re-run the statement
   *   <li>SET STATEMENT_TIMEOUT: Do not reset the statement timeout on the {@link Connection} after
   *       the execution
   * </ul>
   *
   * @param result The result of a statement that was executed.
   * @return true if the {@link StatementResult} indicates that the statement that was
   *     executed was a SET STATEMENT_TIMEOUT statement.
   */
  private boolean resultIsSetStatementTimeout(StatementResult result) {
    return result != null
        && result.getClientSideStatementType() == ClientSideStatementType.SET_STATEMENT_TIMEOUT;
  }

  private boolean resultIsShowStatementTimeout(StatementResult result) {
    return result != null
        && result.getClientSideStatementType() == ClientSideStatementType.SHOW_STATEMENT_TIMEOUT;
  }

  private StatementResult rerunShowStatementTimeout(com.google.cloud.spanner.Statement statement)
      throws SQLException {
    try {
      return connection.getSpannerConnection().execute(statement);
    } catch (SpannerException spannerException) {
      throw JdbcSqlExceptionFactory.of(spannerException);
    }
  }

  @Override
  public int getQueryTimeout() throws SQLException {
    return (int) getQueryTimeoutDuration().getSeconds();
  }

  @VisibleForTesting
  @Nonnull
  Duration getQueryTimeoutDuration() throws SQLException {
    checkClosed();
    return this.queryTimeout;
  }

  @Override
  public void setQueryTimeout(int seconds) throws SQLException {
    setQueryTimeout(Duration.ofSeconds(seconds));
  }

  @VisibleForTesting
  void setQueryTimeout(@Nonnull Duration duration) throws SQLException {
    JdbcPreconditions.checkArgument(!duration.isNegative(), "Timeout must be >= 0");
    checkClosed();
    this.queryTimeout = duration;
  }

  @Override
  public void cancel() throws SQLException {
    checkClosed();
    if (this.executingThread != null) {
      // This is a best-effort operation. It could be that the executing thread is set to null
      // between the if-check and the actual execution. Just ignore if that happens.
      try {
        this.cancelled.set(true);
        this.executingThread.interrupt();
      } catch (NullPointerException ignore) {
        // ignore, this just means that the execution finished before we got to the point where we
        // could interrupt the thread.
      } catch (SecurityException securityException) {
        throw JdbcSqlExceptionFactory.of(
            securityException.getMessage(), Code.PERMISSION_DENIED, securityException);
      }
    } else {
      connection.getSpannerConnection().cancel();
    }
  }

  @Override
  public void close() throws SQLException {
    this.closed = true;
  }

  @Override
  public boolean isClosed() {
    return closed;
  }

  @Override
  public void setPoolable(boolean poolable) throws SQLException {
    checkClosed();
    this.poolable = poolable;
  }

  @Override
  public boolean isPoolable() throws SQLException {
    checkClosed();
    return poolable;
  }

  @Override
  public void closeOnCompletion() throws SQLException {
    checkClosed();
    this.closeOnCompletion = true;
  }

  @Override
  public boolean isCloseOnCompletion() throws SQLException {
    checkClosed();
    return closeOnCompletion;
  }

  // The following settings are accepted but have no effect: 0 / no-op are the documented JDBC
  // defaults ('no limit' / 'hint may be ignored').

  @Override
  public int getMaxFieldSize() throws SQLException {
    checkClosed();
    return 0;
  }

  @Override
  public void setMaxFieldSize(int max) throws SQLException {
    checkClosed();
  }

  @Override
  public int getMaxRows() throws SQLException {
    checkClosed();
    return 0;
  }

  @Override
  public long getLargeMaxRows() throws SQLException {
    checkClosed();
    return 0L;
  }

  @Override
  public void setMaxRows(int max) throws SQLException {
    checkClosed();
  }

  @Override
  public void setLargeMaxRows(long max) throws SQLException {
    checkClosed();
  }

  @Override
  public void setEscapeProcessing(boolean enable) throws SQLException {
    checkClosed();
  }

  @Override
  public SQLWarning getWarnings() throws SQLException {
    checkClosed();
    return null;
  }

  @Override
  public void clearWarnings() throws SQLException {
    checkClosed();
  }

  @Override
  public void setCursorName(String name) throws SQLException {
    throw JdbcSqlExceptionFactory.unsupported(CURSORS_NOT_SUPPORTED);
  }

  @Override
  public void setFetchDirection(int direction) throws SQLException {
    if (direction != ResultSet.FETCH_FORWARD) {
      throw JdbcSqlExceptionFactory.unsupported(ONLY_FETCH_FORWARD_SUPPORTED);
    }
  }

  @Override
  public int getFetchDirection() {
    return ResultSet.FETCH_FORWARD;
  }

  @Override
  public int getResultSetConcurrency() {
    return ResultSet.CONCUR_READ_ONLY;
  }

  @Override
  public int getResultSetType() {
    return ResultSet.TYPE_FORWARD_ONLY;
  }

  @Override
  public int getResultSetHoldability() {
    return ResultSet.CLOSE_CURSORS_AT_COMMIT;
  }
}

/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.getMainTypeCode; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.Code; +import com.google.common.base.Preconditions; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.sql.Date; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.sql.Wrapper; +import java.util.Calendar; + +/** Base class for all Cloud Spanner JDBC classes that implement the {@link Wrapper} interface. */ +abstract class AbstractJdbcWrapper implements Wrapper { + static final String OTHER_NAME = "OTHER"; + + /** + * Extract {@link java.sql.Types} code from Spanner {@link Type}. + * + * @param type The Cloud Spanner type to convert. May not be null. + */ + static int extractColumnType(Type type) { + Preconditions.checkNotNull(type); + switch (getMainTypeCode(type)) { + case BOOL: + return Types.BOOLEAN; + case BYTES: + case PROTO: + return Types.BINARY; + case DATE: + return Types.DATE; + case FLOAT32: + return Types.REAL; + case FLOAT64: + return Types.DOUBLE; + case INT64: + case ENUM: + return Types.BIGINT; + case NUMERIC: + case PG_NUMERIC: + return Types.NUMERIC; + case STRING: + case JSON: + case PG_JSONB: + return Types.NVARCHAR; + case TIMESTAMP: + return Types.TIMESTAMP; + case ARRAY: + return Types.ARRAY; + case STRUCT: + default: + return Types.OTHER; + } + } + + static String getSpannerTypeName(Type type, Dialect dialect) { + return Preconditions.checkNotNull(type).getSpannerTypeName(dialect); + } + + static String getSpannerColumnTypeName(Type type, Dialect dialect) { + if (dialect == Dialect.POSTGRESQL && type.getCode() == Code.ARRAY) { + return "_" + getSpannerTypeName(type.getArrayElementType(), dialect); + } + return getSpannerTypeName(type, dialect); + } + + /** + * 
Extract Spanner type name from {@link java.sql.Types} code. + * + * @deprecated Use {@link #getSpannerTypeName(Type, Dialect)} instead. + */ + @Deprecated + static String getSpannerTypeName(int sqlType) { + // TODO: Re-write to be dialect-aware (or remove all-together). + if (sqlType == Types.BOOLEAN) return Type.bool().getCode().name(); + if (sqlType == Types.BINARY) return Type.bytes().getCode().name(); + if (sqlType == Types.DATE) return Type.date().getCode().name(); + if (sqlType == Types.REAL) { + return Type.float32().getCode().name(); + } + if (sqlType == Types.DOUBLE || sqlType == Types.FLOAT) return Type.float64().getCode().name(); + if (sqlType == Types.BIGINT + || sqlType == Types.INTEGER + || sqlType == Types.SMALLINT + || sqlType == Types.TINYINT) return Type.int64().getCode().name(); + if (sqlType == Types.NUMERIC || sqlType == Types.DECIMAL) + return Type.numeric().getCode().name(); + if (sqlType == Types.NVARCHAR) return Type.string().getCode().name(); + if (sqlType == Types.TIMESTAMP || sqlType == Types.TIMESTAMP_WITH_TIMEZONE) + return Type.timestamp().getCode().name(); + if (sqlType == Types.ARRAY) return Code.ARRAY.name(); + + return OTHER_NAME; + } + + /** + * Get corresponding Java class name from {@link java.sql.Types} code. + * + * @deprecated Use {@link #getClassName(Type)} instead. 
+ */ + @Deprecated + static String getClassName(int sqlType) { + if (sqlType == Types.BOOLEAN) return Boolean.class.getName(); + if (sqlType == Types.BINARY) return Byte[].class.getName(); + if (sqlType == Types.DATE) return Date.class.getName(); + if (sqlType == Types.REAL) { + return Float.class.getName(); + } + if (sqlType == Types.DOUBLE || sqlType == Types.FLOAT) return Double.class.getName(); + if (sqlType == Types.BIGINT + || sqlType == Types.INTEGER + || sqlType == Types.SMALLINT + || sqlType == Types.TINYINT) return Long.class.getName(); + if (sqlType == Types.NUMERIC || sqlType == Types.DECIMAL) return BigDecimal.class.getName(); + if (sqlType == Types.NVARCHAR) return String.class.getName(); + if (sqlType == Types.TIMESTAMP || sqlType == Types.TIMESTAMP_WITH_TIMEZONE) + return Timestamp.class.getName(); + if (sqlType == Types.ARRAY) return Object.class.getName(); + + return null; + } + + /** + * Get corresponding Java class name from Spanner {@link Type}. + * + * @param type The Cloud Spanner type to convert. May not be null. 
+ */ + static String getClassName(Type type) { + Preconditions.checkNotNull(type); + switch (getMainTypeCode(type)) { + case BOOL: + return Boolean.class.getName(); + case BYTES: + case PROTO: + return byte[].class.getName(); + case DATE: + return Date.class.getName(); + case FLOAT32: + return Float.class.getName(); + case FLOAT64: + return Double.class.getName(); + case INT64: + case ENUM: + return Long.class.getName(); + case NUMERIC: + case PG_NUMERIC: + return BigDecimal.class.getName(); + case STRING: + case JSON: + case PG_JSONB: + return String.class.getName(); + case TIMESTAMP: + return Timestamp.class.getName(); + case ARRAY: + switch (getMainTypeCode(type.getArrayElementType())) { + case BOOL: + return Boolean[].class.getName(); + case BYTES: + case PROTO: + return byte[][].class.getName(); + case DATE: + return Date[].class.getName(); + case FLOAT32: + return Float[].class.getName(); + case FLOAT64: + return Double[].class.getName(); + case INT64: + case ENUM: + return Long[].class.getName(); + case NUMERIC: + case PG_NUMERIC: + return BigDecimal[].class.getName(); + case STRING: + case JSON: + case PG_JSONB: + return String[].class.getName(); + case TIMESTAMP: + return Timestamp[].class.getName(); + } + case STRUCT: + default: + return null; + } + } + + /** Standard error message for out-of-range values. */ + private static final String OUT_OF_RANGE_MSG = "Value out of range for %s: %s"; + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static byte checkedCastToByte(long val) throws SQLException { + if (val > Byte.MAX_VALUE || val < Byte.MIN_VALUE) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "byte", val), com.google.rpc.Code.OUT_OF_RANGE); + } + return (byte) val; + } + + /** Cast value and throw {@link SQLException} if out-of-range. 
*/ + static byte checkedCastToByte(BigDecimal val) throws SQLException { + try { + return val.byteValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "byte", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static byte checkedCastToByte(BigInteger val) throws SQLException { + try { + return val.byteValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "byte", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static short checkedCastToShort(long val) throws SQLException { + if (val > Short.MAX_VALUE || val < Short.MIN_VALUE) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "short", val), com.google.rpc.Code.OUT_OF_RANGE); + } + return (short) val; + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static short checkedCastToShort(BigDecimal val) throws SQLException { + try { + return val.shortValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "short", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static short checkedCastToShort(BigInteger val) throws SQLException { + try { + return val.shortValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "short", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. 
*/ + static int checkedCastToInt(long val) throws SQLException { + if (val > Integer.MAX_VALUE || val < Integer.MIN_VALUE) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "int", val), com.google.rpc.Code.OUT_OF_RANGE); + } + return (int) val; + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static int checkedCastToInt(BigDecimal val) throws SQLException { + try { + return val.intValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "int", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static int checkedCastToInt(BigInteger val) throws SQLException { + try { + return val.intValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "int", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static float checkedCastToFloat(double val) throws SQLException { + if (Double.isFinite(val) && (val > Float.MAX_VALUE || val < -Float.MAX_VALUE)) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "float", val), com.google.rpc.Code.OUT_OF_RANGE); + } + return (float) val; + } + + /** + * Parses the given string value as a long. Throws {@link SQLException} if the string is not a + * valid long value. + */ + static long parseLong(String val) throws SQLException { + Preconditions.checkNotNull(val); + try { + return Long.parseLong(val); + } catch (NumberFormatException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid number", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. 
*/ + static BigInteger checkedCastToBigInteger(BigDecimal val) throws SQLException { + try { + return val.toBigIntegerExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "BigInteger", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static long checkedCastToLong(BigDecimal val) throws SQLException { + try { + return val.longValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "long", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** Cast value and throw {@link SQLException} if out-of-range. */ + static long checkedCastToLong(BigInteger val) throws SQLException { + try { + return val.longValueExact(); + } catch (ArithmeticException e) { + throw JdbcSqlExceptionFactory.of( + String.format(OUT_OF_RANGE_MSG, "long", val), com.google.rpc.Code.OUT_OF_RANGE); + } + } + + /** + * Parses the given string value as a double. Throws {@link SQLException} if the string is not a + * valid double value. + */ + static double parseDouble(String val) throws SQLException { + Preconditions.checkNotNull(val); + try { + return Double.parseDouble(val); + } catch (NumberFormatException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid number", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** + * Parses the given string value as a float. Throws {@link SQLException} if the string is not a + * valid float value. + */ + static float parseFloat(String val) throws SQLException { + Preconditions.checkNotNull(val); + try { + return Float.parseFloat(val); + } catch (NumberFormatException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid number", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** + * Parses the given string value as a {@link Date} value. 
Throws {@link SQLException} if the + * string is not a valid {@link Date} value. + */ + static Date parseDate(String val) throws SQLException { + Preconditions.checkNotNull(val); + try { + return JdbcTypeConverter.toSqlDate(com.google.cloud.Date.parseDate(val)); + } catch (IllegalArgumentException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid date", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** + * Parses the given string value as a {@link Date} value in the timezone of the given {@link + * Calendar}. Throws {@link SQLException} if the string is not a valid {@link Date} value. + */ + static Date parseDate(String val, Calendar cal) throws SQLException { + Preconditions.checkNotNull(val); + Preconditions.checkNotNull(cal); + try { + return JdbcTypeConverter.toSqlDate(com.google.cloud.Date.parseDate(val), cal); + } catch (IllegalArgumentException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid date", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** + * Parses the given string value as a {@link Time} value. Throws {@link SQLException} if the + * string is not a valid {@link Time} value. + */ + static Time parseTime(String val) throws SQLException { + Preconditions.checkNotNull(val); + try { + return Time.valueOf(val); + } catch (IllegalArgumentException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid time", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** + * Parses the given string value as a {@link Time} value in the timezone of the given {@link + * Calendar}. Throws {@link SQLException} if the string is not a valid {@link Time} value. 
+ */ + static Time parseTime(String val, Calendar cal) throws SQLException { + Preconditions.checkNotNull(val); + Preconditions.checkNotNull(cal); + try { + return JdbcTypeConverter.parseSqlTime(val, cal); + } catch (IllegalArgumentException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid time", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** + * Parses the given string value as a {@link Timestamp} value. Throws {@link SQLException} if the + * string is not a valid {@link Timestamp} value. + */ + static Timestamp parseTimestamp(String val) throws SQLException { + Preconditions.checkNotNull(val); + try { + return JdbcTypeConverter.toSqlTimestamp(com.google.cloud.Timestamp.parseTimestamp(val)); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid timestamp", val), + com.google.rpc.Code.INVALID_ARGUMENT, + e); + } + } + + /** + * Parses the given string value as a {@link Timestamp} value in the timezone of the given {@link + * Calendar}. Throws {@link SQLException} if the string is not a valid {@link Timestamp} value. + */ + static Timestamp parseTimestamp(String val, Calendar cal) throws SQLException { + Preconditions.checkNotNull(val); + Preconditions.checkNotNull(cal); + try { + return JdbcTypeConverter.setTimestampInCalendar( + com.google.cloud.Timestamp.parseTimestamp(val).toSqlTimestamp(), cal); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid timestamp", val), + com.google.rpc.Code.INVALID_ARGUMENT, + e); + } + } + + /** + * Parses the given string value as a {@link BigDecimal} value. Throws {@link SQLException} if the + * string is not a valid {@link BigDecimal} value. 
+ */ + static BigDecimal parseBigDecimal(String val) throws SQLException { + Preconditions.checkNotNull(val); + try { + return new BigDecimal(val); + } catch (NumberFormatException e) { + throw JdbcSqlExceptionFactory.of( + String.format("%s is not a valid number", val), com.google.rpc.Code.INVALID_ARGUMENT, e); + } + } + + /** Should return true if this object has been closed */ + public abstract boolean isClosed(); + + /** Throws a {@link SQLException} if this object is closed */ + void checkClosed() throws SQLException { + if (isClosed()) { + throw JdbcSqlExceptionFactory.of( + "This " + getClass().getName() + " has been closed", + com.google.rpc.Code.FAILED_PRECONDITION); + } + } + + /** + * Throws a {@link SQLException} if this object is closed and otherwise a {@link + * SQLFeatureNotSupportedException} with the given message + */ + T checkClosedAndThrowUnsupported(String message) throws SQLException { + checkClosed(); + throw JdbcSqlExceptionFactory.unsupported(message); + } + + @Override + public boolean isWrapperFor(Class iface) { + return iface != null && iface.isAssignableFrom(getClass()); + } + + @Override + public T unwrap(Class iface) throws SQLException { + if (isWrapperFor(iface)) { + return iface.cast(this); + } + throw JdbcSqlExceptionFactory.of( + "Cannot unwrap to " + iface.getName(), com.google.rpc.Code.INVALID_ARGUMENT); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection.java new file mode 100644 index 000000000000..50f1f5328f89 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection.java @@ -0,0 +1,567 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.CommitStats; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.AutocommitDmlMode; +import com.google.cloud.spanner.connection.SavepointSupport; +import com.google.cloud.spanner.connection.TransactionMode; +import java.io.IOException; +import java.io.InputStream; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Iterator; +import javax.annotation.Nonnull; + +/** + * JDBC connection with a number of additional Cloud Spanner specific methods. JDBC connections that + * are returned by the Cloud Spanner {@link JdbcDriver} will implement this interface. + * + *

Calling {@link Connection#unwrap(Class)} with {@code CloudSpannerJdbcConnection} class as + * input on a {@link Connection} returned by the Cloud Spanner JDBC Driver will return a {@link + * CloudSpannerJdbcConnection} instance. + */ +public interface CloudSpannerJdbcConnection extends Connection { + + /** + * Returns the {@link DatabaseId} of the database that this {@link Connection} is connected to. + */ + default DatabaseId getDatabaseId() { + throw new UnsupportedOperationException(); + } + + /** + * Returns the underlying {@link DatabaseClient} that is used by this connection. Operations that + * are executed on the {@link DatabaseClient} that is returned has no impact on this {@link + * Connection}, e.g. starting a read/write transaction on the {@link DatabaseClient} will not + * start a transaction on this connection. + */ + default DatabaseClient getDatabaseClient() { + throw new UnsupportedOperationException(); + } + + /** Returns the underlying {@link Spanner} instance that is used by this connection. */ + default Spanner getSpanner() { + throw new UnsupportedOperationException(); + } + + /** + * Sets the transaction tag to use for the current transaction. This method may only be called + * when in a transaction, and before the transaction is actually started, i.e. before any + * statements have been executed in the transaction. + * + *

The tag will be set as the transaction tag of all statements during the transaction, and as + * the transaction tag of the commit. + * + *

The transaction tag will automatically be cleared after the transaction has ended. + * + * @param tag The tag to use. + */ + default void setTransactionTag(String tag) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * @return The transaction tag of the current transaction. + */ + default String getTransactionTag() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets the statement tag to use for the next statement that will be executed. The tag is + * automatically cleared after the statement is executed. Statement tags can be used both with + * autocommit=true and autocommit=false, and can be used for partitioned DML. + * + * @param tag The statement tag to use with the next statement that will be executed on this + * connection. + */ + default void setStatementTag(String tag) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * @return The statement tag that will be used with the next statement that is executed on this + * connection. + */ + default String getStatementTag() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets the transaction mode to use for current transaction. This method may only be called when + * in a transaction, and before the transaction is actually started, i.e. before any statements + * have been executed in the transaction. + * + * @param transactionMode The transaction mode to use for the current transaction. + *

    + *
  • {@link TransactionMode#READ_ONLY_TRANSACTION} will create a read-only transaction and + * prevent any changes to written to the database through this transaction. The read + * timestamp to be used will be determined based on the current readOnlyStaleness + * setting of this connection. It is recommended to use {@link + * TransactionMode#READ_ONLY_TRANSACTION} instead of {@link + * TransactionMode#READ_WRITE_TRANSACTION} when possible, as read-only transactions do + * not acquire locks on Cloud Spanner, and read-only transactions never abort. + *
  • {@link TransactionMode#READ_WRITE_TRANSACTION} this value is only allowed when the + * connection is not in read-only mode and will create a read-write transaction. If + * {@link CloudSpannerJdbcConnection#isRetryAbortsInternally()} is true, + * each read/write transaction will keep track of a running SHA256 checksum for each + * {@link ResultSet} that is returned in order to be able to retry the transaction in + * case the transaction is aborted by Spanner. + *
+ */ + void setTransactionMode(TransactionMode transactionMode) throws SQLException; + + /** + * @return the transaction mode of the current transaction. This method may only be called when + * the connection is in a transaction. + */ + TransactionMode getTransactionMode() throws SQLException; + + /** + * Sets the mode for executing DML statements in autocommit mode for this connection. This setting + * is only used when the connection is in autocommit mode, and may only be set while the + * transaction is in autocommit mode and not in a temporary transaction. The autocommit + * transaction mode is reset to its default value of {@link AutocommitDmlMode#TRANSACTIONAL} when + * autocommit mode is changed on the connection. + * + * @param mode The DML autocommit mode to use + *
    + *
  • {@link AutocommitDmlMode#TRANSACTIONAL} DML statements are executed as single + * read-write transaction. After successful execution, the DML statement is guaranteed + * to have been applied exactly once to the database + *
  • {@link AutocommitDmlMode#PARTITIONED_NON_ATOMIC} DML statements are executed as + * partitioned DML transactions. If an error occurs during the execution of the DML + * statement, it is possible that the statement has been applied to some but not all of + * the rows specified in the statement. + *
+ */ + void setAutocommitDmlMode(AutocommitDmlMode mode) throws SQLException; + + /** + * @return the current {@link AutocommitDmlMode} setting for this connection. This method may only + * be called on a connection that is in autocommit mode and not while in a temporary + * transaction. + */ + AutocommitDmlMode getAutocommitDmlMode() throws SQLException; + + /** + * Sets the staleness to use for the current read-only transaction. This method may only be called + * when the transaction mode of the current transaction is {@link + * TransactionMode#READ_ONLY_TRANSACTION} and there is no transaction that has started, or when + * the connection is in read-only and autocommit mode. + * + * @param staleness The staleness to use for the current but not yet started read-only transaction + */ + void setReadOnlyStaleness(TimestampBound staleness) throws SQLException; + + /** + * @return the read-only staleness setting for the current read-only transaction. This method may + * only be called when the current transaction is a read-only transaction, or when the + * connection is in read-only and autocommit mode. + */ + TimestampBound getReadOnlyStaleness() throws SQLException; + + /** + * Sets the query optimizer version to use for this connection. + * + * @param optimizerVersion The query optimizer version to use. Must be a valid optimizer version + * number, the string LATEST or an empty string. The empty string will instruct + * the connection to use the optimizer version that is defined in the environment variable + * SPANNER_OPTIMIZER_VERSION. If no value is specified in the environment + * variable, the default query optimizer of Cloud Spanner is used. + */ + void setOptimizerVersion(String optimizerVersion) throws SQLException; + + /** + * Gets the current query optimizer version of this connection. + * + * @return The query optimizer version that is currently used by this connection. 
+ */ + String getOptimizerVersion() throws SQLException; + + /** + * @return true if this connection has a transaction (that has not necessarily + * started). This method will only return false when the {@link Connection} is in autocommit + * mode and no explicit transaction has been started by executing `BEGIN TRANSACTION`. If the + * {@link Connection} is not in autocommit mode, there will always be a transaction. + */ + boolean isInTransaction() throws SQLException; + + /** + * @return true if this connection has a transaction that has started. A transaction + * is automatically started by the first statement that is executed in the transaction. + */ + boolean isTransactionStarted() throws SQLException; + + /** + * @return the commit {@link Timestamp} of the last read/write transaction. If the last + * transaction was not a read/write transaction, or a read/write transaction that did not + * return a commit timestamp because the transaction was not committed, the method will throw + * a {@link SQLException}. + */ + Timestamp getCommitTimestamp() throws SQLException; + + /** + * @return the {@link CommitResponse} of the last read/write transaction. If the last transaction + * was not a read/write transaction, or a read/write transaction that did not return a {@link + * CommitResponse} because the transaction was not committed, the method will throw a {@link + * SQLException}. The {@link CommitResponse} will include {@link CommitStats} if {@link + * #isReturnCommitStats()} returns true. + */ + CommitResponse getCommitResponse() throws SQLException; + + /** + * Sets whether this connection should request commit statistics from Cloud Spanner for read/write + * transactions and for DML statements in autocommit mode. + */ + void setReturnCommitStats(boolean returnCommitStats) throws SQLException; + + /** + * @return true if this connection requests commit statistics from Cloud Spanner. 
+ */ + boolean isReturnCommitStats() throws SQLException; + + /** + * @return the read {@link Timestamp} of the last read-only transaction. If the last transaction + * was not a read-only transaction, or a read-only transaction that did not return a read + * timestamp because no data was read, the method will throw a {@link SQLException}. + */ + Timestamp getReadTimestamp() throws SQLException; + + /** + * @return true if this connection will automatically retry read/write transactions + * that abort. This method may only be called when the connection is in read/write + * transactional mode and no transaction has been started yet. + */ + boolean isRetryAbortsInternally() throws SQLException; + + /** + * Sets whether this connection will internally retry read/write transactions that abort. The + * default is true. When internal retry is enabled, the {@link Connection} will keep + * track of a running SHA256 checksum of all {@link ResultSet}s that have been returned from Cloud + * Spanner. If the checksum that is calculated during an internal retry differs from the original + * checksum, the transaction will abort with an {@link + * AbortedDueToConcurrentModificationException}. + * + *

Note that retries of a read/write transaction that calls a non-deterministic function on + * Cloud Spanner, such as CURRENT_TIMESTAMP(), will never be successful, as the data returned + * during the retry will always be different from the original transaction. + * + *

It is also highly recommended that all queries in a read/write transaction have an ORDER BY + * clause that guarantees that the data is returned in the same order as in the original + * transaction if the transaction is internally retried. The most efficient way to achieve this is + * to always include the primary key columns at the end of the ORDER BY clause. + * + *

This method may only be called when the connection is in read/write transactional mode and + * no transaction has been started yet. + * + * @param retryAbortsInternally Set to true to internally retry transactions that are + * aborted by Spanner. When set to false, any database call on a transaction that + * has been aborted by Cloud Spanner will throw an {@link AbortedException} instead of being + * retried. Set this to false if your application already uses retry loops to handle {@link + * AbortedException}s. + */ + void setRetryAbortsInternally(boolean retryAbortsInternally) throws SQLException; + + /** Returns the current savepoint support for this connection. */ + SavepointSupport getSavepointSupport() throws SQLException; + + /** Sets how savepoints should be supported on this connection. */ + void setSavepointSupport(SavepointSupport savepointSupport) throws SQLException; + + /** + * Writes the specified mutation directly to the database and commits the change. The value is + * readable after the successful completion of this method. Writing multiple mutations to a + * database by calling this method multiple times mode is inefficient, as each call will need a + * round trip to the database. Instead, you should consider writing the mutations together by + * calling {@link CloudSpannerJdbcConnection#write(Iterable)}. + * + *

Calling this method is only allowed in autocommit mode. See {@link + * CloudSpannerJdbcConnection#bufferedWrite(Iterable)} for writing mutations in transactions. + * + * @param mutation The {@link Mutation} to write to the database. + * @throws SQLException if the {@link Connection} is not in autocommit mode or if the {@link + * Connection} is closed. + */ + void write(Mutation mutation) throws SQLException; + + /** + * Writes the specified mutations directly to the database and commits the changes. The values are + * readable after the successful completion of this method. + * + *

Calling this method is only allowed in autocommit mode. See {@link + * CloudSpannerJdbcConnection#bufferedWrite(Iterable)} for writing mutations in transactions. + * + * @param mutations The {@link Mutation}s to write to the database. + * @throws SQLException if the {@link Connection} is not in autocommit mode or if the {@link + * Connection} is closed. + */ + void write(Iterable mutations) throws SQLException; + + /** + * Buffers the given mutation locally on the current transaction of this {@link Connection}. The + * mutation will be written to the database at the next call to {@link Connection#commit()}. The + * value will not be readable on this {@link Connection} before the transaction is committed. + * + *

Calling this method is only allowed when not in autocommit mode. See {@link + * CloudSpannerJdbcConnection#write(Mutation)} for writing mutations in autocommit mode. + * + * @param mutation the {@link Mutation} to buffer for writing to the database on the next commit. + * @throws SQLException if the {@link Connection} is in autocommit mode or the {@link Connection} + * is closed. + */ + void bufferedWrite(Mutation mutation) throws SQLException; + + /** + * Buffers the given mutations locally on the current transaction of this {@link Connection}. The + * mutations will be written to the database at the next call to {@link Connection#commit()}. The + * values will not be readable on this {@link Connection} before the transaction is committed. + * + *

Calling this method is only allowed when not in autocommit mode. See {@link + * CloudSpannerJdbcConnection#write(Iterable)} for writing mutations in autocommit mode. + * + * @param mutations the {@link Mutation}s to buffer for writing to the database on the next + * commit. + * @throws SQLException if the {@link Connection} is in autocommit mode or the {@link Connection} + * is closed. + */ + void bufferedWrite(Iterable mutations) throws SQLException; + + /** + * @return a connection URL that can be used to create a new {@link Connection} that is equal to + * the initial state of this connection. If this connection was initially opened in read-only + * mode, and later changed to read-write, this will not be reflected in the connection URL + * that is returned. + */ + String getConnectionUrl(); + + /** + * @return The {@link Dialect} that is used by this connection. + */ + default Dialect getDialect() { + return Dialect.GOOGLE_STANDARD_SQL; + } + + /** + * Enable data boost for partitioned queries. See also {@link + * CloudSpannerJdbcStatement#partitionQuery(String, PartitionOptions, QueryOption...)} and {@link + * CloudSpannerJdbcPreparedStatement#partitionQuery(PartitionOptions, QueryOption...)}. + */ + default void setDataBoostEnabled(boolean dataBoostEnabled) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Returns whether data boost is enabled for partitioned queries. See also {@link + * CloudSpannerJdbcStatement#partitionQuery(String, PartitionOptions, QueryOption...)} and {@link + * CloudSpannerJdbcPreparedStatement#partitionQuery(PartitionOptions, QueryOption...)}. + */ + default boolean isDataBoostEnabled() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets whether this connection should always use partitioned queries when a query is executed on + * this connection. 
Setting this flag to true and then executing a query that cannot + * be partitioned, or executing a query in a read/write transaction, will cause an error. Use this + * flag in combination with {@link #setDataBoostEnabled(boolean)} to force all queries on this + * connection to use data boost. + */ + default void setAutoPartitionMode(boolean alwaysUsePartitionedQueries) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** Returns whether this connection will execute all queries as partitioned queries. */ + default boolean isAutoPartitionMode() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets the maximum number of partitions that should be included as a hint to Cloud Spanner when + * partitioning a query on this connection. Note that this is only a hint and Cloud Spanner might + * choose to ignore the hint. + */ + default void setMaxPartitions(int maxPartitions) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Gets the maximum number of partitions that should be included as a hint to Cloud Spanner when + * partitioning a query on this connection. Note that this is only a hint and Cloud Spanner might + * choose to ignore the hint. + */ + default int getMaxPartitions() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets the maximum degree of parallelism that is used when executing a partitioned query. A + * partitioned query will use up to maxThreads to execute and retrieve the results + * from Cloud Spanner. Set this value to 0> to use the number of available processors + * as returned by {@link Runtime#availableProcessors()}. + */ + default void setMaxPartitionedParallelism(int maxThreads) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** Returns the maximum degree of parallelism that is used for partitioned queries. 
*/ + default int getMaxPartitionedParallelism() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Enables or disables automatic batching of DML statements. When enabled, DML statements that are + * executed on this connection will be buffered in memory instead of actually being executed. The + * buffered DML statements are flushed to Spanner when a statement that cannot be part of a DML + * batch is executed on the connection. This can be a query, a DDL statement with a THEN RETURN + * clause, or a Commit call. The update count that is returned for DML statements that are + * buffered is determined by the value that has been set with {@link + * #setAutoBatchDmlUpdateCount(long)}. The default is 1. The connection verifies that the update + * counts that were returned while buffering DML statements match the actual update counts that + * are returned by Spanner when the batch is executed. This verification can be disabled by + * calling {@link #setAutoBatchDmlUpdateCountVerification(boolean)}. + */ + default void setAutoBatchDml(boolean autoBatchDml) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** Returns whether automatic DML batching is enabled on this connection. */ + default boolean isAutoBatchDml() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets the update count that is returned for DML statements that are buffered during an automatic + * DML batch. This value is only used if {@link #isAutoBatchDml()} is enabled. + */ + default void setAutoBatchDmlUpdateCount(long updateCount) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Returns the update count that is returned for DML statements that are buffered during an + * automatic DML batch. 
+ */ + default long getAutoBatchDmlUpdateCount() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets whether the update count that is returned by Spanner after executing an automatic DML + * batch should be verified against the update counts that were returned during the buffering of + * those statements. + */ + default void setAutoBatchDmlUpdateCountVerification(boolean verification) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** Indicates whether the update counts of automatic DML batches should be verified. */ + default boolean isAutoBatchDmlUpdateCountVerification() throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * @see + * com.google.cloud.spanner.connection.Connection#addTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener) + * @throws SQLException if the {@link Connection} is closed. + */ + void addTransactionRetryListener( + com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException; + + /** + * Use {@link + * #addTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener)} + * instead. + */ + @Deprecated + void addTransactionRetryListener(com.google.cloud.spanner.jdbc.TransactionRetryListener listener) + throws SQLException; + + /** + * @see + * com.google.cloud.spanner.connection.Connection#removeTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener) + * @throws SQLException if the {@link Connection} is closed. + */ + boolean removeTransactionRetryListener( + com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException; + + /** + * Use {@link + * #removeTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener)} + * instead. 
+ */ + @Deprecated + boolean removeTransactionRetryListener( + com.google.cloud.spanner.jdbc.TransactionRetryListener listener) throws SQLException; + + /** Use {@link #getTransactionRetryListenersFromConnection()} instead. */ + @Deprecated + Iterator getTransactionRetryListeners() + throws SQLException; + + /** + * @see com.google.cloud.spanner.connection.Connection#getTransactionRetryListeners() + * @throws SQLException if the {@link Connection} is closed. + */ + Iterator + getTransactionRetryListenersFromConnection() throws SQLException; + + /** + * Sets the proto descriptors to use for the next DDL statement (single or batch) that will be + * executed. The proto descriptor is automatically cleared after the statement is executed. + * + * @param protoDescriptors The proto descriptors to use with the next DDL statement (single or + * batch) that will be executed on this connection. + */ + default void setProtoDescriptors(@Nonnull byte[] protoDescriptors) throws SQLException { + throw new UnsupportedOperationException(); + } + + /** + * Sets the proto descriptors to use for the next DDL statement (single or batch) that will be + * executed. The proto descriptor is automatically cleared after the statement is executed. + * + * @param protoDescriptors The proto descriptors to use with the next DDL statement (single or + * batch) that will be executed on this connection. + */ + default void setProtoDescriptors(@Nonnull InputStream protoDescriptors) + throws SQLException, IOException { + throw new UnsupportedOperationException(); + } + + /** + * @return The proto descriptor that will be used with the next DDL statement (single or batch) + * that is executed on this connection. 
   */
  default byte[] getProtoDescriptors() throws SQLException {
    throw new UnsupportedOperationException();
  }
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.cloud.spanner.jdbc;

import com.google.cloud.spanner.Options.QueryOption;
import com.google.cloud.spanner.PartitionOptions;
import java.sql.ResultSet;

/**
 * Result set that is returned for partitioned queries, e.g. for 'run partitioned query select ...'
 * or for {@link CloudSpannerJdbcPreparedStatement#runPartitionedQuery(PartitionOptions,
 * QueryOption...)}.
 */
public interface CloudSpannerJdbcPartitionedQueryResultSet extends ResultSet {
  /** Returns the number of partitions that this result set contains. */
  int getNumPartitions();

  /** Returns the degree of parallelism that this result set uses. */
  int getParallelism();
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.jdbc;

import com.google.cloud.spanner.Options.QueryOption;
import com.google.cloud.spanner.PartitionOptions;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

/**
 * This interface is implemented by {@link PreparedStatement}s that are created on Cloud Spanner
 * JDBC connections.
 */
public interface CloudSpannerJdbcPreparedStatement extends PreparedStatement {

  /**
   * Partitions this query, so it can be executed in parallel. This method returns a {@link
   * ResultSet} with a string-representation of the partitions that were created. These strings can
   * be used to execute a partition either on this connection or on any other connection (on this
   * host or on any other host) by calling the method {@link #runPartition()}.
   * This method will automatically enable data boost for the query if {@link
   * CloudSpannerJdbcConnection#isDataBoostEnabled()} returns true.
   */
  ResultSet partitionQuery(PartitionOptions partitionOptions, QueryOption... options)
      throws SQLException;

  /**
   * Executes the given partition of a query. The partition that should be executed must be set as a
   * string parameter on this {@link PreparedStatement} using {@link #setString(int, String)}. The
   * value should be a string that was returned by {@link #partitionQuery(PartitionOptions,
   * QueryOption...)}.
   */
  ResultSet runPartition() throws SQLException;

  /**
   * Executes the given query as a partitioned query. The query will first be partitioned using the
   * {@link #partitionQuery(PartitionOptions, QueryOption...)} method. Each of the partitions will
   * then be executed in the background, and the results will be merged into a single result set.
   *
   * <p>This method will use {@link CloudSpannerJdbcConnection#getMaxPartitionedParallelism()}
   * threads to execute the partitioned query. Set this variable to a higher/lower value to
   * increase/decrease the degree of parallelism used for execution.
   */
  CloudSpannerJdbcPartitionedQueryResultSet runPartitionedQuery(
      PartitionOptions partitionOptions, QueryOption... options) throws SQLException;
}
/*
 * Copyright 2023 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.jdbc;

import com.google.cloud.spanner.Options.QueryOption;
import com.google.cloud.spanner.PartitionOptions;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * This interface is implemented by {@link Statement}s that are created on Cloud Spanner JDBC
 * connections.
 */
public interface CloudSpannerJdbcStatement extends Statement {

  /**
   * Partitions the given query, so it can be executed in parallel. This method returns a {@link
   * ResultSet} with a string-representation of the partitions that were created.
   * These strings can
   * be used to execute a partition either on this connection or on any other connection (on this
   * host or on any other host) by calling the method {@link #runPartition(String)}. This method
   * will automatically enable data boost for the query if {@link
   * CloudSpannerJdbcConnection#isDataBoostEnabled()} returns true.
   */
  ResultSet partitionQuery(String query, PartitionOptions partitionOptions, QueryOption... options)
      throws SQLException;

  /**
   * Executes the given partition of a query. The encodedPartitionId should be a string that was
   * returned by {@link #partitionQuery(String, PartitionOptions, QueryOption...)}.
   */
  ResultSet runPartition(String encodedPartitionId) throws SQLException;

  /**
   * Executes the given query as a partitioned query. The query will first be partitioned using the
   * {@link #partitionQuery(String, PartitionOptions, QueryOption...)} method. Each of the
   * partitions will then be executed in the background, and the results will be merged into a
   * single result set.
   *
   * <p>This method will use {@link CloudSpannerJdbcConnection#getMaxPartitionedParallelism()}
   * threads to execute the partitioned query. Set this variable to a higher/lower value to
   * increase/decrease the degree of parallelism used for execution.
   */
  CloudSpannerJdbcPartitionedQueryResultSet runPartitionedQuery(
      String query, PartitionOptions partitionOptions, QueryOption... options) throws SQLException;
}
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.connection.ConnectionProperties; +import com.google.cloud.spanner.connection.ConnectionProperty; +import com.google.common.collect.ImmutableList; +import java.sql.DriverPropertyInfo; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +class ConnectionPropertiesHelper { + static ImmutableList> VALID_CONNECTION_PROPERTIES = + ImmutableList.copyOf( + ConnectionProperties.VALID_CONNECTION_PROPERTIES.stream() + .sorted(Comparator.comparing(ConnectionProperty::getName)) + .collect(Collectors.toList())); + + static DriverPropertyInfo toDriverPropertyInfo( + String connectionUri, ConnectionProperty connectionProperty) { + DriverPropertyInfo result = + new DriverPropertyInfo( + connectionProperty.getName(), + parseUriProperty( + connectionUri, + connectionProperty.getName(), + connectionProperty.getDefaultValue() == null + ? null + : connectionProperty.getDefaultValue().toString())); + result.description = connectionProperty.getDescription(); + result.choices = + connectionProperty.getValidValues() == null + ? 
null + : Arrays.stream(connectionProperty.getValidValues()) + .map(Objects::toString) + .toArray(String[]::new); + return result; + } + + static String getConnectionPropertyName(ConnectionProperty connectionProperty) { + return connectionProperty.getName(); + } + + private static String parseUriProperty(String uri, String property, String defaultValue) { + Pattern pattern = Pattern.compile(String.format("(?is)(?:;|\\?)%s=(.*?)(?:;|$)", property)); + Matcher matcher = pattern.matcher(uri); + if (matcher.find() && matcher.groupCount() == 1) { + return matcher.group(1); + } + return defaultValue; + } + + private ConnectionPropertiesHelper() {} +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/IsolationLevelConverter.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/IsolationLevelConverter.java new file mode 100644 index 000000000000..cabf08057734 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/IsolationLevelConverter.java @@ -0,0 +1,54 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; + +class IsolationLevelConverter { + static IsolationLevel convertToSpanner(int jdbcIsolationLevel) throws SQLException { + switch (jdbcIsolationLevel) { + case Connection.TRANSACTION_SERIALIZABLE: + return IsolationLevel.SERIALIZABLE; + case Connection.TRANSACTION_REPEATABLE_READ: + return IsolationLevel.REPEATABLE_READ; + case Connection.TRANSACTION_READ_COMMITTED: + case Connection.TRANSACTION_READ_UNCOMMITTED: + case Connection.TRANSACTION_NONE: + throw new SQLFeatureNotSupportedException( + "Unsupported JDBC isolation level: " + jdbcIsolationLevel); + default: + throw new IllegalArgumentException("Invalid JDBC isolation level: " + jdbcIsolationLevel); + } + } + + static int convertToJdbc(IsolationLevel isolationLevel) { + switch (isolationLevel) { + // Translate UNSPECIFIED to SERIALIZABLE as that is the default isolation level. + case ISOLATION_LEVEL_UNSPECIFIED: + case SERIALIZABLE: + return Connection.TRANSACTION_SERIALIZABLE; + case REPEATABLE_READ: + return Connection.TRANSACTION_REPEATABLE_READ; + default: + throw new IllegalArgumentException( + "Unknown or unsupported isolation level: " + isolationLevel); + } + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcArray.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcArray.java new file mode 100644 index 000000000000..3cb523186517 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcArray.java @@ -0,0 +1,393 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.ValueBinder; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Message; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.rpc.Code; +import java.math.BigDecimal; +import java.sql.Array; +import java.sql.Date; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +/** Implementation of java.sql.Array for Google Cloud Spanner */ +class JdbcArray implements Array { + private static final String FREE_EXCEPTION = + "free() has been called, array is no longer available"; + + private final JdbcDataType type; + private Object data; + private boolean freed = false; + + /** + * Create a JDBC {@link Array} from the given type name and array elements. + * + * @param typeName The Google Cloud Spanner type name to be used as the base type of the array. + * @param elements The elements to store in the array. + * @return the initialized {@link Array}. 
+ * @throws SQLException if the type name is not a valid Cloud Spanner type or if the contents of + * the elements array is not compatible with the base type of the array. + */ + static JdbcArray createArray(String typeName, Object[] elements) throws SQLException { + if (typeName != null) { + for (JdbcDataType type : JdbcDataType.values()) { + if (type.matches(typeName)) { + return new JdbcArray(type, elements); + } + } + } + throw JdbcSqlExceptionFactory.of( + "Data type " + typeName + " is unknown", Code.INVALID_ARGUMENT); + } + + /** + * Create a JDBC {@link Array} from the given type name and list. + * + * @param type The Google Cloud Spanner type to be used as the base type of the array. + * @param elements The elements to store in the array. + * @return the initialized {@link Array}. + */ + static JdbcArray createArray(JdbcDataType type, List elements) { + return new JdbcArray(type, elements); + } + + private JdbcArray(JdbcDataType type, Object[] elements) throws SQLException { + this.type = type; + if (elements != null) { + if ((type.getCode() == Type.Code.PROTO + && AbstractMessage[].class.isAssignableFrom(elements.getClass())) + || (type.getCode() == Type.Code.ENUM + && ProtocolMessageEnum[].class.isAssignableFrom(elements.getClass()))) { + this.data = + java.lang.reflect.Array.newInstance( + elements.getClass().getComponentType(), elements.length); + System.arraycopy(elements, 0, this.data, 0, elements.length); + } else if (type == JdbcDataType.INT64 && requiresWideningToLong(elements)) { + // Convert Byte[], Short[], and Integer[] to Long[] for INT64 type + // since Spanner only supports ARRAY + this.data = convertToLongArray(elements); + } else { + this.data = java.lang.reflect.Array.newInstance(type.getJavaClass(), elements.length); + try { + System.arraycopy(elements, 0, this.data, 0, elements.length); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of( + "Could not copy array elements. 
Make sure the supplied array only contains elements of class " + + type.getJavaClass().getName(), + Code.UNKNOWN, + e); + } + } + } + } + + private static boolean requiresWideningToLong(Object[] elements) { + Class componentType = elements.getClass().getComponentType(); + return componentType == Byte.class + || componentType == Short.class + || componentType == Integer.class; + } + + private static Long[] convertToLongArray(Object[] elements) { + Long[] longElements = new Long[elements.length]; + for (int i = 0; i < elements.length; i++) { + if (elements[i] != null) { + longElements[i] = ((Number) elements[i]).longValue(); + } + } + return longElements; + } + + private JdbcArray(JdbcDataType type, List elements) { + this.type = type; + if (elements != null) { + this.data = java.lang.reflect.Array.newInstance(type.getJavaClass(), elements.size()); + elements.toArray((Object[]) data); + } + } + + private void checkFree() throws SQLException { + if (freed) { + throw JdbcSqlExceptionFactory.of(FREE_EXCEPTION, Code.FAILED_PRECONDITION); + } + } + + @Override + public String getBaseTypeName() throws SQLException { + checkFree(); + return type.getTypeName(); + } + + @Override + public int getBaseType() throws SQLException { + checkFree(); + return type.getSqlType(); + } + + @Override + public Object getArray() throws SQLException { + checkFree(); + return data; + } + + @Override + public Object getArray(Map> map) throws SQLException { + checkFree(); + return data; + } + + @Override + public Object getArray(long index, int count) throws SQLException { + checkFree(); + return getArray(index, count, null); + } + + @Override + public Object getArray(long index, int count, Map> map) throws SQLException { + checkFree(); + if (this.data != null) { + Object res; + if ((this.type.getCode() == Type.Code.PROTO + && AbstractMessage[].class.isAssignableFrom(this.data.getClass())) + || (this.type.getCode() == Type.Code.ENUM + && 
ProtocolMessageEnum[].class.isAssignableFrom(this.data.getClass()))) { + res = java.lang.reflect.Array.newInstance(this.data.getClass().getComponentType(), count); + } else { + res = java.lang.reflect.Array.newInstance(this.type.getJavaClass(), count); + } + System.arraycopy(this.data, (int) index - 1, res, 0, count); + return res; + } + return null; + } + + private static final String RESULTSET_WITH_TYPE_MAPPING_NOT_SUPPORTED = + "Getting a ResultSet with a custom type mapping from an array is not supported"; + + @Override + public ResultSet getResultSet() throws SQLException { + return getResultSet(1L, Integer.MAX_VALUE); + } + + @Override + public ResultSet getResultSet(Map> map) throws SQLException { + throw new SQLFeatureNotSupportedException(RESULTSET_WITH_TYPE_MAPPING_NOT_SUPPORTED); + } + + @Override + public ResultSet getResultSet(long startIndex, int count) throws SQLException { + JdbcPreconditions.checkArgument( + startIndex + count - 1L <= Integer.MAX_VALUE, + String.format("End index cannot exceed %d", Integer.MAX_VALUE)); + JdbcPreconditions.checkArgument(startIndex >= 1L, "Start index must be >= 1"); + JdbcPreconditions.checkArgument(count >= 0, "Count must be >= 0"); + checkFree(); + Type spannerTypeForProto = getSpannerTypeForProto(); + Type spannerType = + spannerTypeForProto == null ? this.type.getSpannerType() : spannerTypeForProto; + + ImmutableList.Builder rows = ImmutableList.builder(); + int added = 0; + if (this.data != null) { + // Note that array index in JDBC is base-one. 
+ for (int index = (int) startIndex; + added < count && index <= ((Object[]) this.data).length; + index++) { + Object value = ((Object[]) this.data)[index - 1]; + ValueBinder binder = + Struct.newBuilder().set("INDEX").to(index).set("VALUE"); + Struct.Builder builder; + switch (this.type.getCode()) { + case BOOL: + builder = binder.to((Boolean) value); + break; + case BYTES: + builder = binder.to(ByteArray.copyFrom((byte[]) value)); + break; + case PROTO: + if (value == null && AbstractMessage[].class.isAssignableFrom(this.data.getClass())) { + builder = binder.to((ByteArray) null, spannerType.getProtoTypeFqn()); + } else if (value instanceof AbstractMessage) { + builder = binder.to((AbstractMessage) value); + } else { + builder = binder.to(value != null ? ByteArray.copyFrom((byte[]) value) : null); + } + break; + case DATE: + builder = binder.to(JdbcTypeConverter.toGoogleDate((Date) value)); + break; + case FLOAT32: + builder = binder.to((Float) value); + break; + case FLOAT64: + builder = binder.to((Double) value); + break; + case INT64: + builder = binder.to((Long) value); + break; + case ENUM: + if (value == null + && ProtocolMessageEnum[].class.isAssignableFrom(this.data.getClass())) { + builder = binder.to((Long) null, spannerType.getProtoTypeFqn()); + } else if (value instanceof ProtocolMessageEnum) { + builder = binder.to((ProtocolMessageEnum) value); + } else { + builder = binder.to((Long) value); + } + break; + case NUMERIC: + builder = binder.to((BigDecimal) value); + break; + case STRING: + builder = binder.to((String) value); + break; + case JSON: + builder = binder.to(Value.json((String) value)); + break; + case PG_JSONB: + builder = binder.to(Value.pgJsonb((String) value)); + break; + case TIMESTAMP: + builder = binder.to(JdbcTypeConverter.toGoogleTimestamp((Timestamp) value)); + break; + case ARRAY: + case STRUCT: + default: + throw new SQLFeatureNotSupportedException( + String.format( + "Array of type %s cannot be converted to a ResultSet", + 
this.type.getCode().name())); + } + rows.add(builder.build()); + added++; + if (added == count) { + break; + } + } + } + + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("INDEX", Type.int64()), StructField.of("VALUE", spannerType)), + rows.build())); + } + + // Returns null if the type is not a PROTO or ENUM + private Type getSpannerTypeForProto() throws SQLException { + Type spannerType = null; + if (this.data != null) { + if (this.type.getCode() == Type.Code.PROTO + && AbstractMessage[].class.isAssignableFrom(this.data.getClass())) { + spannerType = createSpannerProtoType(); + } else if (this.type.getCode() == Type.Code.ENUM + && ProtocolMessageEnum[].class.isAssignableFrom(this.data.getClass())) { + spannerType = createSpannerProtoEnumType(); + } + } + return spannerType; + } + + private Type createSpannerProtoType() throws SQLException { + Class componentType = this.data.getClass().getComponentType(); + try { + Message.Builder builder = + (Message.Builder) componentType.getMethod("newBuilder").invoke(null); + Descriptor msgDescriptor = builder.getDescriptorForType(); + return Type.proto(msgDescriptor.getFullName()); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of( + "Error occurred when getting proto message descriptor from data", Code.UNKNOWN, e); + } + } + + private Type createSpannerProtoEnumType() throws SQLException { + Class componentType = this.data.getClass().getComponentType(); + try { + Descriptors.EnumDescriptor enumDescriptor = + (Descriptors.EnumDescriptor) componentType.getMethod("getDescriptor").invoke(null); + return Type.protoEnum(enumDescriptor.getFullName()); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of( + "Error occurred when getting proto enum descriptor from data", Code.UNKNOWN, e); + } + } + + @Override + public ResultSet getResultSet(long index, int count, Map> map) + throws SQLException { + throw new SQLFeatureNotSupportedException(RESULTSET_WITH_TYPE_MAPPING_NOT_SUPPORTED); + 
} + + @Override + public void free() { + this.freed = true; + this.data = null; + } + + @Override + public String toString() { + if (data == null) { + return "null"; + } + boolean first = true; + StringBuilder builder = new StringBuilder("{"); + for (Object o : (Object[]) data) { + if (!first) { + builder.append(","); + } + first = false; + if (o == null) { + builder.append("null"); + } else { + builder.append(o); + } + } + builder.append("}"); + return builder.toString(); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof JdbcArray)) return false; + JdbcArray array = (JdbcArray) other; + return this.type == array.type + && Arrays.deepEquals((Object[]) this.data, (Object[]) array.data); + } + + @Override + public int hashCode() { + return this.type.hashCode() ^ Arrays.deepHashCode((Object[]) data); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcBlob.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcBlob.java new file mode 100644 index 000000000000..5a1566f7e4f9 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcBlob.java @@ -0,0 +1,271 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.common.base.Preconditions; +import com.google.rpc.Code; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.Blob; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * Simple {@link Blob} implementation for Google Cloud Spanner. The value is mapped to a byte array + * in memory. The {@link Blob} data type can be used in combination with the BYTES Cloud Spanner + * data type. + */ +class JdbcBlob implements Blob { + private byte[] value = new byte[0]; + + /** Creates an empty blob. */ + JdbcBlob() {} + + /** Creates a blob with the specified bytes as its value. */ + JdbcBlob(byte[] value) { + this.value = value; + } + + private void checkPosition(long pos) { + Preconditions.checkArgument( + pos + 1 <= Integer.MAX_VALUE, + "position larger than " + Integer.MAX_VALUE + " is not supported"); + } + + private void checkLength(long length) { + Preconditions.checkArgument( + length <= Integer.MAX_VALUE, + "length larger than " + Integer.MAX_VALUE + " is not supported"); + } + + private void checkPositionPlusLength(long pos, long length) { + Preconditions.checkArgument( + pos + 1 + length <= Integer.MAX_VALUE, + "position+length larger than " + Integer.MAX_VALUE + " is not supported"); + } + + @Override + public long length() { + return value.length; + } + + @Override + public byte[] getBytes(long pos, int length) throws SQLException { + JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1"); + JdbcPreconditions.checkArgument(length >= 0, "length must be >= 0"); + checkPosition(pos); + checkPositionPlusLength(pos, length); + int end = (int) pos + length - 1; + int blobLength = (int) length(); + if (end > blobLength) { + length = blobLength - (int) pos + 1; + } + byte[] res = new byte[length]; + System.arraycopy(value, (int) pos - 1, res, 0, 
length); + return res; + } + + @Override + public InputStream getBinaryStream() { + return new ByteArrayInputStream(value); + } + + @Override + public long position(byte[] pattern, long start) throws SQLException { + JdbcPreconditions.checkArgument( + pattern != null && pattern.length > 0, "pattern must not be empty"); + JdbcPreconditions.checkArgument(start > 0L, "start must be >= 1"); + checkPosition(start); + for (int outerIndex = (int) start - 1; outerIndex < value.length; outerIndex++) { + int innerIndex = 0; + int valueIndex = outerIndex; + while (valueIndex < value.length + && innerIndex < pattern.length + && value[valueIndex] == pattern[innerIndex]) { + innerIndex++; + valueIndex++; + } + if (innerIndex == pattern.length) { + return outerIndex + 1; + } + } + return -1; + } + + @Override + public long position(Blob pattern, long start) throws SQLException { + JdbcPreconditions.checkArgument(pattern != null, "pattern must not be empty"); + JdbcPreconditions.checkArgument(start > 0L, "start must be >= 1"); + checkPosition(start); + byte[] buffer = new byte[1024]; + int totalSize = 0; + List totalBytes = new ArrayList<>(); + try (InputStream is = pattern.getBinaryStream()) { + int bytesRead; + while ((bytesRead = is.read(buffer)) > -1) { + if (bytesRead == buffer.length) { + totalBytes.add(buffer); + } else { + byte[] dest = new byte[bytesRead]; + System.arraycopy(buffer, 0, dest, 0, bytesRead); + totalBytes.add(dest); + } + totalSize += bytesRead; + buffer = new byte[1024]; + } + } catch (IOException e) { + throw JdbcSqlExceptionFactory.of("reading blob failed", Code.UNKNOWN, e); + } + byte[] bytePattern = new byte[totalSize]; + int index = 0; + for (byte[] b : totalBytes) { + System.arraycopy(b, 0, bytePattern, index, b.length); + index += b.length; + } + return position(bytePattern, start); + } + + private void setLength(int length) { + int prevLength = value.length; + byte[] newValue = new byte[length]; + System.arraycopy(value, 0, newValue, 0, 
Math.min(prevLength, newValue.length)); + value = newValue; + } + + @Override + public int setBytes(long pos, byte[] bytes) throws SQLException { + JdbcPreconditions.checkArgument(bytes != null, "bytes must be non-null"); + JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1"); + checkPosition(pos); + int end = (int) pos + bytes.length - 1; + if (end >= value.length) { + setLength(end); + } + System.arraycopy(bytes, 0, value, (int) pos - 1, bytes.length); + return bytes.length; + } + + @Override + public int setBytes(long pos, byte[] bytes, int offset, int len) throws SQLException { + JdbcPreconditions.checkArgument(bytes != null, "bytes must be non-null"); + JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1"); + JdbcPreconditions.checkArgument(offset >= 0, "offset must be >= 0"); + JdbcPreconditions.checkArgument(len >= 0, "len must be >= 0"); + checkPosition(pos); + if (offset > bytes.length) { + offset = 0; + len = 0; + } else { + if (offset + len > bytes.length) { + len = bytes.length - offset; + } + } + int end = (int) pos + len - 1; + if (end >= value.length) { + setLength(end); + } + System.arraycopy(bytes, offset, value, (int) pos - 1, len); + return len; + } + + private final class BlobOutputStream extends OutputStream { + private byte[] buffer = new byte[1024]; + private int currentPos = 0; + private int blobPosition; + + private BlobOutputStream(int pos) { + blobPosition = pos; + } + + @Override + public void write(int b) { + if (currentPos >= buffer.length) { + byte[] newBuffer = new byte[buffer.length * 2]; + System.arraycopy(buffer, 0, newBuffer, 0, buffer.length); + buffer = newBuffer; + } + buffer[currentPos] = (byte) b; + currentPos++; + } + + @Override + public void flush() throws IOException { + try { + setBytes(blobPosition, buffer, 0, currentPos); + blobPosition += currentPos; + currentPos = 0; + Arrays.fill(buffer, (byte) 0); + } catch (SQLException e) { + throw new IOException(e); + } + } + + @Override + public void close() 
throws IOException { + flush(); + } + } + + @Override + public OutputStream setBinaryStream(long pos) throws SQLException { + JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1"); + checkPosition(pos); + return new BlobOutputStream((int) pos); + } + + @Override + public void truncate(long len) throws SQLException { + JdbcPreconditions.checkArgument(len >= 0, "len must be >= 0"); + checkLength(len); + setLength((int) len); + } + + @Override + public void free() { + setLength(0); + } + + @Override + public InputStream getBinaryStream(long pos, long length) throws SQLException { + JdbcPreconditions.checkArgument(pos > 0, "pos must be >= 1"); + JdbcPreconditions.checkArgument(length >= 0, "length must be >= 0"); + checkPosition(pos); + checkPositionPlusLength(pos, length); + if (pos + length > value.length) { + length = value.length - pos + 1; + } + byte[] buffer = new byte[(int) length]; + System.arraycopy(value, (int) pos - 1, buffer, 0, (int) length); + return new ByteArrayInputStream(buffer); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof JdbcBlob)) return false; + JdbcBlob blob = (JdbcBlob) other; + return Arrays.equals(this.value, blob.value); + } + + @Override + public int hashCode() { + return Arrays.hashCode(value); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcClob.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcClob.java new file mode 100644 index 000000000000..2939dee1afb9 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcClob.java @@ -0,0 +1,211 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Reader; +import java.io.StringReader; +import java.io.StringWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.sql.Clob; +import java.sql.NClob; +import java.sql.SQLException; + +/** + * Simple implementation of {@link Clob} and {@link NClob} for Google Cloud Spanner. The value is + * mapped to a {@link StringBuilder} in memory. {@link Clob} and {@link NClob} can be used with the + * STRING data type of Cloud Spanner. 
+ */ +class JdbcClob implements NClob { + private StringBuilder value = new StringBuilder(); + + JdbcClob() {} + + JdbcClob(String value) { + this.value.append(value); + } + + private void checkPosition(long pos) { + Preconditions.checkArgument( + pos + 1 <= Integer.MAX_VALUE, + "position larger than " + Integer.MAX_VALUE + " is not supported"); + } + + private void checkLength(long length) { + Preconditions.checkArgument( + length <= Integer.MAX_VALUE, + "length larger than " + Integer.MAX_VALUE + " is not supported"); + } + + private void checkPositionPlusLength(long pos, long length) { + Preconditions.checkArgument( + pos + 1 + length <= Integer.MAX_VALUE, + "position+length larger than " + Integer.MAX_VALUE + " is not supported"); + } + + @Override + public long length() { + return value.length(); + } + + @Override + public String getSubString(long pos, int length) throws SQLException { + JdbcPreconditions.checkArgument(pos >= 1, "Start position must be >= 1"); + JdbcPreconditions.checkArgument(length >= 0, "Length must be >= 0"); + checkPosition(pos); + checkPositionPlusLength(pos, length); + if (pos > length()) { + return ""; + } + int end = (int) pos + length - 1; + if (end >= value.length()) { + end = value.length(); + } + return value.substring((int) pos - 1, end); + } + + @Override + public Reader getCharacterStream() { + return new StringReader(value.toString()); + } + + @Override + public InputStream getAsciiStream() { + return new ByteArrayInputStream(StandardCharsets.US_ASCII.encode(value.toString()).array()); + } + + @Override + public long position(String searchStr, long start) throws SQLException { + JdbcPreconditions.checkArgument(start >= 1, "Start position must be >= 1"); + JdbcPreconditions.checkArgument(searchStr != null, "searchStr may not be null"); + checkPosition(start); + int res = value.indexOf(searchStr, (int) start - 1); + if (res == -1) { + return res; + } + return res + 1; + } + + @Override + public long position(Clob searchStr, long 
start) throws SQLException { + JdbcPreconditions.checkArgument(start >= 1, "Start position must be >= 1"); + JdbcPreconditions.checkArgument(searchStr != null, "searchStr may not be null"); + checkPosition(start); + checkPositionPlusLength(start, searchStr.length()); + int res = value.indexOf(searchStr.getSubString(1L, (int) searchStr.length()), (int) start - 1); + if (res == -1) { + return res; + } + return res + 1; + } + + @Override + public int setString(long pos, String str) throws SQLException { + JdbcPreconditions.checkArgument(str != null, "str is null"); + JdbcPreconditions.checkArgument(pos >= 1, "Position must be >= 1"); + checkPosition(pos); + int ipos = (int) pos; + if ((ipos - 1) > value.length()) { + value.append(Strings.repeat(" ", ipos - value.length() - 1)); + } + value.replace(ipos - 1, ipos + str.length() - 1, str); + return str.length(); + } + + @Override + public int setString(long pos, String str, int offset, int len) throws SQLException { + JdbcPreconditions.checkArgument(str != null, "str is null"); + JdbcPreconditions.checkArgument(pos >= 1, "Position must be >= 1"); + JdbcPreconditions.checkArgument(offset >= 1, "Offset must be >= 1"); + JdbcPreconditions.checkArgument( + offset + len - 1 <= str.length(), "offset + len is greater than str.length()"); + checkPosition(pos); + return setString(pos, str.substring(offset - 1, offset + len - 1)); + } + + private class ClobWriter extends StringWriter { + private final long startPos; + + private ClobWriter(long startPos) { + this.startPos = startPos; + } + + @Override + public void flush() { + try { + setString(startPos, getBuffer().toString()); + } catch (SQLException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() { + flush(); + } + } + + @Override + public OutputStream setAsciiStream(long pos) throws SQLException { + throw JdbcSqlExceptionFactory.unsupported( + "setAsciiStream is not supported. 
Use setCharacterStream instead"); + } + + @Override + public Writer setCharacterStream(long pos) throws SQLException { + JdbcPreconditions.checkArgument(pos >= 1, "pos must be >= 1"); + return new ClobWriter(pos); + } + + @Override + public void truncate(long len) throws SQLException { + JdbcPreconditions.checkArgument(len >= 0, "len must be >= 0"); + checkLength(len); + value.setLength((int) len); + } + + @Override + public void free() { + value = new StringBuilder(); + } + + @Override + public Reader getCharacterStream(long pos, long length) throws SQLException { + JdbcPreconditions.checkArgument(pos >= 1, "pos must be >= 1"); + JdbcPreconditions.checkArgument(length >= 0, "length must be >= 0"); + checkPosition(pos); + checkPositionPlusLength(pos, length); + return new StringReader(value.substring((int) pos - 1, (int) pos + (int) length - 1)); + } + + @Override + public boolean equals(Object other) { + if (!(other instanceof JdbcClob)) return false; + JdbcClob blob = (JdbcClob) other; + return value.toString().equals(blob.value.toString()); + } + + @Override + public int hashCode() { + return value.toString().hashCode(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConnection.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConnection.java new file mode 100644 index 000000000000..8041e8d68e3a --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConnection.java @@ -0,0 +1,933 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcStatement.ALL_COLUMNS; +import static com.google.cloud.spanner.jdbc.JdbcStatement.isNullOrEmpty; + +import com.google.api.client.util.Preconditions; +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.TimestampBound; +import com.google.cloud.spanner.connection.AutocommitDmlMode; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.connection.ConnectionProperties; +import com.google.cloud.spanner.connection.SavepointSupport; +import com.google.cloud.spanner.connection.TransactionMode; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterators; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import java.io.IOException; +import java.io.InputStream; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.UUID; +import java.util.function.BiConsumer; +import java.util.function.Function; +import javax.annotation.Nonnull; + +/** Jdbc Connection class for Google Cloud Spanner */ +class JdbcConnection 
extends AbstractJdbcConnection { + private static final String ONLY_RS_FORWARD_ONLY = + "Only result sets of type TYPE_FORWARD_ONLY are supported"; + private static final String ONLY_CONCUR_READ_ONLY = + "Only result sets with concurrency CONCUR_READ_ONLY are supported"; + private static final String ONLY_CLOSE_CURSORS_AT_COMMIT = + "Only result sets with holdability CLOSE_CURSORS_AT_COMMIT are supported"; + + /** + * This query is used to check the aliveness of the connection if legacy alive check has been + * enabled. As Cloud Spanner JDBC connections do not maintain a physical or logical connection to + * Cloud Spanner, there is also no point in repeatedly executing a simple query to check whether a + * connection is alive. Instead, we rely on the result from the initial query to Spanner that + * determines the dialect to determine whether the connection is alive or not. This result is + * cached for all JDBC connections using the same {@link com.google.cloud.spanner.Spanner} + * instance. + * + *

The legacy {@link #isValid(int)} check using a SELECT 1 statement can be enabled by setting + * the System property spanner.jdbc.use_legacy_is_valid_check to true or setting the environment + * variable SPANNER_JDBC_USE_LEGACY_IS_VALID_CHECK to true. + */ + static final String LEGACY_IS_VALID_QUERY = "SELECT 1"; + + static final ImmutableList NO_GENERATED_KEY_COLUMNS = ImmutableList.of(); + + private Map> typeMap = new HashMap<>(); + + private final boolean useLegacyIsValidCheck; + + private final Metrics metrics; + + private final Attributes openTelemetryMetricsAttributes; + + JdbcConnection(String connectionUrl, ConnectionOptions options) throws SQLException { + super(connectionUrl, options); + this.useLegacyIsValidCheck = useLegacyValidCheck(); + OpenTelemetry openTelemetry = getSpanner().getOptions().getOpenTelemetry(); + this.openTelemetryMetricsAttributes = + createOpenTelemetryAttributes(getConnectionOptions().getDatabaseId(), false); + this.metrics = new Metrics(openTelemetry); + } + + static boolean useLegacyValidCheck() { + String value = System.getProperty("spanner.jdbc.use_legacy_is_valid_check"); + if (Strings.isNullOrEmpty(value)) { + value = System.getenv("SPANNER_JDBC_USE_LEGACY_IS_VALID_CHECK"); + } + if (!Strings.isNullOrEmpty(value)) { + return Boolean.parseBoolean(value); + } + return false; + } + + @VisibleForTesting + static Attributes createOpenTelemetryAttributes( + DatabaseId databaseId, boolean includeConnectionId) { + AttributesBuilder attributesBuilder = Attributes.builder(); + // A unique connection ID should only be included for tracing and not for metrics. 
+ if (includeConnectionId) { + attributesBuilder.put("connection_id", UUID.randomUUID().toString()); + } + attributesBuilder.put("database", databaseId.getDatabase()); + attributesBuilder.put("instance_id", databaseId.getInstanceId().getInstance()); + attributesBuilder.put("project_id", databaseId.getInstanceId().getProject()); + return attributesBuilder.build(); + } + + public void recordClientLibLatencyMetric(long value) { + metrics.recordClientLibLatency(value, openTelemetryMetricsAttributes); + } + + @Override + public Statement createStatement() throws SQLException { + checkClosed(); + return new JdbcStatement(this); + } + + @Override + public JdbcPreparedStatement prepareStatement(String sql) throws SQLException { + return prepareStatement(sql, NO_GENERATED_KEY_COLUMNS); + } + + private JdbcPreparedStatement prepareStatement( + String sql, ImmutableList generatedKeyColumns) throws SQLException { + checkClosed(); + return new JdbcPreparedStatement(this, sql, generatedKeyColumns); + } + + @Override + public String nativeSQL(String sql) throws SQLException { + checkClosed(); + return getParser() + .convertPositionalParametersToNamedParameters('?', getParser().removeCommentsAndTrim(sql)) + .sqlWithNamedParameters; + } + + @Override + public String getStatementTag() throws SQLException { + checkClosed(); + return getSpannerConnection().getStatementTag(); + } + + @Override + public void setStatementTag(String tag) throws SQLException { + checkClosed(); + try { + getSpannerConnection().setStatementTag(tag); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public String getTransactionTag() throws SQLException { + checkClosed(); + return getSpannerConnection().getTransactionTag(); + } + + @Override + public void setTransactionTag(String tag) throws SQLException { + checkClosed(); + try { + getSpannerConnection().setTransactionTag(tag); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + 
@Override + public void setTransactionMode(TransactionMode mode) throws SQLException { + checkClosed(); + getSpannerConnection().setTransactionMode(mode); + } + + @Override + public TransactionMode getTransactionMode() throws SQLException { + checkClosed(); + return getSpannerConnection().getTransactionMode(); + } + + @Override + public void setAutocommitDmlMode(AutocommitDmlMode mode) throws SQLException { + checkClosed(); + getSpannerConnection().setAutocommitDmlMode(mode); + } + + @Override + public AutocommitDmlMode getAutocommitDmlMode() throws SQLException { + checkClosed(); + return getSpannerConnection().getAutocommitDmlMode(); + } + + @Override + public void setReadOnlyStaleness(TimestampBound staleness) throws SQLException { + checkClosed(); + getSpannerConnection().setReadOnlyStaleness(staleness); + } + + @Override + public TimestampBound getReadOnlyStaleness() throws SQLException { + checkClosed(); + return getSpannerConnection().getReadOnlyStaleness(); + } + + @Override + public void setOptimizerVersion(String optimizerVersion) throws SQLException { + checkClosed(); + getSpannerConnection().setOptimizerVersion(optimizerVersion); + } + + @Override + public String getOptimizerVersion() throws SQLException { + checkClosed(); + return getSpannerConnection().getOptimizerVersion(); + } + + /** Returns the value that should be returned for column types with an unknown length. 
*/ + int getColumnTypeUnknownLength() { + return getSpannerConnection().getConnectionPropertyValue(ConnectionProperties.UNKNOWN_LENGTH); + } + + @Override + public boolean isInTransaction() throws SQLException { + checkClosed(); + return getSpannerConnection().isInTransaction(); + } + + @Override + public boolean isTransactionStarted() throws SQLException { + checkClosed(); + return getSpannerConnection().isTransactionStarted(); + } + + @Override + public void setAutoCommit(boolean autoCommit) throws SQLException { + checkClosed(); + try { + // According to the JDBC spec's we need to commit the current transaction when changing + // autocommit mode. + if (getSpannerConnection().isAutocommit() != autoCommit + && getSpannerConnection().isTransactionStarted()) { + commit(); + } + getSpannerConnection().setAutocommit(autoCommit); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public boolean getAutoCommit() throws SQLException { + checkClosed(); + return getSpannerConnection().isAutocommit(); + } + + @Override + public void commit() throws SQLException { + checkClosed(); + try { + getSpannerConnection().commit(); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void rollback() throws SQLException { + checkClosed(); + try { + getSpannerConnection().rollback(); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void close() throws SQLException { + try { + getSpannerConnection().close(); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public boolean isClosed() { + return getSpannerConnection().isClosed(); + } + + @Override + public DatabaseMetaData getMetaData() throws SQLException { + checkClosed(); + return new JdbcDatabaseMetaData(this); + } + + @Override + public void setReadOnly(boolean readOnly) throws SQLException { + checkClosed(); + try { + 
getSpannerConnection().setReadOnly(readOnly); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public boolean isReadOnly() throws SQLException { + checkClosed(); + return getSpannerConnection().isReadOnly(); + } + + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) + throws SQLException { + checkClosed(); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY); + return createStatement(); + } + + @Override + public Statement createStatement( + int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + checkClosed(); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetHoldability == ResultSet.CLOSE_CURSORS_AT_COMMIT, ONLY_CLOSE_CURSORS_AT_COMMIT); + return createStatement(); + } + + @Override + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + checkClosed(); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY); + return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement( + String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + checkClosed(); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY); + 
JdbcPreconditions.checkSqlFeatureSupported( + resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY); + JdbcPreconditions.checkSqlFeatureSupported( + resultSetHoldability == ResultSet.CLOSE_CURSORS_AT_COMMIT, ONLY_CLOSE_CURSORS_AT_COMMIT); + return prepareStatement(sql); + } + + @Override + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + return prepareStatement( + sql, + autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS + ? ALL_COLUMNS + : NO_GENERATED_KEY_COLUMNS); + } + + @Override + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + // This should preferably have returned an error, but the initial version of the driver just + // accepted and ignored this. Starting to throw an error now would be a breaking change. + // TODO: Consider throwing an Unsupported error for the next major version bump. + return prepareStatement(sql, NO_GENERATED_KEY_COLUMNS); + } + + @Override + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + return prepareStatement( + sql, + isNullOrEmpty(columnNames) ? NO_GENERATED_KEY_COLUMNS : ImmutableList.copyOf(columnNames)); + } + + @Override + public Map> getTypeMap() throws SQLException { + checkClosed(); + return new HashMap<>(typeMap); + } + + @Override + public void setTypeMap(Map> map) throws SQLException { + checkClosed(); + this.typeMap = new HashMap<>(map); + } + + boolean isUseLegacyIsValidCheck() { + return useLegacyIsValidCheck; + } + + @Override + public boolean isValid(int timeout) throws SQLException { + JdbcPreconditions.checkArgument(timeout >= 0, "timeout must be >= 0"); + if (!isClosed()) { + if (isUseLegacyIsValidCheck()) { + return legacyIsValid(timeout); + } + try { + return getDialect() != null; + } catch (Exception ignore) { + // ignore and fall through. 
+ } + } + return false; + } + + private boolean legacyIsValid(int timeout) throws SQLException { + try (Statement statement = createStatement()) { + statement.setQueryTimeout(timeout); + try (ResultSet rs = statement.executeQuery(LEGACY_IS_VALID_QUERY)) { + if (rs.next()) { + if (rs.getLong(1) == 1L) { + return true; + } + } + } + } catch (SQLException e) { + // ignore and fall through. + } + return false; + } + + @Override + public Blob createBlob() throws SQLException { + checkClosed(); + return new JdbcBlob(); + } + + @Override + public Clob createClob() throws SQLException { + checkClosed(); + return new JdbcClob(); + } + + @Override + public NClob createNClob() throws SQLException { + checkClosed(); + return new JdbcClob(); + } + + @Override + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + checkClosed(); + return JdbcArray.createArray(typeName, elements); + } + + @Override + public void setCatalog(String catalog) throws SQLException { + // This method could be changed to allow the user to change to another database. + // For now, we only support setting the default catalog in order to support frameworks + // and applications that set this when no catalog has been specified in the connection + // URL. + checkClosed(); + checkValidCatalog(catalog); + } + + void checkValidCatalog(String catalog) throws SQLException { + String defaultCatalog = getDefaultCatalog(); + JdbcPreconditions.checkArgument( + defaultCatalog.equals(catalog), + String.format("Only catalog %s is supported", defaultCatalog)); + } + + @Override + public String getCatalog() throws SQLException { + checkClosed(); + return getDefaultCatalog(); + } + + @Nonnull + String getDefaultCatalog() { + switch (getDialect()) { + case POSTGRESQL: + String database = getConnectionOptions().getDatabaseName(); + // It should not be possible that database is null, but it's better to be safe than sorry. + return database == null ? 
"" : database; + case GOOGLE_STANDARD_SQL: + default: + return ""; + } + } + + @Override + public void setSchema(String schema) throws SQLException { + checkClosed(); + checkValidSchema(schema); + } + + void checkValidSchema(String schema) throws SQLException { + String defaultSchema = getDefaultSchema(); + JdbcPreconditions.checkArgument( + defaultSchema.equals(schema), String.format("Only schema %s is supported", defaultSchema)); + } + + @Override + public String getSchema() throws SQLException { + checkClosed(); + return getDefaultSchema(); + } + + @Nonnull + String getDefaultSchema() { + return getDialect().getDefaultSchema(); + } + + @Override + public SavepointSupport getSavepointSupport() throws SQLException { + checkClosed(); + return getSpannerConnection().getSavepointSupport(); + } + + @Override + public void setSavepointSupport(SavepointSupport savepointSupport) throws SQLException { + checkClosed(); + try { + getSpannerConnection().setSavepointSupport(savepointSupport); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public Savepoint setSavepoint() throws SQLException { + checkClosed(); + try { + JdbcSavepoint savepoint = JdbcSavepoint.unnamed(); + getSpannerConnection().savepoint(savepoint.internalGetSavepointName()); + return savepoint; + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public Savepoint setSavepoint(String name) throws SQLException { + checkClosed(); + try { + JdbcSavepoint savepoint = JdbcSavepoint.named(name); + getSpannerConnection().savepoint(savepoint.internalGetSavepointName()); + return savepoint; + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void rollback(Savepoint savepoint) throws SQLException { + checkClosed(); + JdbcPreconditions.checkArgument(savepoint instanceof JdbcSavepoint, savepoint); + JdbcSavepoint jdbcSavepoint = (JdbcSavepoint) savepoint; + try { + 
getSpannerConnection().rollbackToSavepoint(jdbcSavepoint.internalGetSavepointName()); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + checkClosed(); + JdbcPreconditions.checkArgument(savepoint instanceof JdbcSavepoint, savepoint); + JdbcSavepoint jdbcSavepoint = (JdbcSavepoint) savepoint; + try { + getSpannerConnection().releaseSavepoint(jdbcSavepoint.internalGetSavepointName()); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public Timestamp getCommitTimestamp() throws SQLException { + checkClosed(); + try { + return getSpannerConnection().getCommitTimestamp().toSqlTimestamp(); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public CommitResponse getCommitResponse() throws SQLException { + checkClosed(); + try { + return getSpannerConnection().getCommitResponse(); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void setReturnCommitStats(boolean returnCommitStats) throws SQLException { + checkClosed(); + try { + getSpannerConnection().setReturnCommitStats(returnCommitStats); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public boolean isReturnCommitStats() throws SQLException { + checkClosed(); + try { + return getSpannerConnection().isReturnCommitStats(); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public Timestamp getReadTimestamp() throws SQLException { + checkClosed(); + try { + return getSpannerConnection().getReadTimestamp().toSqlTimestamp(); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public boolean isRetryAbortsInternally() throws SQLException { + checkClosed(); + try { + return getSpannerConnection().isRetryAbortsInternally(); + } catch 
(SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void setRetryAbortsInternally(boolean retryAbortsInternally) throws SQLException { + checkClosed(); + try { + getSpannerConnection().setRetryAbortsInternally(retryAbortsInternally); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void write(Mutation mutation) throws SQLException { + checkClosed(); + try { + getSpannerConnection().write(mutation); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void write(Iterable mutations) throws SQLException { + checkClosed(); + try { + getSpannerConnection().write(mutations); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void bufferedWrite(Mutation mutation) throws SQLException { + checkClosed(); + try { + getSpannerConnection().bufferedWrite(mutation); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void bufferedWrite(Iterable mutations) throws SQLException { + checkClosed(); + try { + getSpannerConnection().bufferedWrite(mutations); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + /** + * Convenience method for calling a setter and translating any {@link SpannerException} to a + * {@link SQLException}. + */ + private void set(BiConsumer setter, T value) throws SQLException { + checkClosed(); + try { + setter.accept(getSpannerConnection(), value); + } catch (SpannerException spannerException) { + throw JdbcSqlExceptionFactory.of(spannerException); + } + } + + /** + * Convenience method for calling a getter and translating any {@link SpannerException} to a + * {@link SQLException}. 
+ */ + private R get(Function getter) throws SQLException { + checkClosed(); + try { + return getter.apply(getSpannerConnection()); + } catch (SpannerException spannerException) { + throw JdbcSqlExceptionFactory.of(spannerException); + } + } + + @Override + public void setDataBoostEnabled(boolean dataBoostEnabled) throws SQLException { + set(Connection::setDataBoostEnabled, dataBoostEnabled); + } + + @Override + public boolean isDataBoostEnabled() throws SQLException { + return get(Connection::isDataBoostEnabled); + } + + @Override + public void setAutoPartitionMode(boolean autoPartitionMode) throws SQLException { + set(Connection::setAutoPartitionMode, autoPartitionMode); + } + + @Override + public boolean isAutoPartitionMode() throws SQLException { + return get(Connection::isAutoPartitionMode); + } + + @Override + public void setMaxPartitions(int maxPartitions) throws SQLException { + set(Connection::setMaxPartitions, maxPartitions); + } + + @Override + public int getMaxPartitions() throws SQLException { + return get(Connection::getMaxPartitions); + } + + @Override + public void setMaxPartitionedParallelism(int maxThreads) throws SQLException { + set(Connection::setMaxPartitionedParallelism, maxThreads); + } + + @Override + public int getMaxPartitionedParallelism() throws SQLException { + return get(Connection::getMaxPartitionedParallelism); + } + + @Override + public void setAutoBatchDml(boolean autoBatchDml) throws SQLException { + set(Connection::setAutoBatchDml, autoBatchDml); + } + + @Override + public boolean isAutoBatchDml() throws SQLException { + return get(Connection::isAutoBatchDml); + } + + @Override + public void setAutoBatchDmlUpdateCount(long updateCount) throws SQLException { + set(Connection::setAutoBatchDmlUpdateCount, updateCount); + } + + @Override + public long getAutoBatchDmlUpdateCount() throws SQLException { + return get(Connection::getAutoBatchDmlUpdateCount); + } + + @Override + public void setAutoBatchDmlUpdateCountVerification(boolean 
verification) throws SQLException { + set(Connection::setAutoBatchDmlUpdateCountVerification, verification); + } + + @Override + public boolean isAutoBatchDmlUpdateCountVerification() throws SQLException { + return get(Connection::isAutoBatchDmlUpdateCountVerification); + } + + @SuppressWarnings("deprecation") + private static final class JdbcToSpannerTransactionRetryListener + implements com.google.cloud.spanner.connection.TransactionRetryListener { + private final TransactionRetryListener delegate; + + JdbcToSpannerTransactionRetryListener(TransactionRetryListener delegate) { + this.delegate = Preconditions.checkNotNull(delegate); + } + + @Override + public void retryStarting( + com.google.cloud.Timestamp transactionStarted, long transactionId, int retryAttempt) { + delegate.retryStarting(transactionStarted, transactionId, retryAttempt); + } + + @Override + public void retryFinished( + com.google.cloud.Timestamp transactionStarted, + long transactionId, + int retryAttempt, + RetryResult result) { + delegate.retryFinished( + transactionStarted, + transactionId, + retryAttempt, + TransactionRetryListener.RetryResult.valueOf(result.name())); + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof JdbcToSpannerTransactionRetryListener)) { + return false; + } + JdbcToSpannerTransactionRetryListener other = (JdbcToSpannerTransactionRetryListener) o; + return this.delegate.equals(other.delegate); + } + + @Override + public int hashCode() { + return delegate.hashCode(); + } + } + + @SuppressWarnings("deprecation") + @Override + public void addTransactionRetryListener(TransactionRetryListener listener) throws SQLException { + checkClosed(); + getSpannerConnection() + .addTransactionRetryListener(new JdbcToSpannerTransactionRetryListener(listener)); + } + + @Override + public void addTransactionRetryListener( + com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException { + checkClosed(); + 
getSpannerConnection().addTransactionRetryListener(listener); + } + + @SuppressWarnings("deprecation") + @Override + public boolean removeTransactionRetryListener(TransactionRetryListener listener) + throws SQLException { + checkClosed(); + return getSpannerConnection() + .removeTransactionRetryListener(new JdbcToSpannerTransactionRetryListener(listener)); + } + + @Override + public boolean removeTransactionRetryListener( + com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException { + checkClosed(); + return getSpannerConnection().removeTransactionRetryListener(listener); + } + + @SuppressWarnings("deprecation") + @Override + public Iterator getTransactionRetryListeners() throws SQLException { + checkClosed(); + return Iterators.transform( + getSpannerConnection().getTransactionRetryListeners(), + input -> { + if (input instanceof JdbcToSpannerTransactionRetryListener) { + return ((JdbcToSpannerTransactionRetryListener) input).delegate; + } + return null; + }); + } + + @Override + public Iterator + getTransactionRetryListenersFromConnection() throws SQLException { + checkClosed(); + return getSpannerConnection().getTransactionRetryListeners(); + } + + @Override + public void setProtoDescriptors(@Nonnull byte[] protoDescriptors) throws SQLException { + Preconditions.checkNotNull(protoDescriptors); + checkClosed(); + try { + getSpannerConnection().setProtoDescriptors(protoDescriptors); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public void setProtoDescriptors(@Nonnull InputStream protoDescriptors) + throws SQLException, IOException { + Preconditions.checkNotNull(protoDescriptors); + checkClosed(); + try { + getSpannerConnection() + .setProtoDescriptors(ByteArray.copyFrom(protoDescriptors).toByteArray()); + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } + } + + @Override + public byte[] getProtoDescriptors() throws SQLException { + checkClosed(); + return 
/** Constants for special values used by the Cloud Spanner JDBC driver. */
public final class JdbcConstants {
  /**
   * Special value returned by {@link java.sql.Statement#getUpdateCount()} when the previous
   * statement that was executed with {@link java.sql.Statement#execute(String)} produced a {@link
   * java.sql.ResultSet} instead of an update count.
   */
  public static final int STATEMENT_RESULT_SET = -1;

  /**
   * Special value returned by {@link java.sql.Statement#getUpdateCount()} when the previous
   * statement that was executed with {@link java.sql.Statement#execute(String)} had no result at
   * all ({@link com.google.cloud.spanner.connection.StatementResult.ResultType#NO_RESULT}), for
   * example a DDL statement.
   */
  public static final int STATEMENT_NO_RESULT = -2;

  /** Utility class; not intended for instantiation. */
  private JdbcConstants() {}
}
*/ +public class JdbcDataSource extends AbstractJdbcWrapper implements DataSource { + private String url; + private String credentials; + private Boolean autocommit; + private Boolean readonly; + private Boolean retryAbortsInternally; + + private volatile ConnectionOptions cachedConnectionOptions; + + // Make sure the JDBC driver class is loaded. + static { + try { + Class.forName("com.google.cloud.spanner.jdbc.JdbcDriver"); + } catch (ClassNotFoundException e) { + throw new IllegalStateException( + "JdbcDataSource failed to load com.google.cloud.spanner.jdbc.JdbcDriver", e); + } + } + + @Override + public PrintWriter getLogWriter() { + return null; + } + + @Override + public void setLogWriter(PrintWriter out) { + // no-op + } + + @Override + public void setLoginTimeout(int seconds) { + // no-op + } + + @Override + public int getLoginTimeout() { + return 0; + } + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + throw JdbcSqlExceptionFactory.unsupported("java.util.logging is not used"); + } + + @Override + public Connection getConnection() throws SQLException { + if (getUrl() == null) { + throw JdbcSqlExceptionFactory.of( + "There is no URL specified for this data source", Code.FAILED_PRECONDITION); + } + if (cachedConnectionOptions == null) { + synchronized (this) { + if (cachedConnectionOptions == null) { + if (!JdbcDriver.getRegisteredDriver().acceptsURL(getUrl())) { + throw JdbcSqlExceptionFactory.of( + "The URL " + getUrl() + " is not valid for the data source " + getClass().getName(), + Code.FAILED_PRECONDITION); + } + Properties properties = createProperties(); + maybeAddUserAgent(properties); + String connectionUri = appendPropertiesToUrl(url.substring(5), properties); + cachedConnectionOptions = buildConnectionOptions(connectionUri, properties); + } + } + } + return new JdbcConnection(getUrl(), cachedConnectionOptions); + } + + @Override + public Connection getConnection(String username, String password) throws 
SQLException { + return getConnection(); + } + + private Properties createProperties() { + Properties props = new Properties(); + if (this.credentials != null) { + props.setProperty(ConnectionOptions.CREDENTIALS_PROPERTY_NAME, this.credentials); + } + if (this.autocommit != null) { + props.setProperty( + ConnectionOptions.AUTOCOMMIT_PROPERTY_NAME, String.valueOf(this.autocommit)); + } + if (this.readonly != null) { + props.setProperty(ConnectionOptions.READONLY_PROPERTY_NAME, String.valueOf(this.readonly)); + } + if (this.retryAbortsInternally != null) { + props.setProperty( + ConnectionOptions.RETRY_ABORTS_INTERNALLY_PROPERTY_NAME, + String.valueOf(this.retryAbortsInternally)); + } + return props; + } + + @Override + public boolean isClosed() { + return false; + } + + private void clearCachedConnectionOptions() { + synchronized (this) { + cachedConnectionOptions = null; + } + } + + /** + * @return the JDBC URL to use for this {@link DataSource}. + */ + public String getUrl() { + return url; + } + + /** + * @param url The JDBC URL to use for this {@link DataSource}. + */ + public void setUrl(String url) { + clearCachedConnectionOptions(); + this.url = url; + } + + /** + * @return the credentials URL to use for this {@link DataSource}. If a credentials URL is + * specified in both the connection URL and using this property, the value in the connection + * URL will be used. + */ + public String getCredentials() { + return credentials; + } + + /** + * @param credentials The credentials URL to use for this {@link DataSource}. If a credentials URL + * is specified in both the connection URL and using this property, the value in the + * connection URL will be used. + */ + public void setCredentials(String credentials) { + clearCachedConnectionOptions(); + this.credentials = credentials; + } + + /** + * @return the initial autocommit setting to use for this {@link DataSource}. 
If autocommit is + * specified in both the connection URL and using this property, the value in the connection + * URL will be used. + */ + public Boolean getAutocommit() { + return autocommit; + } + + /** + * @param autocommit The initial autocommit setting to use for this {@link DataSource}. If + * autocommit is specified in both the connection URL and using this property, the value in + * the connection URL will be used. + */ + public void setAutocommit(Boolean autocommit) { + clearCachedConnectionOptions(); + this.autocommit = autocommit; + } + + /** + * @return the initial readonly setting to use for this {@link DataSource}. If readonly is + * specified in both the connection URL and using this property, the value in the connection + * URL will be used. + */ + public Boolean getReadonly() { + return readonly; + } + + /** + * @param readonly The initial readonly setting to use for this {@link DataSource}. If readonly is + * specified in both the connection URL and using this property, the value in the connection + * URL will be used. + */ + public void setReadonly(Boolean readonly) { + clearCachedConnectionOptions(); + this.readonly = readonly; + } + + /** + * @return the initial retryAbortsInternally setting to use for this {@link DataSource}. If + * retryAbortsInternally is specified in both the connection URL and using this property, the + * value in the connection URL will be used. + */ + public Boolean getRetryAbortsInternally() { + return retryAbortsInternally; + } + + /** + * @param retryAbortsInternally The initial retryAbortsInternally setting to use for this {@link + * DataSource}. If retryAbortsInternally is specified in both the connection URL and using + * this property, the value in the connection URL will be used. 
+ */ + public void setRetryAbortsInternally(Boolean retryAbortsInternally) { + clearCachedConnectionOptions(); + this.retryAbortsInternally = retryAbortsInternally; + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataType.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataType.java new file mode 100644 index 000000000000..892c0057ce60 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataType.java @@ -0,0 +1,594 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.Code; +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +/** Enum for mapping Cloud Spanner data types to Java classes and JDBC SQL {@link Types}. 
*/ +enum JdbcDataType { + BOOL { + @Override + public int getSqlType() { + return Types.BOOLEAN; + } + + @Override + public Class getJavaClass() { + return Boolean.class; + } + + @Override + public Code getCode() { + return Code.BOOL; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getBooleanList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.bool(); + } + }, + BYTES { + @Override + public int getSqlType() { + return Types.BINARY; + } + + @Override + public Class getJavaClass() { + return byte[].class; + } + + @Override + public Code getCode() { + return Code.BYTES; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return JdbcTypeConverter.toJavaByteArrays(rs.getBytesList(columnIndex)); + } + + @Override + public Type getSpannerType() { + return Type.bytes(); + } + }, + DATE { + @Override + public int getSqlType() { + return Types.DATE; + } + + @Override + public Class getJavaClass() { + return Date.class; + } + + @Override + public Code getCode() { + return Code.DATE; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return JdbcTypeConverter.toSqlDates(rs.getDateList(columnIndex)); + } + + @Override + public Type getSpannerType() { + return Type.date(); + } + }, + FLOAT32 { + private final Set aliases = new HashSet<>(Collections.singletonList("float4")); + + @Override + public int getSqlType() { + return Types.REAL; + } + + @Override + public int getScale() { + return 7; + } + + @Override + public int getPrecision() { + return 7; + } + + @Override + public int getDefaultColumnDisplaySize() { + return 7; + } + + @Override + public Class getJavaClass() { + return Float.class; + } + + @Override + public Code getCode() { + return Code.FLOAT32; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getFloatList(columnIndex); + } + + @Override + public Type getSpannerType() { + return 
Type.float32(); + } + + @Override + public Set getPostgreSQLAliases() { + return aliases; + } + }, + FLOAT64 { + private final Set> classes = new HashSet<>(Arrays.asList(Float.class, Double.class)); + private final Set aliases = new HashSet<>(Collections.singletonList("float8")); + + @Override + public int getSqlType() { + return Types.DOUBLE; + } + + @Override + public Class getJavaClass() { + return Double.class; + } + + @Override + public Set> getSupportedJavaClasses() { + return classes; + } + + @Override + public Code getCode() { + return Code.FLOAT64; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getDoubleList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.float64(); + } + + @Override + public Set getPostgreSQLAliases() { + return aliases; + } + }, + INT64 { + private final Set> classes = + new HashSet<>(Arrays.asList(Byte.class, Short.class, Integer.class, Long.class)); + + @Override + public int getSqlType() { + return Types.BIGINT; + } + + @Override + public Class getJavaClass() { + return Long.class; + } + + @Override + public Set> getSupportedJavaClasses() { + return classes; + } + + @Override + public Code getCode() { + return Code.INT64; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getLongList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.int64(); + } + }, + NUMERIC { + + private final Set aliases = new HashSet<>(Collections.singletonList("decimal")); + + @Override + public int getSqlType() { + return Types.NUMERIC; + } + + @Override + public Class getJavaClass() { + return BigDecimal.class; + } + + @Override + public Code getCode() { + return Code.NUMERIC; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getBigDecimalList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.numeric(); + } + + @Override + public Set 
getPostgreSQLAliases() { + return aliases; + } + }, + PG_NUMERIC { + @Override + public int getSqlType() { + return Types.NUMERIC; + } + + @Override + public Class getJavaClass() { + return BigDecimal.class; + } + + @Override + public Code getCode() { + return Code.PG_NUMERIC; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getValue(columnIndex).getNumericArray(); + } + + @Override + public Type getSpannerType() { + return Type.pgNumeric(); + } + }, + STRING { + private final Set aliases = new HashSet<>(Arrays.asList("varchar", "text")); + + @Override + public int getSqlType() { + return Types.NVARCHAR; + } + + @Override + public Class getJavaClass() { + return String.class; + } + + @Override + public Code getCode() { + return Code.STRING; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getStringList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.string(); + } + + @Override + public Set getPostgreSQLAliases() { + return aliases; + } + }, + JSON { + @Override + public int getSqlType() { + return JsonType.VENDOR_TYPE_NUMBER; + } + + @Override + public Class getJavaClass() { + return String.class; + } + + @Override + public Code getCode() { + return Code.JSON; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getJsonList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.json(); + } + }, + PG_JSONB { + @Override + public int getSqlType() { + return PgJsonbType.VENDOR_TYPE_NUMBER; + } + + @Override + public Class getJavaClass() { + return String.class; + } + + @Override + public Code getCode() { + return Code.PG_JSONB; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getPgJsonbList(columnIndex); + } + + @Override + public String getTypeName() { + return "JSONB"; + } + + @Override + public Type getSpannerType() { + return 
Type.pgJsonb(); + } + }, + TIMESTAMP { + @Override + public int getSqlType() { + return Types.TIMESTAMP; + } + + @Override + public Class getJavaClass() { + return Timestamp.class; + } + + @Override + public Code getCode() { + return Code.TIMESTAMP; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return JdbcTypeConverter.toSqlTimestamps(rs.getTimestampList(columnIndex)); + } + + @Override + public Type getSpannerType() { + return Type.timestamp(); + } + }, + UUID { + @Override + public int getSqlType() { + return UuidType.VENDOR_TYPE_NUMBER; + } + + @Override + public Class getJavaClass() { + return UUID.class; + } + + @Override + public Code getCode() { + return Code.UUID; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getUuidList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.uuid(); + } + }, + STRUCT { + @Override + public int getSqlType() { + return Types.STRUCT; + } + + @Override + public Class getJavaClass() { + return Struct.class; + } + + @Override + public Code getCode() { + return Code.STRUCT; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getStructList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.struct(); + } + }, + PROTO { + @Override + public int getSqlType() { + return ProtoMessageType.VENDOR_TYPE_NUMBER; + } + + @Override + public Class getJavaClass() { + return byte[].class; + } + + @Override + public Code getCode() { + return Code.PROTO; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return JdbcTypeConverter.toJavaByteArrays(rs.getBytesList(columnIndex)); + } + + @Override + public Type getSpannerType() { + return Type.bytes(); + } + }, + ENUM { + private final Set> classes = new HashSet<>(Arrays.asList(Integer.class, Long.class)); + + @Override + public int getSqlType() { + return ProtoEnumType.VENDOR_TYPE_NUMBER; + } 
+ + @Override + public Class getJavaClass() { + return Long.class; + } + + @Override + public Set> getSupportedJavaClasses() { + return classes; + } + + @Override + public Code getCode() { + return Code.ENUM; + } + + @Override + public List getArrayElements(ResultSet rs, int columnIndex) { + return rs.getLongList(columnIndex); + } + + @Override + public Type getSpannerType() { + return Type.int64(); + } + }; + + public abstract int getSqlType(); + + public abstract Code getCode(); + + public abstract Type getSpannerType(); + + public Set getPostgreSQLAliases() { + return Collections.emptySet(); + } + + /*** + * @param typeName type of the column + * @return true if type name matches current type name or matches with one of postgres aliases + * or if it matches equivalent postgres type. + */ + boolean matches(String typeName) { + return getTypeName().equalsIgnoreCase(typeName) + || getPostgreSQLAliases().contains(typeName.toLowerCase()) + || getSpannerType().getSpannerTypeName(Dialect.POSTGRESQL).equalsIgnoreCase(typeName); + } + + // TODO: Implement and use this method for all types. + public int getPrecision() { + throw new UnsupportedOperationException(); + } + + // TODO: Implement and use this method for all types. + public int getScale() { + throw new UnsupportedOperationException(); + } + + // TODO: Implement and use this method for all types. 
+ public int getDefaultColumnDisplaySize() { + throw new UnsupportedOperationException(); + } + + /** + * @param rs the result set to look up the elements + * @param columnIndex zero based column index + * @return The corresponding array elements of the type in the given result set + */ + public abstract List getArrayElements(ResultSet rs, int columnIndex); + + public String getTypeName() { + return name(); + } + + public abstract Class getJavaClass(); + + public Set> getSupportedJavaClasses() { + return Collections.singleton(getJavaClass()); + } + + public static JdbcDataType getType(Class clazz) { + for (JdbcDataType type : JdbcDataType.values()) { + if (type.getSupportedJavaClasses().contains(clazz)) { + return type; + } + } + return null; + } + + public static JdbcDataType getType(Code code) { + for (JdbcDataType type : JdbcDataType.values()) { + if (type.getCode() == code) { + return type; + } + } + return null; + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaData.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaData.java new file mode 100644 index 000000000000..7168e2f9204b --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaData.java @@ -0,0 +1,1771 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.auth.Credentials; +import com.google.auth.ServiceAccountSigner; +import com.google.auth.oauth2.UserCredentials; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.connection.Connection.InternalMetadataQuery; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableSet; +import java.io.BufferedReader; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.RowIdLifetime; +import java.sql.SQLException; +import java.sql.Types; +import java.util.Arrays; +import java.util.Collections; +import java.util.Properties; +import java.util.Scanner; + +/** {@link DatabaseMetaData} implementation for Cloud Spanner */ +class JdbcDatabaseMetaData extends AbstractJdbcWrapper implements DatabaseMetaData { + private static final int JDBC_MAJOR_VERSION = 4; + private static final int JDBC_MINOR_VERSION = 1; + private static final int DATABASE_MAJOR_VERSION = 1; + private static final int DATABASE_MINOR_VERSION = 0; + private static final String PRODUCT_NAME = "Google Cloud Spanner"; + private static final String POSTGRESQL_PRODUCT_NAME = PRODUCT_NAME + " PostgreSQL"; + + @VisibleForTesting + static String readSqlFromFile(String filename, Dialect dialect) { + InputStream in; + switch (dialect) { + case POSTGRESQL: + in = JdbcDatabaseMetaData.class.getResourceAsStream("postgresql/" + filename); + break; + case GOOGLE_STANDARD_SQL: + default: + in = JdbcDatabaseMetaData.class.getResourceAsStream(filename); + } + BufferedReader reader = new BufferedReader(new InputStreamReader(in)); + StringBuilder builder = new StringBuilder(); + try (Scanner scanner = new 
Scanner(reader)) { + while (scanner.hasNextLine()) { + String line = scanner.nextLine(); + builder.append(line).append("\n"); + } + } + return builder.toString(); + } + + private final JdbcConnection connection; + + JdbcDatabaseMetaData(JdbcConnection connection) { + this.connection = connection; + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public boolean allProceduresAreCallable() { + return true; + } + + @Override + public boolean allTablesAreSelectable() { + return true; + } + + @Override + public String getURL() { + return connection.getConnectionUrl(); + } + + @Override + public String getUserName() { + Credentials credentials = connection.getConnectionOptions().getCredentials(); + if (credentials != null) { + if (credentials instanceof ServiceAccountSigner) { + return ((ServiceAccountSigner) credentials).getAccount(); + } else if (credentials instanceof UserCredentials) { + return ((UserCredentials) credentials).getClientId(); + } + } + return ""; + } + + @Override + public boolean isReadOnly() { + return false; + } + + @Override + public boolean nullsAreSortedHigh() { + return false; + } + + @Override + public boolean nullsAreSortedLow() { + return true; + } + + @Override + public boolean nullsAreSortedAtStart() { + return false; + } + + @Override + public boolean nullsAreSortedAtEnd() { + return false; + } + + @Override + public String getDatabaseProductName() { + return connection.getDialect() == Dialect.POSTGRESQL ? POSTGRESQL_PRODUCT_NAME : PRODUCT_NAME; + } + + @Override + public String getDatabaseProductVersion() { + return getDatabaseMajorVersion() + "." + getDatabaseMinorVersion(); + } + + @Override + public String getDriverName() { + return JdbcDriver.class.getName(); + } + + @Override + public String getDriverVersion() { + return getDriverMajorVersion() + "." 
+ getDriverMinorVersion(); + } + + @Override + public int getDriverMajorVersion() { + return JdbcDriver.MAJOR_VERSION; + } + + @Override + public int getDriverMinorVersion() { + return JdbcDriver.MINOR_VERSION; + } + + @Override + public boolean usesLocalFiles() { + return false; + } + + @Override + public boolean usesLocalFilePerTable() { + return false; + } + + @Override + public boolean supportsMixedCaseIdentifiers() { + return false; + } + + @Override + public boolean storesUpperCaseIdentifiers() { + return false; + } + + @Override + public boolean storesLowerCaseIdentifiers() { + return connection.getDialect() == Dialect.POSTGRESQL; + } + + @Override + public boolean storesMixedCaseIdentifiers() { + return true; + } + + @Override + public boolean supportsMixedCaseQuotedIdentifiers() { + return false; + } + + @Override + public boolean storesUpperCaseQuotedIdentifiers() { + return false; + } + + @Override + public boolean storesLowerCaseQuotedIdentifiers() { + return false; + } + + @Override + public boolean storesMixedCaseQuotedIdentifiers() { + return true; + } + + @Override + public String getIdentifierQuoteString() { + return "`"; + } + + @Override + public String getSQLKeywords() { + return "ASSERT_ROWS_MODIFIED,ENUM,GROUPS,HASH,IGNORE,LOOKUP,PROTO,RESPECT,STRUCT,WINDOW"; + } + + @Override + public String getNumericFunctions() { + return "ABS,SIGN,IS_INF,IS_NAN,IEEE_DIVIDE,SQRT,POW,POWER,EXP,LN,LOG,LOG10,GREATEST,LEAST,DIV,MOD,ROUND,TRUNC,CEIL,CEILING,FLOOR,COS,COSH,ACOS,ACOSH,SIN,SINH,ASIN,ASINH,TAN,TANH,ATAN,ATANH,ATAN2,FARM_FINGERPRINT,SHA1,SHA256,SHA512"; + } + + @Override + public String getStringFunctions() { + return 
"BYTE_LENGTH,CHAR_LENGTH,CHARACTER_LENGTH,CODE_POINTS_TO_BYTES,CODE_POINTS_TO_STRING,CONCAT,ENDS_WITH,FORMAT,FROM_BASE64,FROM_HEX,LENGTH,LPAD,LOWER,LTRIM,REGEXP_CONTAINS,REGEXP_EXTRACT,REGEXP_EXTRACT_ALL,REGEXP_REPLACE,REPLACE,REPEAT,REVERSE,RPAD,RTRIM,SAFE_CONVERT_BYTES_TO_STRING,SPLIT,STARTS_WITH,STRPOS,SUBSTR,TO_BASE64,TO_CODE_POINTS,TO_HEX,TRIM,UPPER,JSON_QUERY,JSON_VALUE"; + } + + @Override + public String getSystemFunctions() { + return ""; + } + + @Override + public String getTimeDateFunctions() { + return "CURRENT_DATE,EXTRACT,DATE,DATE_ADD,DATE_SUB,DATE_DIFF,DATE_TRUNC,DATE_FROM_UNIX_DATE,FORMAT_DATE,PARSE_DATE,UNIX_DATE,CURRENT_TIMESTAMP,STRING,TIMESTAMP,TIMESTAMP_ADD,TIMESTAMP_SUB,TIMESTAMP_DIFF,TIMESTAMP_TRUNC,FORMAT_TIMESTAMP,PARSE_TIMESTAMP,TIMESTAMP_SECONDS,TIMESTAMP_MILLIS,TIMESTAMP_MICROS,UNIX_SECONDS,UNIX_MILLIS,UNIX_MICROS"; + } + + @Override + public String getSearchStringEscape() { + return "\\"; + } + + @Override + public String getExtraNameCharacters() { + return ""; + } + + @Override + public boolean supportsAlterTableWithAddColumn() { + return true; + } + + @Override + public boolean supportsAlterTableWithDropColumn() { + return true; + } + + @Override + public boolean supportsColumnAliasing() { + return true; + } + + @Override + public boolean nullPlusNonNullIsNull() { + return true; + } + + @Override + public boolean supportsConvert() { + return false; + } + + @Override + public boolean supportsConvert(int fromType, int toType) { + return false; + } + + @Override + public boolean supportsTableCorrelationNames() { + return true; + } + + @Override + public boolean supportsDifferentTableCorrelationNames() { + return false; + } + + @Override + public boolean supportsExpressionsInOrderBy() { + return true; + } + + @Override + public boolean supportsOrderByUnrelated() { + return true; + } + + @Override + public boolean supportsGroupBy() { + return true; + } + + @Override + public boolean supportsGroupByUnrelated() { + return true; + } + + 
@Override + public boolean supportsGroupByBeyondSelect() { + return true; + } + + @Override + public boolean supportsLikeEscapeClause() { + return true; + } + + @Override + public boolean supportsMultipleResultSets() { + return true; + } + + @Override + public boolean supportsMultipleTransactions() { + return true; + } + + @Override + public boolean supportsNonNullableColumns() { + return true; + } + + @Override + public boolean supportsMinimumSQLGrammar() { + return false; + } + + @Override + public boolean supportsCoreSQLGrammar() { + return false; + } + + @Override + public boolean supportsExtendedSQLGrammar() { + return false; + } + + @Override + public boolean supportsANSI92EntryLevelSQL() { + return false; + } + + @Override + public boolean supportsANSI92IntermediateSQL() { + return false; + } + + @Override + public boolean supportsANSI92FullSQL() { + return false; + } + + @Override + public boolean supportsIntegrityEnhancementFacility() { + return false; + } + + @Override + public boolean supportsOuterJoins() { + return true; + } + + @Override + public boolean supportsFullOuterJoins() { + return true; + } + + @Override + public boolean supportsLimitedOuterJoins() { + return true; + } + + @Override + public String getSchemaTerm() { + return "SCHEMA"; + } + + @Override + public String getProcedureTerm() { + return "PROCEDURE"; + } + + @Override + public String getCatalogTerm() { + // Spanner does not support catalogs, but the term is included for compatibility with the SQL + // standard + return "CATALOG"; + } + + @Override + public boolean isCatalogAtStart() { + return false; + } + + @Override + public String getCatalogSeparator() { + return "."; + } + + @Override + public boolean supportsSchemasInDataManipulation() { + return true; + } + + @Override + public boolean supportsSchemasInProcedureCalls() { + return true; + } + + @Override + public boolean supportsSchemasInTableDefinitions() { + return true; + } + + @Override + public boolean 
supportsSchemasInIndexDefinitions() { + return true; + } + + @Override + public boolean supportsSchemasInPrivilegeDefinitions() { + return true; + } + + @Override + public boolean supportsCatalogsInDataManipulation() { + return false; + } + + @Override + public boolean supportsCatalogsInProcedureCalls() { + return false; + } + + @Override + public boolean supportsCatalogsInTableDefinitions() { + return false; + } + + @Override + public boolean supportsCatalogsInIndexDefinitions() { + return false; + } + + @Override + public boolean supportsCatalogsInPrivilegeDefinitions() { + return false; + } + + @Override + public boolean supportsPositionedDelete() { + return false; + } + + @Override + public boolean supportsPositionedUpdate() { + return false; + } + + @Override + public boolean supportsSelectForUpdate() { + return true; + } + + @Override + public boolean supportsStoredProcedures() { + return false; + } + + @Override + public boolean supportsSubqueriesInComparisons() { + return true; + } + + @Override + public boolean supportsSubqueriesInExists() { + return true; + } + + @Override + public boolean supportsSubqueriesInIns() { + return true; + } + + @Override + public boolean supportsSubqueriesInQuantifieds() { + return true; + } + + @Override + public boolean supportsCorrelatedSubqueries() { + return true; + } + + @Override + public boolean supportsUnion() { + // Note that Cloud Spanner requires the user to specify 'UNION DISTINCT' or 'UNION ALL' in a + // query. 'UNION DISTINCT' is equal to the SQL operation 'UNION'. 
+ return true; + } + + @Override + public boolean supportsUnionAll() { + return true; + } + + @Override + public boolean supportsOpenCursorsAcrossCommit() { + return false; + } + + @Override + public boolean supportsOpenCursorsAcrossRollback() { + return false; + } + + @Override + public boolean supportsOpenStatementsAcrossCommit() { + return true; + } + + @Override + public boolean supportsOpenStatementsAcrossRollback() { + return true; + } + + @Override + public int getMaxBinaryLiteralLength() { + return 0; + } + + @Override + public int getMaxCharLiteralLength() { + return 0; + } + + @Override + public int getMaxColumnNameLength() { + return 128; + } + + @Override + public int getMaxColumnsInGroupBy() { + return 1000; + } + + @Override + public int getMaxColumnsInIndex() { + return 16; + } + + @Override + public int getMaxColumnsInOrderBy() { + return 0; + } + + @Override + public int getMaxColumnsInSelect() { + return 0; + } + + @Override + public int getMaxColumnsInTable() { + return 1024; + } + + @Override + public int getMaxConnections() { + // there is a max number of sessions, but that is not the same as the max number of connections + return 0; + } + + @Override + public int getMaxCursorNameLength() { + return 0; + } + + @Override + public int getMaxIndexLength() { + return 8000; + } + + @Override + public int getMaxSchemaNameLength() { + return 128; + } + + @Override + public int getMaxProcedureNameLength() { + return 0; + } + + @Override + public int getMaxCatalogNameLength() { + return 0; + } + + @Override + public int getMaxRowSize() { + // The limit is 1024 columns per table * 10MB per column, which is more than fits in an int. + // We therefore return 0 to indicate no limit (or an unknown limit). 
+ return 0; + } + + @Override + public boolean doesMaxRowSizeIncludeBlobs() { + return true; + } + + @Override + public int getMaxStatementLength() { + return 1000000; + } + + @Override + public int getMaxStatements() { + return 0; + } + + @Override + public int getMaxTableNameLength() { + return 128; + } + + @Override + public int getMaxTablesInSelect() { + return 0; + } + + @Override + public int getMaxUserNameLength() { + return 0; + } + + @Override + public int getDefaultTransactionIsolation() { + return Connection.TRANSACTION_SERIALIZABLE; + } + + @Override + public boolean supportsTransactions() { + return true; + } + + @Override + public boolean supportsTransactionIsolationLevel(int level) { + return supportsIsolationLevel(level); + } + + static boolean supportsIsolationLevel(int level) { + return Connection.TRANSACTION_SERIALIZABLE == level + || Connection.TRANSACTION_REPEATABLE_READ == level; + } + + @Override + public boolean supportsDataDefinitionAndDataManipulationTransactions() { + return false; + } + + @Override + public boolean supportsDataManipulationTransactionsOnly() { + return true; + } + + @Override + public boolean dataDefinitionCausesTransactionCommit() { + return false; + } + + @Override + public boolean dataDefinitionIgnoredInTransactions() { + return false; + } + + @Override + public ResultSet getProcedures( + String catalog, String schemaPattern, String procedureNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("PROCEDURE_CAT", Type.string()), + StructField.of("PROCEDURE_SCHEM", Type.string()), + StructField.of("PROCEDURE_NAME", Type.string()), + StructField.of("reserved1", Type.string()), + StructField.of("reserved2", Type.string()), + StructField.of("reserved3", Type.string()), + StructField.of("REMARKS", Type.string()), + StructField.of("PROCEDURE_TYPE", Type.int64()), + StructField.of("SPECIFIC_NAME", Type.string())), + Collections.emptyList())); + } + + @Override + public ResultSet 
getProcedureColumns( + String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("PROCEDURE_CAT", Type.string()), + StructField.of("PROCEDURE_SCHEM", Type.string()), + StructField.of("PROCEDURE_NAME", Type.string()), + StructField.of("COLUMN_NAME", Type.string()), + StructField.of("COLUMN_TYPE", Type.int64()), + StructField.of("DATA_TYPE", Type.int64()), + StructField.of("TYPE_NAME", Type.string()), + StructField.of("PRECISION", Type.string()), + StructField.of("LENGTH", Type.int64()), + StructField.of("SCALE", Type.int64()), + StructField.of("RADIX", Type.int64()), + StructField.of("NULLABLE", Type.int64()), + StructField.of("REMARKS", Type.string()), + StructField.of("COLUMN_DEF", Type.string()), + StructField.of("SQL_DATA_TYPE", Type.int64()), + StructField.of("SQL_DATETIME_SUB", Type.int64()), + StructField.of("CHAR_OCTET_LENGTH", Type.int64()), + StructField.of("ORDINAL_POSITION", Type.int64()), + StructField.of("IS_NULLABLE", Type.string()), + StructField.of("SPECIFIC_NAME", Type.string())), + Collections.emptyList())); + } + + private JdbcPreparedStatement prepareStatementReplaceNullWithAnyString( + String sql, String... 
params) throws SQLException { + JdbcPreparedStatement statement = connection.prepareStatement(sql); + int paramIndex = 1; + for (String param : params) { + if (param == null) { + statement.setString(paramIndex, "%"); + } else { + statement.setString(paramIndex, param.toUpperCase()); + } + paramIndex++; + } + return statement; + } + + @Override + public ResultSet getTables( + String catalog, String schemaPattern, String tableNamePattern, String[] types) + throws SQLException { + String sql = readSqlFromFile("DatabaseMetaData_GetTables.sql", connection.getDialect()); + String type1; + String type2; + if (types == null || types.length == 0) { + type1 = "TABLE"; + type2 = "VIEW"; + } else if (types.length == 1) { + type1 = types[0]; + type2 = "NON_EXISTENT_TYPE"; + } else { + type1 = types[0]; + type2 = types[1]; + } + JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString( + sql, catalog, schemaPattern, tableNamePattern, type1, type2); + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + + @Override + public ResultSet getSchemas() throws SQLException { + return getSchemas(null, null); + } + + @Override + public ResultSet getCatalogs() throws SQLException { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct(StructField.of("TABLE_CAT", Type.string())), + Collections.singletonList( + Struct.newBuilder().set("TABLE_CAT").to(getConnection().getCatalog()).build()))); + } + + @Override + public ResultSet getTableTypes() { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct(StructField.of("TABLE_TYPE", Type.string())), + Arrays.asList( + Struct.newBuilder().set("TABLE_TYPE").to("TABLE").build(), + Struct.newBuilder().set("TABLE_TYPE").to("VIEW").build()))); + } + + @Override + public ResultSet getColumns( + String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) + throws SQLException { + String sql = readSqlFromFile("DatabaseMetaData_GetColumns.sql", 
connection.getDialect()); + JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString( + sql, catalog, schemaPattern, tableNamePattern, columnNamePattern); + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + + @Override + public ResultSet getColumnPrivileges( + String catalog, String schema, String table, String columnNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TABLE_CAT", Type.string()), + StructField.of("TABLE_SCHEM", Type.string()), + StructField.of("TABLE_NAME", Type.string()), + StructField.of("COLUMN_NAME", Type.string()), + StructField.of("GRANTOR", Type.string()), + StructField.of("GRANTEE", Type.string()), + StructField.of("PRIVILEGE", Type.string()), + StructField.of("IS_GRANTABLE", Type.string())), + Collections.emptyList())); + } + + @Override + public ResultSet getTablePrivileges( + String catalog, String schemaPattern, String tableNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TABLE_CAT", Type.string()), + StructField.of("TABLE_SCHEM", Type.string()), + StructField.of("TABLE_NAME", Type.string()), + StructField.of("GRANTOR", Type.string()), + StructField.of("GRANTEE", Type.string()), + StructField.of("PRIVILEGE", Type.string()), + StructField.of("IS_GRANTABLE", Type.string())), + Collections.emptyList())); + } + + @Override + public ResultSet getBestRowIdentifier( + String catalog, String schema, String table, int scope, boolean nullable) { + return getEmptyColumnsResultSet(); + } + + @Override + public ResultSet getVersionColumns(String catalog, String schema, String table) { + return getEmptyColumnsResultSet(); + } + + private ResultSet getEmptyColumnsResultSet() { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("SCOPE", Type.int64()), + StructField.of("COLUMN_NAME", Type.string()), + StructField.of("DATA_TYPE", Type.int64()), + StructField.of("TYPE_NAME", 
Type.string()), + StructField.of("COLUMN_SIZE", Type.int64()), + StructField.of("BUFFER_LENGTH", Type.int64()), + StructField.of("DECIMAL_DIGITS", Type.int64()), + StructField.of("PSEUDO_COLUMN", Type.int64())), + Collections.emptyList())); + } + + @Override + public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException { + JdbcPreconditions.checkArgument(table != null, "table may not be null"); + String sql = readSqlFromFile("DatabaseMetaData_GetPrimaryKeys.sql", connection.getDialect()); + JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString(sql, catalog, schema, table); + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + + @Override + public ResultSet getImportedKeys(String catalog, String schema, String table) + throws SQLException { + JdbcPreconditions.checkArgument(table != null, "table may not be null"); + String sql = readSqlFromFile("DatabaseMetaData_GetImportedKeys.sql", connection.getDialect()); + JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString(sql, catalog, schema, table); + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + + @Override + public ResultSet getExportedKeys(String catalog, String schema, String table) + throws SQLException { + JdbcPreconditions.checkArgument(table != null, "table may not be null"); + String sql = readSqlFromFile("DatabaseMetaData_GetExportedKeys.sql", connection.getDialect()); + JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString(sql, catalog, schema, table); + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + + @Override + public ResultSet getCrossReference( + String parentCatalog, + String parentSchema, + String parentTable, + String foreignCatalog, + String foreignSchema, + String foreignTable) + throws SQLException { + String sql = + readSqlFromFile("DatabaseMetaData_GetCrossReferences.sql", connection.getDialect()); + 
JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString( + sql, + parentCatalog, + parentSchema, + parentTable, + foreignCatalog, + foreignSchema, + foreignTable); + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + + @Override + public ResultSet getTypeInfo() { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TYPE_NAME", Type.string()), + StructField.of("DATA_TYPE", Type.int64()), + StructField.of("PRECISION", Type.int64()), + StructField.of("LITERAL_PREFIX", Type.string()), + StructField.of("LITERAL_SUFFIX", Type.string()), + StructField.of("CREATE_PARAMS", Type.string()), + StructField.of("NULLABLE", Type.int64()), + StructField.of("CASE_SENSITIVE", Type.bool()), + StructField.of("SEARCHABLE", Type.int64()), + StructField.of("UNSIGNED_ATTRIBUTE", Type.bool()), + StructField.of("FIXED_PREC_SCALE", Type.bool()), + StructField.of("AUTO_INCREMENT", Type.bool()), + StructField.of("LOCAL_TYPE_NAME", Type.string()), + StructField.of("MINIMUM_SCALE", Type.int64()), + StructField.of("MAXIMUM_SCALE", Type.int64()), + StructField.of("SQL_DATA_TYPE", Type.int64()), + StructField.of("SQL_DATETIME_SUB", Type.int64()), + StructField.of("NUM_PREC_RADIX", Type.int64())), + Arrays.asList( + // TODO(#925): Make these dialect-dependent (i.e. 'timestamptz' for PostgreSQL. 
+ Struct.newBuilder() + .set("TYPE_NAME") + .to("STRING") + .set("DATA_TYPE") + .to(Types.NVARCHAR) // -9 + .set("PRECISION") + .to(2621440L) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to("(length)") + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(true) + .set("SEARCHABLE") + .to(DatabaseMetaData.typeSearchable) + .set("UNSIGNED_ATTRIBUTE") + .to(true) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("STRING") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to((Long) null) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("INT64") + .set("DATA_TYPE") + .to(Types.BIGINT) // -5 + .set("PRECISION") + .to(19L) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + .to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(false) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("INT64") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to(10) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("BYTES") + .set("DATA_TYPE") + .to(Types.BINARY) // -2 + .set("PRECISION") + .to(10485760L) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to("(length)") + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + 
.to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(true) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("BYTES") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to((Long) null) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("FLOAT32") + .set("DATA_TYPE") + .to(Types.REAL) // 8 + .set("PRECISION") + .to(7L) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + .to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(false) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("FLOAT32") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to(2) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("FLOAT64") + .set("DATA_TYPE") + .to(Types.DOUBLE) // 8 + .set("PRECISION") + .to(15L) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + .to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(false) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("FLOAT64") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to(2) + .build(), + 
Struct.newBuilder() + .set("TYPE_NAME") + .to("BOOL") + .set("DATA_TYPE") + .to(Types.BOOLEAN) // 16 + .set("PRECISION") + .to((Long) null) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + .to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(true) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("BOOL") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to((Long) null) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("DATE") + .set("DATA_TYPE") + .to(Types.DATE) // 91 + .set("PRECISION") + .to(10L) + .set("LITERAL_PREFIX") + .to("DATE ") + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + .to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(true) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("DATE") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to((Long) null) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("TIMESTAMP") + .set("DATA_TYPE") + .to(Types.TIMESTAMP) // 93 + .set("PRECISION") + .to(35L) + .set("LITERAL_PREFIX") + .to("TIMESTAMP ") + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + 
.to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(true) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("TIMESTAMP") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to((Long) null) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("NUMERIC") + .set("DATA_TYPE") + .to(Types.NUMERIC) // 2 + .set("PRECISION") + .to(2621440L) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + .to(DatabaseMetaData.typePredBasic) + .set("UNSIGNED_ATTRIBUTE") + .to(false) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("NUMERIC") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to(10) + .build(), + Struct.newBuilder() + .set("TYPE_NAME") + .to("UUID") + .set("DATA_TYPE") + .to(Types.OTHER) // There's no JDBC-specific type code for UUID. 
+ .set("PRECISION") + .to((Long) null) + .set("LITERAL_PREFIX") + .to("UUID ") + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(false) + .set("SEARCHABLE") + .to(DatabaseMetaData.typeSearchable) + .set("UNSIGNED_ATTRIBUTE") + .to(true) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to("UUID") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to((Long) null) + .build(), + getJsonType(connection.getDialect()))), + // Allow column 2 to be cast to short without any range checks. + ImmutableSet.of(2)); + } + + private Struct getJsonType(Dialect dialect) { + return Struct.newBuilder() + .set("TYPE_NAME") + .to(dialect == Dialect.POSTGRESQL ? "JSONB" : "JSON") + .set("DATA_TYPE") + .to( + dialect == Dialect.POSTGRESQL + ? PgJsonbType.VENDOR_TYPE_NUMBER + : JsonType.VENDOR_TYPE_NUMBER) + .set("PRECISION") + .to(2621440L) + .set("LITERAL_PREFIX") + .to((String) null) + .set("LITERAL_SUFFIX") + .to((String) null) + .set("CREATE_PARAMS") + .to((String) null) + .set("NULLABLE") + .to(DatabaseMetaData.typeNullable) + .set("CASE_SENSITIVE") + .to(true) + .set("SEARCHABLE") + .to(DatabaseMetaData.typeSearchable) + .set("UNSIGNED_ATTRIBUTE") + .to(true) + .set("FIXED_PREC_SCALE") + .to(false) + .set("AUTO_INCREMENT") + .to(false) + .set("LOCAL_TYPE_NAME") + .to(dialect == Dialect.POSTGRESQL ? 
"JSONB" : "JSON") + .set("MINIMUM_SCALE") + .to(0) + .set("MAXIMUM_SCALE") + .to(0) + .set("SQL_DATA_TYPE") + .to((Long) null) + .set("SQL_DATETIME_SUB") + .to((Long) null) + .set("NUM_PREC_RADIX") + .to((Long) null) + .build(); + } + + @Override + public ResultSet getIndexInfo( + String catalog, String schema, String table, boolean unique, boolean approximate) + throws SQLException { + return getIndexInfo(catalog, schema, table, null, unique); + } + + public ResultSet getIndexInfo(String catalog, String schema, String indexName) + throws SQLException { + return getIndexInfo(catalog, schema, null, indexName, false); + } + + private ResultSet getIndexInfo( + String catalog, String schema, String table, String indexName, boolean unique) + throws SQLException { + String sql = readSqlFromFile("DatabaseMetaData_GetIndexInfo.sql", connection.getDialect()); + JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString( + sql, catalog, schema, table, indexName, unique ? "YES" : "%"); + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + + @Override + public boolean supportsResultSetType(int type) { + return type == ResultSet.TYPE_FORWARD_ONLY; + } + + @Override + public boolean supportsResultSetConcurrency(int type, int concurrency) { + return type == ResultSet.TYPE_FORWARD_ONLY && concurrency == ResultSet.CONCUR_READ_ONLY; + } + + @Override + public boolean ownUpdatesAreVisible(int type) { + return false; + } + + @Override + public boolean ownDeletesAreVisible(int type) { + return false; + } + + @Override + public boolean ownInsertsAreVisible(int type) { + return false; + } + + @Override + public boolean othersUpdatesAreVisible(int type) { + return false; + } + + @Override + public boolean othersDeletesAreVisible(int type) { + return false; + } + + @Override + public boolean othersInsertsAreVisible(int type) { + return false; + } + + @Override + public boolean updatesAreDetected(int type) { + return false; + } + + @Override + 
public boolean deletesAreDetected(int type) { + return false; + } + + @Override + public boolean insertsAreDetected(int type) { + return false; + } + + @Override + public boolean supportsBatchUpdates() { + return true; + } + + @Override + public ResultSet getUDTs( + String catalog, String schemaPattern, String typeNamePattern, int[] types) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TYPE_CAT", Type.string()), + StructField.of("TYPE_SCHEM", Type.string()), + StructField.of("TYPE_NAME", Type.string()), + StructField.of("CLASS_NAME", Type.string()), + StructField.of("DATA_TYPE", Type.int64()), + StructField.of("REMARKS", Type.string()), + StructField.of("BASE_TYPE", Type.int64())), + Collections.emptyList())); + } + + @Override + public Connection getConnection() { + return connection; + } + + @Override + public boolean supportsSavepoints() { + return false; + } + + @Override + public boolean supportsNamedParameters() { + return false; + } + + @Override + public boolean supportsMultipleOpenResults() { + return true; + } + + @Override + public boolean supportsGetGeneratedKeys() { + return false; + } + + @Override + public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TYPE_CAT", Type.string()), + StructField.of("TYPE_SCHEM", Type.string()), + StructField.of("TYPE_NAME", Type.string()), + StructField.of("SUPERTYPE_CAT", Type.string()), + StructField.of("SUPERTYPE_SCHEM", Type.string()), + StructField.of("SUPERTYPE_NAME", Type.string())), + Collections.emptyList())); + } + + @Override + public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TABLE_CAT", Type.string()), + StructField.of("TABLE_SCHEM", Type.string()), + StructField.of("TABLE_NAME", Type.string()), + 
StructField.of("SUPERTABLE_NAME", Type.string())), + Collections.emptyList())); + } + + @Override + public ResultSet getAttributes( + String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TYPE_CAT", Type.string()), + StructField.of("TYPE_SCHEM", Type.string()), + StructField.of("TYPE_NAME", Type.string()), + StructField.of("ATTR_NAME", Type.string()), + StructField.of("DATA_TYPE", Type.int64()), + StructField.of("ATTR_TYPE_NAME", Type.string()), + StructField.of("ATTR_SIZE", Type.int64()), + StructField.of("DECIMAL_DIGITS", Type.int64()), + StructField.of("NUM_PREC_RADIX", Type.int64()), + StructField.of("NULLABLE", Type.int64()), + StructField.of("REMARKS", Type.string()), + StructField.of("ATTR_DEF", Type.string()), + StructField.of("SQL_DATA_TYPE", Type.int64()), + StructField.of("SQL_DATETIME_SUB", Type.int64()), + StructField.of("CHAR_OCTET_LENGTH", Type.int64()), + StructField.of("ORDINAL_POSITION", Type.int64()), + StructField.of("IS_NULLABLE", Type.string()), + StructField.of("SCOPE_CATALOG", Type.string()), + StructField.of("SCOPE_SCHEMA", Type.string()), + StructField.of("SCOPE_TABLE", Type.string()), + StructField.of("SOURCE_DATA_TYPE", Type.int64())), + Collections.emptyList())); + } + + @Override + public boolean supportsResultSetHoldability(int holdability) { + return holdability == ResultSet.CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public int getResultSetHoldability() { + return ResultSet.CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public int getDatabaseMajorVersion() { + return DATABASE_MAJOR_VERSION; + } + + @Override + public int getDatabaseMinorVersion() { + return DATABASE_MINOR_VERSION; + } + + @Override + public int getJDBCMajorVersion() { + return JDBC_MAJOR_VERSION; + } + + @Override + public int getJDBCMinorVersion() { + return JDBC_MINOR_VERSION; + } + + @Override + public int getSQLStateType() { + return 
sqlStateSQL; + } + + @Override + public boolean locatorsUpdateCopy() { + return true; + } + + @Override + public boolean supportsStatementPooling() { + return false; + } + + @Override + public RowIdLifetime getRowIdLifetime() { + return RowIdLifetime.ROWID_UNSUPPORTED; + } + + @Override + public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException { + String sql = readSqlFromFile("DatabaseMetaData_GetSchemas.sql", connection.getDialect()); + try (JdbcPreparedStatement statement = + prepareStatementReplaceNullWithAnyString(sql, catalog, schemaPattern)) { + return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE); + } + } + + @Override + public boolean supportsStoredFunctionsUsingCallSyntax() { + return false; + } + + @Override + public boolean autoCommitFailureClosesAllResultSets() { + return false; + } + + /** + * The max length for client info values is 63 to make them fit in Cloud Spanner session labels. + */ + static final int MAX_CLIENT_INFO_VALUE_LENGTH = 63; + + static Properties getDefaultClientInfoProperties() throws SQLException { + Properties info = new Properties(); + try (ResultSet rs = getDefaultClientInfo()) { + while (rs.next()) { + info.put(rs.getString("NAME"), rs.getString("DEFAULT_VALUE")); + } + } + return info; + } + + private static ResultSet getDefaultClientInfo() { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("NAME", Type.string()), + StructField.of("MAX_LEN", Type.int64()), + StructField.of("DEFAULT_VALUE", Type.string()), + StructField.of("DESCRIPTION", Type.string())), + Arrays.asList( + Struct.newBuilder() + .set("NAME") + .to("APPLICATIONNAME") + .set("MAX_LEN") + .to(MAX_CLIENT_INFO_VALUE_LENGTH) + .set("DEFAULT_VALUE") + .to("") + .set("DESCRIPTION") + .to("The name of the application currently utilizing the connection.") + .build(), + Struct.newBuilder() + .set("NAME") + .to("CLIENTHOSTNAME") + .set("MAX_LEN") + .to(MAX_CLIENT_INFO_VALUE_LENGTH) + 
.set("DEFAULT_VALUE") + .to("") + .set("DESCRIPTION") + .to( + "The hostname of the computer the application using the connection is running on.") + .build(), + Struct.newBuilder() + .set("NAME") + .to("CLIENTUSER") + .set("MAX_LEN") + .to(MAX_CLIENT_INFO_VALUE_LENGTH) + .set("DEFAULT_VALUE") + .to("") + .set("DESCRIPTION") + .to( + "The name of the user that the application using the connection is performing work for. " + + "This may not be the same as the user name that was used in establishing the connection.") + .build()))); + } + + @Override + public ResultSet getClientInfoProperties() { + return getDefaultClientInfo(); + } + + @Override + public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) { + // TODO: return system functions + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("FUNCTION_CAT", Type.string()), + StructField.of("FUNCTION_SCHEM", Type.string()), + StructField.of("FUNCTION_NAME", Type.string()), + StructField.of("REMARKS", Type.string()), + StructField.of("FUNCTION_TYPE", Type.int64()), + StructField.of("SPECIFIC_NAME", Type.string())), + Collections.emptyList())); + } + + @Override + public ResultSet getFunctionColumns( + String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) { + // TODO: return system functions + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("FUNCTION_CAT", Type.string()), + StructField.of("FUNCTION_SCHEM", Type.string()), + StructField.of("FUNCTION_NAME", Type.string()), + StructField.of("COLUMN_NAME", Type.string()), + StructField.of("COLUMN_TYPE", Type.int64()), + StructField.of("DATA_TYPE", Type.int64()), + StructField.of("TYPE_NAME", Type.string()), + StructField.of("PRECISION", Type.int64()), + StructField.of("LENGTH", Type.int64()), + StructField.of("SCALE", Type.int64()), + StructField.of("RADIX", Type.int64()), + StructField.of("NULLABLE", Type.int64()), + 
StructField.of("REMARKS", Type.string()), + StructField.of("CHAR_OCTET_LENGTH", Type.int64()), + StructField.of("ORDINAL_POSITION", Type.int64()), + StructField.of("IS_NULLABLE", Type.string()), + StructField.of("SPECIFIC_NAME", Type.string())), + Collections.emptyList())); + } + + @Override + public ResultSet getPseudoColumns( + String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) { + return JdbcResultSet.of( + ResultSets.forRows( + Type.struct( + StructField.of("TABLE_CAT", Type.string()), + StructField.of("TABLE_SCHEM", Type.string()), + StructField.of("TABLE_NAME", Type.string()), + StructField.of("COLUMN_NAME", Type.string()), + StructField.of("DATA_TYPE", Type.int64()), + StructField.of("COLUMN_SIZE", Type.int64()), + StructField.of("DECIMAL_DIGITS", Type.int64()), + StructField.of("NUM_PREC_RADIX", Type.int64()), + StructField.of("COLUMN_USAGE", Type.string()), + StructField.of("REMARKS", Type.string()), + StructField.of("CHAR_OCTET_LENGTH", Type.int64()), + StructField.of("IS_NULLABLE", Type.string())), + Collections.emptyList())); + } + + @Override + public boolean generatedKeyAlwaysReturned() { + return false; + } + + @Override + public long getMaxLogicalLobSize() { + // BYTES(MAX) + return 10485760L; + } + + @Override + public boolean supportsRefCursors() { + return false; + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDriver.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDriver.java new file mode 100644 index 000000000000..8e8bd0726317 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDriver.java @@ -0,0 +1,358 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.api.core.InternalApi; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.SessionPoolOptionsHelper; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.connection.ConnectionOptionsHelper; +import com.google.cloud.spanner.connection.ConnectionProperty; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Suppliers; +import com.google.rpc.Code; +import io.opentelemetry.api.OpenTelemetry; +import java.sql.Connection; +import java.sql.Driver; +import java.sql.DriverManager; +import java.sql.DriverPropertyInfo; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.function.Supplier; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * JDBC {@link Driver} for Google Cloud Spanner. + * + *

Usage: + * + *

+ * 
+ * {@code
+ * String url = "jdbc:cloudspanner:/projects/my_project_id/"
+ *            + "instances/my_instance_id/databases/my_database_name?"
+ *            + "credentials=/home/cloudspanner-keys/my-key.json;autocommit=false";
+ * try (Connection connection = DriverManager.getConnection(url)) {
+ *   try(ResultSet rs = connection.createStatement().executeQuery("SELECT SingerId, AlbumId, MarketingBudget FROM Albums")) {
+ *     while(rs.next()) {
+ *       // do something
+ *     }
+ *   }
+ * }
+ * }
+ * 
+ * 
+ * + * The connection that is returned will implement the interface {@link CloudSpannerJdbcConnection}. + * The JDBC connection URL must be specified in the following format: + * + *
+ * jdbc:cloudspanner:[//host[:port]]/projects/project-id[/instances/instance-id[/databases/database-name]][\?property-name=property-value[;property-name=property-value]*]?
+ * 
+ * + * The property-value strings should be url-encoded. + * + *

The project-id part of the URI may be filled with the placeholder DEFAULT_PROJECT_ID. This + * placeholder is replaced by the default project id of the environment that is requesting a + * connection. + * + *

The supported properties are: + * + *

    + *
  • credentials (String): URL for the credentials file to use for the connection. If you do not + * specify any credentials at all, the default credentials of the environment as returned by + * {@link GoogleCredentials#getApplicationDefault()} are used.
  • autocommit (boolean): Sets the initial autocommit mode for the connection. Default is true. + *
  • readonly (boolean): Sets the initial readonly mode for the connection. Default is false. + *
  • autoConfigEmulator (boolean): Automatically configure the connection to try to connect to + * the Cloud Spanner emulator. You do not need to specify any host or port in the connection + * string as long as the emulator is running on the default host/port (localhost:9010). The + * instance and database in the connection string will automatically be created if these do + * not yet exist on the emulator. This means that you do not need to execute any `gcloud` + * commands on the emulator to create the instance and database before you can connect to it. + * Setting this property to true also enables running concurrent transactions on the emulator. + * The emulator aborts any concurrent transaction on the emulator, and the JDBC driver works + * around this by automatically setting a savepoint after each statement that is executed. + * When the transaction has been aborted by the emulator and the JDBC connection wants to + * continue with that transaction, the transaction is replayed up until the savepoint that had + * automatically been set after the last statement that was executed before the transaction + * was aborted by the emulator. + *
  • endpoint (string): Set this property to specify a custom endpoint that the JDBC driver + * should connect to. You can use this property in combination with the autoConfigEmulator + * property to instruct the JDBC driver to connect to an emulator instance that uses a + * randomly assigned port number. See ConcurrentTransactionOnEmulatorTest + * for a concrete example of how to use this property.
  • usePlainText (boolean): Sets whether the JDBC connection should establish an unencrypted + * connection to the server. This option can only be used when connecting to a local emulator + * that does not require an encrypted connection, and that does not require authentication. + *
  • optimizerVersion (string): The query optimizer version to use for the connection. The value + * must be either a valid version number or LATEST. If no value is specified, the + * query optimizer version specified in the environment variable + * SPANNER_OPTIMIZER_VERSION is used. If no query optimizer version is specified in the + * connection URL or in the environment variable, the default query optimizer version of Cloud + * Spanner is used. + *
  • oauthtoken (String): A valid OAuth2 token to use for the JDBC connection. The token must + * have been obtained with one or both of the scopes + * 'https://www.googleapis.com/auth/spanner.admin' and/or + * 'https://www.googleapis.com/auth/spanner.data'. If you specify both a credentials file and + * an OAuth token, the JDBC driver will throw an exception when you try to obtain a + * connection. + *
  • retryAbortsInternally (boolean): Sets the initial retryAbortsInternally mode for the + * connection. Default is true. @see {@link + * CloudSpannerJdbcConnection#setRetryAbortsInternally(boolean)} for more information. + *
  • minSessions (int): Sets the minimum number of sessions in the backing session pool. + * Defaults to 100. + *
  • maxSessions (int): Sets the maximum number of sessions in the backing session pool. + * Defaults to 400. + *
  • numChannels (int): Sets the number of gRPC channels to use. Defaults to 4. + *
  • rpcPriority (String): Sets the priority for all RPC invocations from this connection. + * Defaults to HIGH. + *
+ */ +public class JdbcDriver implements Driver { + /** + * The info {@link Properties} object that is passed to the JDBC driver may contain an entry with + * this key and an {@link io.opentelemetry.api.OpenTelemetry} instance as its value. This {@link + * io.opentelemetry.api.OpenTelemetry} instance will be used for tracing and metrics in the JDBC + * connection. + */ + public static final String OPEN_TELEMETRY_PROPERTY_KEY = "openTelemetry"; + + private static final String JDBC_API_CLIENT_LIB_TOKEN = "sp-jdbc"; + // Updated to version 2 when upgraded to Java 8 (JDBC 4.2) + static final int MAJOR_VERSION = 2; + static final int MINOR_VERSION = 0; + private static final String JDBC_URL_FORMAT = + "jdbc:" + ConnectionOptions.Builder.SPANNER_URI_FORMAT; + private static final Pattern URL_PATTERN = Pattern.compile(JDBC_URL_FORMAT); + private static final String JDBC_EXTERNAL_HOST_FORMAT = + "jdbc:" + ConnectionOptions.Builder.EXTERNAL_HOST_FORMAT; + + @VisibleForTesting + static final Pattern EXTERNAL_HOST_URL_PATTERN = Pattern.compile(JDBC_EXTERNAL_HOST_FORMAT); + + @InternalApi + public static String getClientLibToken() { + return JDBC_API_CLIENT_LIB_TOKEN; + } + + static { + try { + register(); + } catch (SQLException e) { + java.sql.DriverManager.println("Registering driver failed: " + e.getMessage()); + } + } + + private static JdbcDriver registeredDriver; + + static void register() throws SQLException { + if (isRegistered()) { + throw new IllegalStateException( + "Driver is already registered. It can only be registered once."); + } + JdbcDriver registeredDriver = new JdbcDriver(); + DriverManager.registerDriver(registeredDriver); + JdbcDriver.registeredDriver = registeredDriver; + } + + /** + * According to JDBC specification, this driver is registered against {@link DriverManager} when + * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the + * class can be gc'ed if necessary. 
+ * + * @throws IllegalStateException if the driver is not registered + * @throws SQLException if deregistering the driver fails + */ + static void deregister() throws SQLException { + if (!isRegistered()) { + throw new IllegalStateException( + "Driver is not registered (or it has not been registered using Driver.register() method)"); + } + ConnectionOptions.closeSpanner(); + DriverManager.deregisterDriver(registeredDriver); + registeredDriver = null; + } + + /** + * @return {@code true} if the driver is registered against {@link DriverManager} + */ + static boolean isRegistered() { + return registeredDriver != null; + } + + /** + * @return the registered JDBC driver for Cloud Spanner. + * @throws SQLException if the driver has not been registered. + */ + static JdbcDriver getRegisteredDriver() throws SQLException { + if (isRegistered()) { + return registeredDriver; + } + throw JdbcSqlExceptionFactory.of( + "The driver has not been registered", Code.FAILED_PRECONDITION); + } + + public JdbcDriver() {} + + @Override + public Connection connect(String url, Properties info) throws SQLException { + if (url != null && (url.startsWith("jdbc:cloudspanner") || url.startsWith("jdbc:spanner"))) { + try { + Matcher matcher = URL_PATTERN.matcher(url); + Matcher matcherExternalHost = EXTERNAL_HOST_URL_PATTERN.matcher(url); + if (matcher.matches() || matcherExternalHost.matches()) { + // strip 'jdbc:' from the URL, add any extra properties and pass on to the generic + // Connection API. Also set the user-agent if we detect that the connection + // comes from known framework like Hibernate, and there is no other user-agent set. 
+ maybeAddUserAgent(info); + String connectionUri = appendPropertiesToUrl(url.substring(5), info); + ConnectionOptions options = buildConnectionOptions(connectionUri, info); + JdbcConnection connection = new JdbcConnection(url, options); + if (options.getWarnings() != null) { + connection.pushWarning(new SQLWarning(options.getWarnings())); + } + return connection; + } + } catch (SpannerException e) { + throw JdbcSqlExceptionFactory.of(e); + } catch (IllegalArgumentException e) { + throw JdbcSqlExceptionFactory.of(e.getMessage(), Code.INVALID_ARGUMENT, e); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of(e.getMessage(), Code.UNKNOWN, e); + } + throw JdbcSqlExceptionFactory.of("invalid url: " + url, Code.INVALID_ARGUMENT); + } + return null; + } + + static ConnectionOptions buildConnectionOptions(String connectionUrl, Properties info) { + ConnectionOptions.Builder builder = + ConnectionOptions.newBuilder().setTracingPrefix("JDBC").setUri(connectionUrl); + if (info.containsKey(OPEN_TELEMETRY_PROPERTY_KEY) + && info.get(OPEN_TELEMETRY_PROPERTY_KEY) instanceof OpenTelemetry) { + builder.setOpenTelemetry((OpenTelemetry) info.get(OPEN_TELEMETRY_PROPERTY_KEY)); + } + // Enable multiplexed sessions by default for the JDBC driver. + builder.setSessionPoolOptions( + SessionPoolOptionsHelper.useMultiplexedSessions(SessionPoolOptions.newBuilder()).build()); + // Enable direct executor for JDBC, as we don't use the async API. + builder = + ConnectionOptionsHelper.useDirectExecutorIfNotUseVirtualThreads(connectionUrl, builder); + return builder.build(); + } + + static void maybeAddUserAgent(Properties properties) { + if (properties.containsKey("userAgent")) { + return; + } + if (isHibernate()) { + properties.setProperty("userAgent", "sp-hib"); + } + } + + private static final Supplier isHibernate = + Suppliers.memoize( + () -> { + try { + // First check if the Spanner Hibernate dialect is on the classpath. 
If it is, then + // we assume that Hibernate will (eventually) be used. + Class.forName( + "com.google.cloud.spanner.hibernate.SpannerDialect", + /* initialize= */ false, + JdbcDriver.class.getClassLoader()); + return true; + } catch (Throwable ignore) { + } + + // If we did not find the Spanner Hibernate dialect on the classpath, then do a + // check if the connection is still being created by Hibernate using the built-in + // Spanner dialect in Hibernate. + try { + StackTraceElement[] callStack = Thread.currentThread().getStackTrace(); + for (StackTraceElement element : callStack) { + if (element.getClassName().contains(".hibernate.")) { + return true; + } + } + } catch (Throwable ignore) { + } + return false; + }); + + static boolean isHibernate() { + // Cache the result as the check is relatively expensive, and we also don't want to create + // multiple different Spanner instances just to get the correct user-agent in every case. + return isHibernate.get(); + } + + static String appendPropertiesToUrl(String url, Properties info) { + StringBuilder res = new StringBuilder(url); + for (Entry entry : info.entrySet()) { + if (entry.getValue() instanceof String && !"".equals(entry.getValue())) { + res.append(";").append(entry.getKey()).append("=").append(entry.getValue()); + } + } + return res.toString(); + } + + @Override + public boolean acceptsURL(String url) { + return URL_PATTERN.matcher(url).matches() || EXTERNAL_HOST_URL_PATTERN.matcher(url).matches(); + } + + @Override + public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) { + String connectionUri = appendPropertiesToUrl(url.substring(5), info); + DriverPropertyInfo[] res = + new DriverPropertyInfo[ConnectionPropertiesHelper.VALID_CONNECTION_PROPERTIES.size()]; + int i = 0; + for (ConnectionProperty prop : ConnectionPropertiesHelper.VALID_CONNECTION_PROPERTIES) { + res[i] = ConnectionPropertiesHelper.toDriverPropertyInfo(connectionUri, prop); + i++; + } + return res; + } + + @Override + 
public int getMajorVersion() { + return MAJOR_VERSION; + } + + @Override + public int getMinorVersion() { + return MINOR_VERSION; + } + + @Override + public boolean jdbcCompliant() { + return false; + } + + @Override + public Logger getParentLogger() throws SQLFeatureNotSupportedException { + throw new SQLFeatureNotSupportedException(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcParameterMetaData.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcParameterMetaData.java new file mode 100644 index 000000000000..784a2678fd29 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcParameterMetaData.java @@ -0,0 +1,239 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.JdbcDataTypeConverter; +import com.google.cloud.spanner.ResultSet; +import com.google.rpc.Code; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.ParameterMetaData; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; + +/** {@link ParameterMetaData} implementation for Cloud Spanner */ +class JdbcParameterMetaData extends AbstractJdbcWrapper implements ParameterMetaData { + private final JdbcPreparedStatement statement; + + private final StructType parameters; + + JdbcParameterMetaData(JdbcPreparedStatement statement, ResultSet resultSet) { + this.statement = statement; + this.parameters = resultSet.getMetadata().getUndeclaredParameters(); + } + + private Field getField(int param) throws SQLException { + JdbcPreconditions.checkArgument(param > 0 && param <= parameters.getFieldsCount(), param); + String paramName = "p" + param; + return parameters.getFieldsList().stream() + .filter(field -> field.getName().equals(paramName)) + .findAny() + .orElseThrow( + () -> + JdbcSqlExceptionFactory.of( + "Unknown parameter: " + paramName, Code.INVALID_ARGUMENT)); + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public int getParameterCount() { + return parameters.getFieldsCount(); + } + + @Override + public int isNullable(int param) { + Integer nullable = statement.getParameters().getNullable(param); + //noinspection MagicConstant + return nullable == null ? 
parameterNullableUnknown : nullable; + } + + @Override + public boolean isSigned(int param) throws SQLException { + int type = getParameterType(param); + return type == Types.DOUBLE + || type == Types.FLOAT + || type == Types.REAL + || type == Types.BIGINT + || type == Types.INTEGER + || type == Types.SMALLINT + || type == Types.TINYINT + || type == Types.DECIMAL + || type == Types.NUMERIC; + } + + @Override + public int getPrecision(int param) { + Integer length = statement.getParameters().getScaleOrLength(param); + return length == null ? 0 : length; + } + + @Override + public int getScale(int param) { + return 0; + } + + @Override + public int getParameterType(int param) throws SQLException { + JdbcPreconditions.checkArgument(param > 0 && param <= parameters.getFieldsCount(), param); + int typeFromValue = getParameterTypeFromValue(param); + if (typeFromValue != Types.OTHER) { + return typeFromValue; + } + + Type type = getField(param).getType(); + // JDBC only has a generic ARRAY type. + if (type.getCode() == TypeCode.ARRAY) { + return Types.ARRAY; + } + JdbcDataType jdbcDataType = + JdbcDataType.getType(JdbcDataTypeConverter.toSpannerType(type).getCode()); + return jdbcDataType == null ? Types.OTHER : jdbcDataType.getSqlType(); + } + + /** + * This method returns the parameter type based on the parameter value that has been set. This was + * previously the only way to get the parameter types of a statement. Cloud Spanner can now return + * the types and names of parameters in a SQL string, which is what this method should return. + */ + // TODO: Remove this method for the next major version bump. 
+ private int getParameterTypeFromValue(int param) { + Integer type = statement.getParameters().getType(param); + if (type != null) { + return type; + } + + Object value = statement.getParameters().getParameter(param); + if (value == null) { + return Types.OTHER; + } else if (Boolean.class.isAssignableFrom(value.getClass())) { + return Types.BOOLEAN; + } else if (Byte.class.isAssignableFrom(value.getClass())) { + return Types.TINYINT; + } else if (Short.class.isAssignableFrom(value.getClass())) { + return Types.SMALLINT; + } else if (Integer.class.isAssignableFrom(value.getClass())) { + return Types.INTEGER; + } else if (Long.class.isAssignableFrom(value.getClass())) { + return Types.BIGINT; + } else if (Float.class.isAssignableFrom(value.getClass())) { + return Types.REAL; + } else if (Double.class.isAssignableFrom(value.getClass())) { + return Types.DOUBLE; + } else if (BigDecimal.class.isAssignableFrom(value.getClass())) { + return Types.NUMERIC; + } else if (Date.class.isAssignableFrom(value.getClass())) { + return Types.DATE; + } else if (Timestamp.class.isAssignableFrom(value.getClass())) { + return Types.TIMESTAMP; + } else if (Time.class.isAssignableFrom(value.getClass())) { + return Types.TIME; + } else if (String.class.isAssignableFrom(value.getClass())) { + return Types.NVARCHAR; + } else if (byte[].class.isAssignableFrom(value.getClass())) { + return Types.BINARY; + } else { + return Types.OTHER; + } + } + + @Override + public String getParameterTypeName(int param) throws SQLException { + JdbcPreconditions.checkArgument(param > 0 && param <= parameters.getFieldsCount(), param); + String typeNameFromValue = getParameterTypeNameFromValue(param); + if (typeNameFromValue != null) { + return typeNameFromValue; + } + + com.google.cloud.spanner.Type type = + JdbcDataTypeConverter.toSpannerType(getField(param).getType()); + return getSpannerTypeName(type, statement.getConnection().getDialect()); + } + + private String getParameterTypeNameFromValue(int param) { 
  // Continuation of getParameterTypeNameFromValue: maps the inferred JDBC type code to a
  // Spanner type name, or returns null when the type could not be inferred from the value.
  int type = getParameterTypeFromValue(param);
  if (type != Types.OTHER) {
    return getSpannerTypeName(type);
  }
  return null;
}

@Override
public String getParameterClassName(int param) throws SQLException {
  JdbcPreconditions.checkArgument(param > 0 && param <= parameters.getFieldsCount(), param);
  // Prefer the class of the value that the application supplied; fall back to the type that
  // the backend reported for this parameter.
  String classNameFromValue = getParameterClassNameFromValue(param);
  if (classNameFromValue != null) {
    return classNameFromValue;
  }

  com.google.cloud.spanner.Type type =
      JdbcDataTypeConverter.toSpannerType(getField(param).getType());
  return getClassName(type);
}

// TODO: Remove this method for the next major version bump.
private String getParameterClassNameFromValue(int param) {
  // A concrete parameter value wins over a registered JDBC type code.
  Object value = statement.getParameters().getParameter(param);
  if (value != null) {
    return value.getClass().getName();
  }
  Integer type = statement.getParameters().getType(param);
  if (type != null) {
    return getClassName(type);
  }
  return null;
}

@Override
public int getParameterMode(int param) {
  // All parameters are reported as IN parameters.
  return parameterModeIn;
}

@Override
public String toString() {
  // Debug representation: dumps every metadata attribute for every parameter.
  // NOTE(review): this resolves metadata for each parameter, which may be expensive for
  // parameters without an application-supplied value — confirm acceptable for logging use.
  try {
    StringBuilder res = new StringBuilder();
    res.append("CloudSpannerPreparedStatementParameterMetaData, parameter count: ")
        .append(getParameterCount());
    for (int param = 1; param <= getParameterCount(); param++) {
      res.append("\nParameter ")
          .append(param)
          .append(":\n\t Class name: ")
          .append(getParameterClassName(param));
      res.append(",\n\t Parameter type name: ").append(getParameterTypeName(param));
      res.append(",\n\t Parameter type: ").append(getParameterType(param));
      res.append(",\n\t Parameter precision: ").append(getPrecision(param));
      res.append(",\n\t Parameter scale: ").append(getScale(param));
      res.append(",\n\t Parameter signed: ").append(isSigned(param));
      res.append(",\n\t Parameter nullable: ").append(isNullable(param));
      res.append(",\n\t Parameter mode: ").append(getParameterMode(param));
    }
    return res.toString();
  } catch (SQLException exception) {
    // Never propagate from toString(); return a diagnostic string instead.
    return "Failed to get parameter metadata: " + exception;
  }
}
}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcParameterStore.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcParameterStore.java
new file mode 100644
index 000000000000..b43b44ddc7d0
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcParameterStore.java
@@ -0,0 +1,1105 @@
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Statement.Builder; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.ValueBinder; +import com.google.common.io.CharStreams; +import com.google.protobuf.AbstractMessage; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Descriptors.Descriptor; +import com.google.protobuf.Message; +import com.google.protobuf.NullValue; +import com.google.protobuf.ProtocolMessageEnum; +import com.google.rpc.Code; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.lang.reflect.Method; +import java.math.BigDecimal; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.Date; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLType; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.time.LocalDate; +import java.time.OffsetDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +/** This class handles the parameters of a {@link PreparedStatement}. */ +class JdbcParameterStore { + /** + * The initial size of the arrays that hold the parameter values. The array will automatically be + * extended when needed. 
+ */ + private static final int INITIAL_PARAMETERS_ARRAY_SIZE = 10; + + private static final class JdbcParameter { + private Object value; + private Integer type; + private Integer nullable; + private Integer scaleOrLength; + private String column; + } + + private ArrayList parametersList = new ArrayList<>(INITIAL_PARAMETERS_ARRAY_SIZE); + + /** Name of the table that the parameters will be used to query/update. Can be null. */ + private String table; + + /** + * The highest parameter index in use. Parameter values do not need to be set in order, it could + * be that a parameter with for example index 10 is set first, and that the preceding parameters + * are set at a later time. + */ + private int highestIndex = 0; + + private final Dialect dialect; + + JdbcParameterStore(Dialect dialect) { + this.dialect = dialect; + } + + void clearParameters() { + parametersList = new ArrayList<>(INITIAL_PARAMETERS_ARRAY_SIZE); + highestIndex = 0; + table = null; + } + + /** Get parameter value. Index is 1-based. */ + Object getParameter(int parameterIndex) { + int arrayIndex = parameterIndex - 1; + if (arrayIndex >= parametersList.size() || parametersList.get(arrayIndex) == null) return null; + return parametersList.get(arrayIndex).value; + } + + /** Get parameter type code according to the values in {@link Types}. Index is 1-based. 
*/ + Integer getType(int parameterIndex) { + int arrayIndex = parameterIndex - 1; + if (arrayIndex >= parametersList.size() || parametersList.get(arrayIndex) == null) return null; + return parametersList.get(arrayIndex).type; + } + + Integer getNullable(int parameterIndex) { + int arrayIndex = parameterIndex - 1; + if (arrayIndex >= parametersList.size() || parametersList.get(arrayIndex) == null) return null; + return parametersList.get(arrayIndex).nullable; + } + + Integer getScaleOrLength(int parameterIndex) { + int arrayIndex = parameterIndex - 1; + if (arrayIndex >= parametersList.size() || parametersList.get(arrayIndex) == null) return null; + return parametersList.get(arrayIndex).scaleOrLength; + } + + String getColumn(int parameterIndex) { + int arrayIndex = parameterIndex - 1; + if (arrayIndex >= parametersList.size() || parametersList.get(arrayIndex) == null) return null; + return parametersList.get(arrayIndex).column; + } + + String getTable() { + return table; + } + + void setTable(String table) { + this.table = table; + } + + void setColumn(int parameterIndex, String column) throws SQLException { + setParameter( + parameterIndex, + getParameter(parameterIndex), + getType(parameterIndex), + getScaleOrLength(parameterIndex), + column, + null); + } + + void setType(int parameterIndex, Integer type) throws SQLException { + setParameter( + parameterIndex, + getParameter(parameterIndex), + type, + getScaleOrLength(parameterIndex), + getColumn(parameterIndex), + null); + } + + /** Sets a parameter value. The type will be determined based on the type of the value. */ + void setParameter(int parameterIndex, Object value) throws SQLException { + setParameter(parameterIndex, value, null, null, null, null); + } + + /** Sets a parameter value as the specified vendor-specific {@link SQLType}. 
 */
void setParameter(int parameterIndex, Object value, SQLType sqlType) throws SQLException {
  setParameter(parameterIndex, value, null, null, null, sqlType);
}

/**
 * Sets a parameter value as the specified vendor-specific {@link SQLType} with the specified
 * scale or length. This method is only here to support the {@link
 * PreparedStatement#setObject(int, Object, SQLType, int)} method.
 */
void setParameter(int parameterIndex, Object value, SQLType sqlType, Integer scaleOrLength)
    throws SQLException {
  setParameter(parameterIndex, value, null, scaleOrLength, null, sqlType);
}

/**
 * Sets a parameter value as the specified sql type. The type can be one of the constants in
 * {@link Types} or a vendor specific type code supplied by a vendor specific {@link SQLType}.
 */
void setParameter(int parameterIndex, Object value, Integer sqlType) throws SQLException {
  setParameter(parameterIndex, value, sqlType, null);
}

/**
 * Sets a parameter value as the specified sql type with the specified scale or length. The type
 * can be one of the constants in {@link Types} or a vendor specific type code supplied by a
 * vendor specific {@link SQLType}.
 */
void setParameter(int parameterIndex, Object value, Integer sqlType, Integer scaleOrLength)
    throws SQLException {
  setParameter(parameterIndex, value, sqlType, scaleOrLength, null, null);
}

/**
 * Sets a parameter value as the specified sql type with the specified scale or length. Any {@link
 * SQLType} instance will take precedence over sqlType. The type can be one of the constants in
 * {@link Types} or a vendor specific type code supplied by a vendor specific {@link SQLType}.
 */
void setParameter(
    int parameterIndex,
    Object value,
    Integer sqlType,
    Integer scaleOrLength,
    String column,
    SQLType sqlTypeObject)
    throws SQLException {
  // Ignore the sql type if the application has created a Spanner Value object.
  if (!(value instanceof Value)) {
    // check that only valid type/value combinations are entered
    if (sqlTypeObject != null && sqlType == null) {
      sqlType = sqlTypeObject.getVendorTypeNumber();
    }
    if (sqlType != null) {
      checkTypeAndValueSupported(value, sqlType);
    }
  }
  // set the parameter
  highestIndex = Math.max(parameterIndex, highestIndex);
  int arrayIndex = parameterIndex - 1;
  if (arrayIndex >= parametersList.size() || parametersList.get(arrayIndex) == null) {
    // Grow the backing list with null slots until the 1-based index fits, then create a holder.
    parametersList.ensureCapacity(parameterIndex);
    while (parametersList.size() < parameterIndex) {
      parametersList.add(null);
    }
    parametersList.set(arrayIndex, new JdbcParameter());
  }
  JdbcParameter param = parametersList.get(arrayIndex);
  param.value = value;
  param.type = sqlType;
  param.scaleOrLength = scaleOrLength;
  param.column = column;
}

// Validates that the given JDBC type code is supported by this driver and that the value is a
// plausible Java representation for that type. Only called for non-Value parameters.
private void checkTypeAndValueSupported(Object value, int sqlType) throws SQLException {
  if (value == null) {
    // null is always supported, as we will just fall back to an untyped NULL value.
    return;
  }
  if (!isTypeSupported(sqlType)) {
    throw JdbcSqlExceptionFactory.of(
        "Type " + sqlType + " is not supported", Code.INVALID_ARGUMENT);
  }
  if (!isValidTypeAndValue(value, sqlType)) {
    throw JdbcSqlExceptionFactory.of(
        value + " is not a valid value for type " + sqlType, Code.INVALID_ARGUMENT);
  }
}

// Whitelist of JDBC type codes (plus Spanner vendor-specific codes) that this driver accepts.
private boolean isTypeSupported(int sqlType) {
  switch (sqlType) {
    case Types.BIT:
    case Types.BOOLEAN:
    case Types.TINYINT:
    case Types.SMALLINT:
    case Types.INTEGER:
    case Types.BIGINT:
    case Types.FLOAT:
    case Types.REAL:
    case Types.DOUBLE:
    case Types.CHAR:
    case Types.VARCHAR:
    case Types.LONGVARCHAR:
    case Types.NCHAR:
    case Types.NVARCHAR:
    case Types.LONGNVARCHAR:
    case Types.DATE:
    case Types.TIME:
    case Types.TIME_WITH_TIMEZONE:
    case Types.TIMESTAMP:
    case Types.TIMESTAMP_WITH_TIMEZONE:
    case Types.BINARY:
    case Types.VARBINARY:
    case Types.LONGVARBINARY:
    case Types.ARRAY:
    case Types.BLOB:
    case Types.CLOB:
    case Types.NCLOB:
    case Types.NUMERIC:
    case Types.DECIMAL:
    case JsonType.VENDOR_TYPE_NUMBER:
    case JsonType.SHORT_VENDOR_TYPE_NUMBER:
    case PgJsonbType.VENDOR_TYPE_NUMBER:
    case PgJsonbType.SHORT_VENDOR_TYPE_NUMBER:
    case ProtoMessageType.VENDOR_TYPE_NUMBER:
    case ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER:
    case ProtoEnumType.VENDOR_TYPE_NUMBER:
    case ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER:
      return true;
  }
  return false;
}

// Checks that the runtime class of the value can be converted to the given JDBC type code.
// A null value is always valid (bound as an untyped NULL).
private boolean isValidTypeAndValue(Object value, int sqlType) {
  if (value == null) {
    return true;
  }
  switch (sqlType) {
    case Types.BIT:
    case Types.BOOLEAN:
      return value instanceof Boolean || value instanceof Number;
    case Types.TINYINT:
    case Types.SMALLINT:
    case Types.INTEGER:
    case Types.BIGINT:
    case Types.FLOAT:
    case Types.REAL:
    case Types.DOUBLE:
    case Types.NUMERIC:
    case Types.DECIMAL:
      return value instanceof Number || value instanceof ProtocolMessageEnum;
    case Types.CHAR:
    case Types.VARCHAR:
    case Types.LONGVARCHAR:
    case Types.NCHAR:
    case Types.NVARCHAR:
    case Types.LONGNVARCHAR:
      return value instanceof String
          || value instanceof InputStream
          || value instanceof Reader
          || value instanceof URL;
    case Types.DATE:
      return value instanceof Date
          || value instanceof Time
          || value instanceof Timestamp
          || value instanceof LocalDate;
    case Types.TIME:
    case Types.TIME_WITH_TIMEZONE:
    case Types.TIMESTAMP:
    case Types.TIMESTAMP_WITH_TIMEZONE:
      return value instanceof Date
          || value instanceof Time
          || value instanceof Timestamp
          || value instanceof OffsetDateTime;
    case Types.BINARY:
    case Types.VARBINARY:
    case Types.LONGVARBINARY:
      return value instanceof byte[]
          || value instanceof InputStream
          || value instanceof AbstractMessage;
    case Types.ARRAY:
      return value instanceof Array;
    case Types.BLOB:
      return value instanceof Blob || value instanceof InputStream;
    case Types.CLOB:
      return value instanceof Clob || value instanceof Reader;
    case Types.NCLOB:
      return value instanceof NClob || value instanceof Reader;
    case JsonType.VENDOR_TYPE_NUMBER:
    case JsonType.SHORT_VENDOR_TYPE_NUMBER:
      return value instanceof String
          || value instanceof InputStream
          || value instanceof Reader
          || (value instanceof Value && ((Value) value).getType().getCode() == Type.Code.JSON);
    case PgJsonbType.VENDOR_TYPE_NUMBER:
    case PgJsonbType.SHORT_VENDOR_TYPE_NUMBER:
      return value instanceof String
          || value instanceof InputStream
          || value instanceof Reader
          || (value instanceof Value
              && ((Value) value).getType().getCode() == Type.Code.PG_JSONB);
    case ProtoMessageType.VENDOR_TYPE_NUMBER:
    case ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER:
      return value instanceof AbstractMessage || value instanceof byte[];
    case ProtoEnumType.VENDOR_TYPE_NUMBER:
    case ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER:
      return value instanceof ProtocolMessageEnum || value instanceof Number;
  }
  return false;
}

/** Return the highest param index in use in this store.
 */
int getHighestIndex() {
  return highestIndex;
}

/** Fetch parameter metadata from the database. */
void fetchMetaData(Connection connection) throws SQLException {
  if (table != null && !"".equals(table)) {
    try (ResultSet rsCols = connection.getMetaData().getColumns(null, null, table, null)) {
      while (rsCols.next()) {
        String col = rsCols.getString("COLUMN_NAME");
        int arrayIndex = getParameterArrayIndex(col);
        if (arrayIndex > -1) {
          JdbcParameter param = parametersList.get(arrayIndex);
          if (param != null) {
            // Copy size/type/nullability from the column the parameter is mapped to.
            param.scaleOrLength = rsCols.getInt("COLUMN_SIZE");
            param.type = rsCols.getInt("DATA_TYPE");
            param.nullable = rsCols.getInt("NULLABLE");
          }
        }
      }
    }
  }
}

// Returns the 0-based index of the parameter that is mapped to the given column name
// (case-insensitive), or -1 when no parameter is mapped to it.
private int getParameterArrayIndex(String columnName) {
  if (columnName != null) {
    for (int index = 0; index < highestIndex; index++) {
      JdbcParameter param = parametersList.get(index);
      if (param != null && param.column != null) {
        if (columnName.equalsIgnoreCase(param.column)) {
          return index;
        }
      }
    }
  }
  return -1;
}

/** Bind a JDBC parameter to a parameter on a Spanner {@link Statement}. */
Builder bindParameterValue(ValueBinder binder, int index) throws SQLException {
  return setValue(binder, getParameter(index), getType(index));
}

/** Set a value from a JDBC parameter on a Spanner {@link Statement}. */
Builder setValue(ValueBinder binder, Object value, Integer sqlType) throws SQLException {
  Builder res;
  if (value instanceof Value) {
    // If a Value has been constructed, then that should override any sqlType that might have been
    // supplied.
    res = binder.to((Value) value);
  } else if (sqlType != null && sqlType == Types.ARRAY) {
    // Unwrap a java.sql.Array into its element array and base type before binding.
    if (value instanceof Array) {
      Array array = (Array) value;
      value = array.getArray();
      sqlType = array.getBaseType();
    }
    res = setArrayValue(binder, sqlType, value);
  } else {
    res = setSingleValue(binder, value, sqlType);
  }
  if (res == null && value != null) {
    throw JdbcSqlExceptionFactory.of(
        "Unsupported parameter type: " + value.getClass().getName() + " - " + value,
        Code.INVALID_ARGUMENT);
  }
  return res;
}

// Dispatches on whether the value is null, has an explicit JDBC type, or must be inferred
// from the runtime class of the value.
private Builder setSingleValue(ValueBinder binder, Object value, Integer sqlType)
    throws SQLException {
  if (value == null) {
    return setNullValue(binder, sqlType);
  } else if (sqlType == null || sqlType.equals(Types.OTHER)) {
    return setParamWithUnknownType(binder, value);
  } else {
    return setParamWithKnownType(binder, value, sqlType);
  }
}

/** Set a JDBC parameter value on a Spanner {@link Statement} with a known SQL type. */
private Builder setParamWithKnownType(ValueBinder binder, Object value, Integer sqlType)
    throws SQLException {
  if (sqlType == null) {
    return null;
  }
  int type = sqlType;

  switch (type) {
    case Types.BIT:
    case Types.BOOLEAN:
      if (value instanceof Boolean) {
        return binder.to((Boolean) value);
      } else if (value instanceof Number) {
        // Any non-zero number is treated as true.
        return binder.to(((Number) value).longValue() != 0L);
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid boolean", Code.INVALID_ARGUMENT);
    case Types.TINYINT:
    case Types.SMALLINT:
    case Types.INTEGER:
    case Types.BIGINT:
      if (value instanceof Number) {
        return binder.to(((Number) value).longValue());
      } else if (value instanceof ProtocolMessageEnum) {
        return binder.to((ProtocolMessageEnum) value);
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid long", Code.INVALID_ARGUMENT);
    case Types.REAL:
      if (value instanceof Number) {
        return binder.to(((Number) value).floatValue());
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid float", Code.INVALID_ARGUMENT);
    case Types.FLOAT:
    case Types.DOUBLE:
      if (value instanceof Number) {
        return binder.to(((Number) value).doubleValue());
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid double", Code.INVALID_ARGUMENT);
    case Types.NUMERIC:
    case Types.DECIMAL:
      if (dialect == Dialect.POSTGRESQL) {
        // PG numeric is bound from the string representation of the number.
        if (value instanceof Number) {
          return binder.to(Value.pgNumeric(value.toString()));
        }
        throw JdbcSqlExceptionFactory.of(value + " is not a valid Number", Code.INVALID_ARGUMENT);
      } else {
        if (value instanceof Number) {
          if (value instanceof BigDecimal) {
            return binder.to((BigDecimal) value);
          }
          try {
            return binder.to(new BigDecimal(value.toString()));
          } catch (NumberFormatException e) {
            // ignore and fall through to the exception.
          }
        }
        throw JdbcSqlExceptionFactory.of(
            value + " is not a valid BigDecimal", Code.INVALID_ARGUMENT);
      }
    case Types.CHAR:
    case Types.VARCHAR:
    case Types.LONGVARCHAR:
    case Types.NCHAR:
    case Types.NVARCHAR:
    case Types.LONGNVARCHAR:
      String stringValue;
      if (value instanceof String) {
        stringValue = (String) value;
      } else if (value instanceof InputStream) {
        stringValue = getStringFromInputStream((InputStream) value);
      } else if (value instanceof Reader) {
        stringValue = getStringFromReader((Reader) value);
      } else if (value instanceof URL) {
        stringValue = value.toString();
      } else if (value instanceof UUID) {
        stringValue = ((UUID) value).toString();
      } else {
        throw JdbcSqlExceptionFactory.of(value + " is not a valid string", Code.INVALID_ARGUMENT);
      }
      return binder.to(stringValue);
    case JsonType.VENDOR_TYPE_NUMBER:
    case JsonType.SHORT_VENDOR_TYPE_NUMBER:
    case PgJsonbType.VENDOR_TYPE_NUMBER:
    case PgJsonbType.SHORT_VENDOR_TYPE_NUMBER:
      String jsonValue;
      if (value instanceof String) {
        jsonValue = (String) value;
      } else if (value instanceof InputStream) {
        jsonValue = getStringFromInputStream((InputStream) value);
      } else if (value instanceof Reader) {
        jsonValue = getStringFromReader((Reader) value);
      } else {
        throw JdbcSqlExceptionFactory.of(
            value + " is not a valid JSON value", Code.INVALID_ARGUMENT);
      }
      if (type == PgJsonbType.VENDOR_TYPE_NUMBER
          || type == PgJsonbType.SHORT_VENDOR_TYPE_NUMBER) {
        return binder.to(Value.pgJsonb(jsonValue));
      }
      return binder.to(Value.json(jsonValue));
    case Types.DATE:
      if (value instanceof Date) {
        return binder.to(JdbcTypeConverter.toGoogleDate((Date) value));
      } else if (value instanceof Time) {
        return binder.to(JdbcTypeConverter.toGoogleDate((Time) value));
      } else if (value instanceof Timestamp) {
        return binder.to(JdbcTypeConverter.toGoogleDate((Timestamp) value));
      } else if (value instanceof LocalDate) {
        LocalDate localDate = (LocalDate) value;
        return binder.to(
            com.google.cloud.Date.fromYearMonthDay(
                localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth()));
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid date", Code.INVALID_ARGUMENT);
    case Types.TIME:
    case Types.TIME_WITH_TIMEZONE:
    case Types.TIMESTAMP:
    case Types.TIMESTAMP_WITH_TIMEZONE:
      if (value instanceof Date) {
        return binder.to(JdbcTypeConverter.toGoogleTimestamp((Date) value));
      } else if (value instanceof Time) {
        return binder.to(JdbcTypeConverter.toGoogleTimestamp((Time) value));
      } else if (value instanceof Timestamp) {
        return binder.to(JdbcTypeConverter.toGoogleTimestamp((Timestamp) value));
      } else if (value instanceof OffsetDateTime) {
        OffsetDateTime offsetDateTime = (OffsetDateTime) value;
        return binder.to(
            com.google.cloud.Timestamp.ofTimeSecondsAndNanos(
                offsetDateTime.toEpochSecond(), offsetDateTime.getNano()));
      }
      throw JdbcSqlExceptionFactory.of(
          value + " is not a valid timestamp", Code.INVALID_ARGUMENT);
    case Types.BINARY:
    case Types.VARBINARY:
    case Types.LONGVARBINARY:
      if (value instanceof byte[]) {
        return binder.to(ByteArray.copyFrom((byte[]) value));
      } else if (value instanceof InputStream) {
        try {
          return binder.to(ByteArray.copyFrom((InputStream) value));
        } catch (IOException e) {
          throw JdbcSqlExceptionFactory.of(
              "Could not copy bytes from input stream: " + e.getMessage(),
              Code.INVALID_ARGUMENT,
              e);
        }
      } else if (value instanceof AbstractMessage) {
        return binder.to((AbstractMessage) value);
      }
      throw JdbcSqlExceptionFactory.of(
          value + " is not a valid byte array", Code.INVALID_ARGUMENT);
    case Types.ARRAY:
      // NOTE(review): this passes sqlType (Types.ARRAY) as the base type to setArrayValue
      // instead of jdbcArray.getBaseType(); setValue already unwraps arrays before reaching
      // this method, so this branch looks unreachable in practice — confirm.
      if (value instanceof Array) {
        Array jdbcArray = (Array) value;
        return setArrayValue(binder, sqlType, jdbcArray.getArray());
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid array", Code.INVALID_ARGUMENT);
    case Types.BLOB:
      if (value instanceof Blob) {
        try {
          return binder.to(ByteArray.copyFrom(((Blob) value).getBinaryStream()));
        } catch (IOException e) {
          throw JdbcSqlExceptionFactory.of(
              "could not set bytes from blob", Code.INVALID_ARGUMENT, e);
        }
      } else if (value instanceof InputStream) {
        try {
          return binder.to(ByteArray.copyFrom((InputStream) value));
        } catch (IOException e) {
          throw JdbcSqlExceptionFactory.of(
              "could not set bytes from input stream", Code.INVALID_ARGUMENT, e);
        }
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid blob", Code.INVALID_ARGUMENT);
    case Types.CLOB:
    case Types.NCLOB:
      if (value instanceof Clob) {
        try {
          return binder.to(CharStreams.toString(((Clob) value).getCharacterStream()));
        } catch (IOException e) {
          throw JdbcSqlExceptionFactory.of(
              "could not set string from clob", Code.INVALID_ARGUMENT, e);
        }
      } else if (value instanceof Reader) {
        try {
          return binder.to(CharStreams.toString((Reader) value));
        } catch (IOException e) {
          throw JdbcSqlExceptionFactory.of(
              "could not set string from reader", Code.INVALID_ARGUMENT, e);
        }
      }
      throw JdbcSqlExceptionFactory.of(value + " is not a valid clob", Code.INVALID_ARGUMENT);
    case ProtoMessageType.VENDOR_TYPE_NUMBER:
    case ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER:
      if (value instanceof AbstractMessage) {
        return binder.to((AbstractMessage) value);
      } else if (value instanceof byte[]) {
        return binder.to(ByteArray.copyFrom((byte[]) value));
      } else {
        throw JdbcSqlExceptionFactory.of(
            value + " is not a valid PROTO value", Code.INVALID_ARGUMENT);
      }
    case ProtoEnumType.VENDOR_TYPE_NUMBER:
    case ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER:
      if (value instanceof ProtocolMessageEnum) {
        return binder.to((ProtocolMessageEnum) value);
      } else if (value instanceof Number) {
        return binder.to(((Number) value).longValue());
      }
      throw JdbcSqlExceptionFactory.of(
          value + " is not a valid ENUM value", Code.INVALID_ARGUMENT);
  }
  // Unknown type code: caller (setValue) converts a null result into an INVALID_ARGUMENT.
  return null;
}

// NOTE(review): decodes the stream as US_ASCII — non-ASCII input will be mangled; confirm
// whether UTF-8 was intended here.
private String getStringFromInputStream(InputStream inputStream) throws SQLException {
  InputStreamReader reader = new InputStreamReader(inputStream, StandardCharsets.US_ASCII);
  try {
    return CharStreams.toString(reader);
  } catch (IOException e) {
    throw JdbcSqlExceptionFactory.of(
        "could not set string from input stream", Code.INVALID_ARGUMENT, e);
  }
}

// Drains the reader into a String, converting I/O failures into INVALID_ARGUMENT.
private String getStringFromReader(Reader reader) throws SQLException {
  try {
    return CharStreams.toString(reader);
  } catch (IOException e) {
    throw JdbcSqlExceptionFactory.of(
        "could not set string from reader", Code.INVALID_ARGUMENT, e);
  }
}

/** Set the parameter value based purely on the type of the value.
 */
private Builder setParamWithUnknownType(ValueBinder binder, Object value)
    throws SQLException {
  if (Boolean.class.isAssignableFrom(value.getClass())) {
    return binder.to((Boolean) value);
  } else if (Byte.class.isAssignableFrom(value.getClass())) {
    return binder.to(((Byte) value).longValue());
  } else if (Short.class.isAssignableFrom(value.getClass())) {
    return binder.to(((Short) value).longValue());
  } else if (Integer.class.isAssignableFrom(value.getClass())) {
    return binder.to(((Integer) value).longValue());
  } else if (Long.class.isAssignableFrom(value.getClass())) {
    return binder.to(((Long) value).longValue());
  } else if (Float.class.isAssignableFrom(value.getClass())) {
    return binder.to(((Float) value).doubleValue());
  } else if (Double.class.isAssignableFrom(value.getClass())) {
    return binder.to(((Double) value).doubleValue());
  } else if (BigDecimal.class.isAssignableFrom(value.getClass())) {
    if (dialect == Dialect.POSTGRESQL) {
      return binder.to(Value.pgNumeric(value.toString()));
    } else {
      return binder.to((BigDecimal) value);
    }
  } else if (Date.class.isAssignableFrom(value.getClass())) {
    Date dateValue = (Date) value;
    return binder.to(JdbcTypeConverter.toGoogleDate(dateValue));
  } else if (LocalDate.class.isAssignableFrom(value.getClass())) {
    LocalDate localDate = (LocalDate) value;
    return binder.to(
        com.google.cloud.Date.fromYearMonthDay(
            localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth()));
  } else if (Timestamp.class.isAssignableFrom(value.getClass())) {
    return binder.to(JdbcTypeConverter.toGoogleTimestamp((Timestamp) value));
  } else if (OffsetDateTime.class.isAssignableFrom(value.getClass())) {
    OffsetDateTime offsetDateTime = (OffsetDateTime) value;
    return binder.to(
        com.google.cloud.Timestamp.ofTimeSecondsAndNanos(
            offsetDateTime.toEpochSecond(), offsetDateTime.getNano()));
  } else if (Time.class.isAssignableFrom(value.getClass())) {
    Time timeValue = (Time) value;
    return binder.to(JdbcTypeConverter.toGoogleTimestamp(new Timestamp(timeValue.getTime())));
  } else if (UUID.class.isAssignableFrom(value.getClass())) {
    // Bind UUID values as untyped strings to allow them to be used with all types that support
    // string values (e.g. STRING, UUID).
    return binder.to(
        Value.untyped(
            com.google.protobuf.Value.newBuilder().setStringValue(value.toString()).build()));
  } else if (String.class.isAssignableFrom(value.getClass())) {
    String stringVal = (String) value;
    return binder.to(stringVal);
  } else if (Reader.class.isAssignableFrom(value.getClass())) {
    try {
      Reader readable = (Reader) value;
      return binder.to(CharStreams.toString(readable));
    } catch (IOException e) {
      throw new IllegalArgumentException("Could not read from readable", e);
    }
  } else if (Clob.class.isAssignableFrom(value.getClass())) {
    try {
      Clob clob = (Clob) value;
      return binder.to(CharStreams.toString(clob.getCharacterStream()));
    } catch (IOException e) {
      throw new IllegalArgumentException("Could not read from readable", e);
    }
  } else if (Character.class.isAssignableFrom(value.getClass())) {
    return binder.to(((Character) value).toString());
  } else if (Character[].class.isAssignableFrom(value.getClass())) {
    List list = Arrays.asList((Character[]) value);
    StringBuilder s = new StringBuilder();
    for (Character c : list) {
      s.append(c.charValue());
    }
    return binder.to(s.toString());
  } else if (char[].class.isAssignableFrom(value.getClass())) {
    return binder.to(String.valueOf((char[]) value));
  } else if (URL.class.isAssignableFrom(value.getClass())) {
    return binder.to(value.toString());
  } else if (UUID.class.isAssignableFrom(value.getClass())) {
    // NOTE(review): unreachable — UUID values are already handled (and bound as untyped
    // strings) by the earlier UUID branch above; candidate for removal.
    return binder.to(((UUID) value).toString());
  } else if (byte[].class.isAssignableFrom(value.getClass())) {
    return binder.to(ByteArray.copyFrom((byte[]) value));
  } else if (InputStream.class.isAssignableFrom(value.getClass())) {
    try {
      return binder.to(ByteArray.copyFrom((InputStream) value));
    } catch (IOException e) {
      throw new IllegalArgumentException(
          "Could not copy bytes from input stream: " + e.getMessage(), e);
    }
  } else if (Blob.class.isAssignableFrom(value.getClass())) {
    try {
      return binder.to(ByteArray.copyFrom(((Blob) value).getBinaryStream()));
    } catch (IOException e) {
      throw new IllegalArgumentException(
          "Could not copy bytes from input stream: " + e.getMessage(), e);
    }
  } else if (Array.class.isAssignableFrom(value.getClass())) {
    try {
      Array jdbcArray = (Array) value;
      return setArrayValue(binder, jdbcArray.getBaseType(), jdbcArray.getArray());
    } catch (SQLException e) {
      throw new IllegalArgumentException(
          "Unsupported parameter type: " + value.getClass().getName() + " - " + value);
    }
  } else if (AbstractMessage.class.isAssignableFrom(value.getClass())) {
    return binder.to((AbstractMessage) value);
  } else if (ProtocolMessageEnum.class.isAssignableFrom(value.getClass())) {
    return binder.to((ProtocolMessageEnum) value);
  }
  // Unknown class: caller converts a null result into an INVALID_ARGUMENT.
  return null;
}

// Binds an array value. When the value is null, the JDBC base type code determines which
// typed NULL array is bound; unknown codes fall back to an untyped NULL.
private Builder setArrayValue(ValueBinder binder, int type, Object value)
    throws SQLException {
  if (value == null) {
    switch (type) {
      case Types.BIT:
      case Types.BOOLEAN:
        return binder.toBoolArray((boolean[]) null);
      case Types.TINYINT:
      case Types.SMALLINT:
      case Types.INTEGER:
      case Types.BIGINT:
        return binder.toInt64Array((long[]) null);
      case Types.REAL:
        return binder.toFloat32Array((float[]) null);
      case Types.FLOAT:
      case Types.DOUBLE:
        return binder.toFloat64Array((double[]) null);
      case Types.NUMERIC:
      case Types.DECIMAL:
        if (dialect == Dialect.POSTGRESQL) {
          return binder.toPgNumericArray(null);
        } else {
          return binder.toNumericArray(null);
        }
      case Types.CHAR:
      case Types.VARCHAR:
      case Types.LONGVARCHAR:
      case Types.NCHAR:
      case Types.NVARCHAR:
      case Types.LONGNVARCHAR:
      case Types.CLOB:
      case Types.NCLOB:
        return binder.toStringArray(null);
      case JsonType.VENDOR_TYPE_NUMBER:
      case JsonType.SHORT_VENDOR_TYPE_NUMBER:
        return binder.toJsonArray(null);
      case PgJsonbType.VENDOR_TYPE_NUMBER:
      case PgJsonbType.SHORT_VENDOR_TYPE_NUMBER:
        return binder.toPgJsonbArray(null);
      case Types.DATE:
        return binder.toDateArray(null);
      case Types.TIME:
      case Types.TIME_WITH_TIMEZONE:
      case Types.TIMESTAMP:
      case Types.TIMESTAMP_WITH_TIMEZONE:
        return binder.toTimestampArray(null);
      case Types.BINARY:
      case Types.VARBINARY:
      case Types.LONGVARBINARY:
      case Types.BLOB:
        return binder.toBytesArray(null);
      case ProtoMessageType.VENDOR_TYPE_NUMBER:
      case ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER:
      case ProtoEnumType.VENDOR_TYPE_NUMBER:
      case ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER:
        return binder.to(
            Value.untyped(
                com.google.protobuf.Value.newBuilder()
                    .setNullValue(NullValue.NULL_VALUE)
                    .build()));
      case UuidType.VENDOR_TYPE_NUMBER:
      case UuidType.SHORT_VENDOR_TYPE_NUMBER:
        return binder.toUuidArray(null);
      default:
        return binder.to(
            Value.untyped(
                com.google.protobuf.Value.newBuilder()
                    .setNullValue(NullValue.NULL_VALUE)
                    .build()));
    }
  }

  // Non-null: dispatch on the runtime class of the (boxed or primitive) array.
  if (boolean[].class.isAssignableFrom(value.getClass())) {
    return binder.toBoolArray((boolean[]) value);
  } else if (Boolean[].class.isAssignableFrom(value.getClass())) {
    return binder.toBoolArray(Arrays.asList((Boolean[]) value));
  } else if (Byte[].class.isAssignableFrom(value.getClass())) {
    return binder.toInt64Array(toLongList((Byte[]) value));
  } else if (short[].class.isAssignableFrom(value.getClass())) {
    long[] l = new long[((short[]) value).length];
    for (int i = 0; i < l.length; i++) {
      l[i] = ((short[]) value)[i];
    }
    return binder.toInt64Array(l);
  } else if (Short[].class.isAssignableFrom(value.getClass())) {
    return binder.toInt64Array(toLongList((Short[]) value));
  } else if (int[].class.isAssignableFrom(value.getClass())) {
    long[] l = new long[((int[]) value).length];
    for (int i = 0; i < l.length; i++) {
      l[i] = ((int[]) value)[i];
    }
    return binder.toInt64Array(l);
  } else if (Integer[].class.isAssignableFrom(value.getClass())) {
    return binder.toInt64Array(toLongList((Integer[]) value));
  } else if (long[].class.isAssignableFrom(value.getClass())) {
    return binder.toInt64Array((long[]) value);
  } else if (Long[].class.isAssignableFrom(value.getClass())) {
    return binder.toInt64Array(toLongList((Long[]) value));
  } else if (float[].class.isAssignableFrom(value.getClass())) {
    return binder.toFloat32Array((float[]) value);
  } else if (Float[].class.isAssignableFrom(value.getClass())) {
    return binder.toFloat32Array(toFloatList((Float[]) value));
  } else if (double[].class.isAssignableFrom(value.getClass())) {
    return binder.toFloat64Array((double[]) value);
  } else if (Double[].class.isAssignableFrom(value.getClass())) {
    return binder.toFloat64Array(toDoubleList((Double[]) value));
  } else if (BigDecimal[].class.isAssignableFrom(value.getClass())) {
    if (dialect == Dialect.POSTGRESQL) {
      return binder.toPgNumericArray(
          Arrays.stream((BigDecimal[]) value)
              .map(bigDecimal -> bigDecimal == null ? null : bigDecimal.toString())
              .collect(Collectors.toList()));
    } else {
      return binder.toNumericArray(Arrays.asList((BigDecimal[]) value));
    }
  } else if (Date[].class.isAssignableFrom(value.getClass())) {
    return binder.toDateArray(JdbcTypeConverter.toGoogleDates((Date[]) value));
  } else if (Timestamp[].class.isAssignableFrom(value.getClass())) {
    return binder.toTimestampArray(JdbcTypeConverter.toGoogleTimestamps((Timestamp[]) value));
  } else if (UUID[].class.isAssignableFrom(value.getClass())) {
    return binder.toUuidArray(Arrays.asList((UUID[]) value));
  } else if (String[].class.isAssignableFrom(value.getClass())) {
    // The declared base type decides between JSON, JSONB and plain STRING arrays.
    if (type == JsonType.VENDOR_TYPE_NUMBER || type == JsonType.SHORT_VENDOR_TYPE_NUMBER) {
      return binder.toJsonArray(Arrays.asList((String[]) value));
    } else if (type == PgJsonbType.VENDOR_TYPE_NUMBER
        || type == PgJsonbType.SHORT_VENDOR_TYPE_NUMBER) {
      return binder.toPgJsonbArray(Arrays.asList((String[]) value));
    } else {
      return binder.toStringArray(Arrays.asList((String[]) value));
    }
  } else if (byte[][].class.isAssignableFrom(value.getClass())) {
    return binder.toBytesArray(JdbcTypeConverter.toGoogleBytes((byte[][]) value));
  } else if (AbstractMessage[].class.isAssignableFrom(value.getClass())) {
    return bindProtoMessageArray(binder, value);
  } else if (ProtocolMessageEnum[].class.isAssignableFrom(value.getClass())) {
    return bindProtoEnumArray(binder, value);
  }
  return null;
}

// Serializes each proto message element via its toByteArray() method (looked up reflectively
// on the array's component type) and binds the result as a BYTES array.
private Builder bindProtoMessageArray(ValueBinder binder, Object value)
    throws SQLException {
  Class componentType = value.getClass().getComponentType();
  int length = java.lang.reflect.Array.getLength(value);
  List convertedArray = new ArrayList<>();
  try {
    Method method = componentType.getMethod("toByteArray");
    for (int i = 0; i < length; i++) {
      Object element = java.lang.reflect.Array.get(value, i);
      if (element != null) {
        byte[] l = (byte[]) method.invoke(element);
        convertedArray.add(ByteArray.copyFrom(l));
      }
else { + convertedArray.add(null); + } + } + + Message.Builder builder = + (Message.Builder) componentType.getMethod("newBuilder").invoke(null); + Descriptor msgDescriptor = builder.getDescriptorForType(); + + return binder.toProtoMessageArray(convertedArray, msgDescriptor.getFullName()); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of( + "Error occurred when binding Array of Proto Message input", Code.UNKNOWN, e); + } + } + + private Builder bindProtoEnumArray(ValueBinder binder, Object value) + throws SQLException { + Class componentType = value.getClass().getComponentType(); + int length = java.lang.reflect.Array.getLength(value); + List convertedArray = new ArrayList<>(); + try { + Method method = componentType.getMethod("getNumber"); + for (int i = 0; i < length; i++) { + Object element = java.lang.reflect.Array.get(value, i); + if (element != null) { + int op = (int) method.invoke(element); + convertedArray.add((long) op); + } else { + convertedArray.add(null); + } + } + + Descriptors.EnumDescriptor enumDescriptor = + (Descriptors.EnumDescriptor) componentType.getMethod("getDescriptor").invoke(null); + + return binder.toProtoEnumArray(convertedArray, enumDescriptor.getFullName()); + } catch (Exception e) { + throw JdbcSqlExceptionFactory.of( + "Error occurred when binding Array of Proto Enum input", Code.UNKNOWN, e); + } + } + + private List toLongList(Number[] input) { + List res = new ArrayList<>(input.length); + for (Number number : input) { + res.add(number == null ? null : number.longValue()); + } + return res; + } + + private List toFloatList(Number[] input) { + List res = new ArrayList<>(input.length); + for (Number number : input) { + res.add(number == null ? null : number.floatValue()); + } + return res; + } + + private List toDoubleList(Number[] input) { + List res = new ArrayList<>(input.length); + for (Number number : input) { + res.add(number == null ? 
null : number.doubleValue()); + } + return res; + } + + /** + * Sets a null value with a specific SQL type. If the sqlType is null, the value will be set as a + * String. + */ + private Builder setNullValue(ValueBinder binder, Integer sqlType) { + if (sqlType == null) { + return binder.to( + Value.untyped( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build())); + } + int type = sqlType; + switch (type) { + case Types.BIGINT: + return binder.to((Long) null); + case Types.BINARY: + case ProtoMessageType.VENDOR_TYPE_NUMBER: + case ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER: + return binder.to((ByteArray) null); + case Types.BLOB: + return binder.to((ByteArray) null); + case Types.BIT: + case Types.BOOLEAN: + return binder.to((Boolean) null); + case Types.CHAR: + return binder.to((String) null); + case Types.CLOB: + return binder.to((String) null); + case Types.DATE: + return binder.to((com.google.cloud.Date) null); + case Types.NUMERIC: + case Types.DECIMAL: + if (dialect == Dialect.POSTGRESQL) { + return binder.to(Value.pgNumeric(null)); + } else { + return binder.to((BigDecimal) null); + } + case Types.FLOAT: + case Types.DOUBLE: + return binder.to((Double) null); + case Types.INTEGER: + case ProtoEnumType.VENDOR_TYPE_NUMBER: + case ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER: + return binder.to((Long) null); + case Types.LONGNVARCHAR: + return binder.to((String) null); + case Types.LONGVARBINARY: + return binder.to((ByteArray) null); + case Types.LONGVARCHAR: + return binder.to((String) null); + case Types.NCHAR: + return binder.to((String) null); + case Types.NCLOB: + return binder.to((String) null); + case Types.NVARCHAR: + return binder.to((String) null); + case Types.REAL: + return binder.to((Float) null); + case Types.SMALLINT: + return binder.to((Long) null); + case Types.SQLXML: + return binder.to((String) null); + case Types.TIME: + case Types.TIME_WITH_TIMEZONE: + case Types.TIMESTAMP: + case Types.TIMESTAMP_WITH_TIMEZONE: + return 
binder.to((com.google.cloud.Timestamp) null); + case Types.TINYINT: + return binder.to((Long) null); + case Types.VARBINARY: + return binder.to((ByteArray) null); + case Types.VARCHAR: + return binder.to((String) null); + case JsonType.VENDOR_TYPE_NUMBER: + case JsonType.SHORT_VENDOR_TYPE_NUMBER: + return binder.to(Value.json(null)); + case PgJsonbType.VENDOR_TYPE_NUMBER: + case PgJsonbType.SHORT_VENDOR_TYPE_NUMBER: + return binder.to(Value.pgJsonb(null)); + default: + return binder.to( + Value.untyped( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build())); + } + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPartitionedQueryResultSet.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPartitionedQueryResultSet.java new file mode 100644 index 000000000000..1d5ef66fd0eb --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPartitionedQueryResultSet.java @@ -0,0 +1,51 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.connection.PartitionedQueryResultSet; +import com.google.common.base.Preconditions; +import java.sql.Statement; + +/** + * {@link java.sql.ResultSet} implementation that is returned for queries that are executed with + * `RUN PARTITIONED QUERY ...`. 
+ */
+class JdbcPartitionedQueryResultSet extends JdbcResultSet
+    implements CloudSpannerJdbcPartitionedQueryResultSet {
+  /** Wraps a partitioned-query result set; both arguments must be non-null. */
+  static JdbcPartitionedQueryResultSet of(
+      Statement statement, PartitionedQueryResultSet resultSet) {
+    return new JdbcPartitionedQueryResultSet(
+        Preconditions.checkNotNull(statement), Preconditions.checkNotNull(resultSet));
+  }
+
+  // Stored in addition to the superclass' reference so the partition-specific getters below
+  // can be delegated without casting.
+  private final PartitionedQueryResultSet partitionedQueryResultSet;
+
+  private JdbcPartitionedQueryResultSet(Statement statement, PartitionedQueryResultSet resultSet) {
+    super(statement, resultSet);
+    this.partitionedQueryResultSet = resultSet;
+  }
+
+  /** Returns the number of partitions of the underlying partitioned query. */
+  @Override
+  public int getNumPartitions() {
+    return partitionedQueryResultSet.getNumPartitions();
+  }
+
+  /** Returns the parallelism that was used to execute the partitioned query. */
+  @Override
+  public int getParallelism() {
+    return partitionedQueryResultSet.getParallelism();
+  }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPreconditions.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPreconditions.java
new file mode 100644
index 000000000000..19586efe1637
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPreconditions.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.rpc.Code;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import javax.annotation.Nullable;
+
+/**
+ * Convenience class for throwing {@link SQLException}s when a certain condition has not been met.
+ */
+class JdbcPreconditions {
+  // NOTE(review): static-only utility class; a private constructor would prevent accidental
+  // instantiation (cannot be added in a documentation-only change).
+
+  /**
+   * Ensures the truth of an expression involving a parameter to a method.
+   *
+   * @param expression the boolean expression that should be true.
+   * @param value the parameter value that is being checked.
+   * @throws SQLException with {@link Code#INVALID_ARGUMENT} if {@code expression} is false
+   */
+  static void checkArgument(boolean expression, Object value) throws SQLException {
+    if (!expression) {
+      throw JdbcSqlExceptionFactory.of("invalid argument: " + value, Code.INVALID_ARGUMENT);
+    }
+  }
+
+  /**
+   * Ensures the truth of an expression involving the state of the calling instance, but not
+   * involving any parameters to the calling method.
+   *
+   * @param expression a boolean expression
+   * @param errorMessage the exception message to use if the check fails; will be converted to a
+   *     string using {@link String#valueOf(Object)}
+   * @throws SQLException with {@link Code#FAILED_PRECONDITION} if {@code expression} is false
+   */
+  static void checkState(boolean expression, @Nullable Object errorMessage) throws SQLException {
+    if (!expression) {
+      throw JdbcSqlExceptionFactory.of(String.valueOf(errorMessage), Code.FAILED_PRECONDITION);
+    }
+  }
+
+  /**
+   * Ensures the support of a certain JDBC feature.
+   *
+   * @param expression the boolean expression that indicates whether the feature is supported.
+   * @param message the exception message to use if the feature is not supported.
+   * @throws SQLFeatureNotSupportedException if the feature is not supported.
+   */
+  static void checkSqlFeatureSupported(boolean expression, String message)
+      throws SQLFeatureNotSupportedException {
+    if (!expression) {
+      throw JdbcSqlExceptionFactory.unsupported(message);
+    }
+  }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatement.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatement.java
new file mode 100644
index 000000000000..ad74234221c4
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatement.java
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.spanner.Dialect;
+import com.google.cloud.spanner.Options.QueryOption;
+import com.google.cloud.spanner.PartitionOptions;
+import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode;
+import com.google.cloud.spanner.ResultSets;
+import com.google.cloud.spanner.SpannerException;
+import com.google.cloud.spanner.Statement;
+import com.google.cloud.spanner.Type;
+import com.google.cloud.spanner.connection.AbstractStatementParser.ParametersInfo;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.rpc.Code;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+
+/** Implementation of {@link PreparedStatement} for Cloud Spanner. */
+class JdbcPreparedStatement extends AbstractJdbcPreparedStatement
+    implements CloudSpannerJdbcPreparedStatement {
+  // The JDBC positional parameter placeholder that is converted to named parameters.
+  private static final char POS_PARAM_CHAR = '?';
+  private final String sql;
+  private final ParametersInfo parameters;
+  // Lazily computed and cached by getParameterMetaData().
+  private JdbcParameterMetaData cachedParameterMetadata;
+  private final ImmutableList generatedKeysColumns;
+
+  JdbcPreparedStatement(
+      JdbcConnection connection, String sql, ImmutableList generatedKeysColumns)
+      throws SQLException {
+    super(connection);
+    this.sql = sql;
+    try {
+      // The PostgreSQL parser allows comments to be present in the SQL string that is used to parse
+      // the query parameters.
+      String sqlForParameterExtraction =
+          getConnection().getDialect() == Dialect.POSTGRESQL
+              ? this.sql
+              : parser.removeCommentsAndTrim(this.sql);
+      this.parameters =
+          parser.convertPositionalParametersToNamedParameters(
+              POS_PARAM_CHAR, sqlForParameterExtraction);
+    } catch (SpannerException e) {
+      throw JdbcSqlExceptionFactory.of(e);
+    }
+    this.generatedKeysColumns = Preconditions.checkNotNull(generatedKeysColumns);
+  }
+
+  ParametersInfo getParametersInfo() {
+    return parameters;
+  }
+
+  /** Builds a Spanner statement with the current parameter values bound as p1, p2, ... */
+  @VisibleForTesting
+  Statement createStatement() throws SQLException {
+    ParametersInfo paramInfo = getParametersInfo();
+    Statement.Builder builder = Statement.newBuilder(paramInfo.sqlWithNamedParameters);
+    // JDBC parameter indices are 1-based and map to the generated named parameters.
+    for (int index = 1; index <= getParameters().getHighestIndex(); index++) {
+      getParameters().bindParameterValue(builder.bind("p" + index), index);
+    }
+    return builder.build();
+  }
+
+  @Override
+  public ResultSet executeQuery() throws SQLException {
+    checkClosed();
+    return executeQuery(createStatement());
+  }
+
+  ResultSet executeQueryWithOptions(QueryOption... options) throws SQLException {
+    checkClosed();
+    return executeQuery(createStatement(), options);
+  }
+
+  @Override
+  public int executeUpdate() throws SQLException {
+    long count = executeLargeUpdate(createStatement(), generatedKeysColumns);
+    // executeUpdate() must return an int; fail explicitly rather than silently truncating.
+    if (count > Integer.MAX_VALUE) {
+      throw JdbcSqlExceptionFactory.of(
+          "update count too large for executeUpdate: " + count, Code.OUT_OF_RANGE);
+    }
+    return (int) count;
+  }
+
+  @Override
+  public long executeLargeUpdate() throws SQLException {
+    return executeLargeUpdate(createStatement(), generatedKeysColumns);
+  }
+
+  @Override
+  public boolean execute() throws SQLException {
+    return executeStatement(createStatement(), generatedKeysColumns);
+  }
+
+  @Override
+  public void addBatch() throws SQLException {
+    checkClosed();
+    checkAndSetBatchType(sql);
+    batchedStatements.add(createStatement());
+  }
+
+  @Override
+  public ParameterMetaData getParameterMetaData() throws SQLException {
+    checkClosed();
+
+    // NOTE: JdbcSimpleParameterMetaData is an experimental feature that can be removed without
+    // warning in a future version. Your application should not assume that this feature will
+    // continue to be supported.
+    if (JdbcSimpleParameterMetaData.useSimpleParameterMetadata()) {
+      return new JdbcSimpleParameterMetaData(this.parameters);
+    }
+
+    // DML without a RETURNING clause is analyzed as an update; everything else as a query.
+    if (cachedParameterMetadata == null) {
+      if (getConnection().getParser().isUpdateStatement(sql)
+          && !getConnection().getParser().checkReturningClause(sql)) {
+        cachedParameterMetadata = getParameterMetadataForUpdate();
+      } else {
+        cachedParameterMetadata = getParameterMetadataForQuery();
+      }
+    }
+    return cachedParameterMetadata;
+  }
+
+  private JdbcParameterMetaData getParameterMetadataForUpdate() {
+    try (com.google.cloud.spanner.ResultSet resultSet =
+        getConnection()
+            .getSpannerConnection()
+            .analyzeUpdateStatement(
+                Statement.of(parameters.sqlWithNamedParameters), QueryAnalyzeMode.PLAN)) {
+      return new JdbcParameterMetaData(this, resultSet);
+    }
+  }
+
+  private JdbcParameterMetaData getParameterMetadataForQuery() {
+    try (com.google.cloud.spanner.ResultSet resultSet =
+        getConnection()
+            .getSpannerConnection()
+            .analyzeQuery(Statement.of(parameters.sqlWithNamedParameters), QueryAnalyzeMode.PLAN)) {
+      return new JdbcParameterMetaData(this, resultSet);
+    }
+  }
+
+  @Override
+  public ResultSetMetaData getMetaData() throws SQLException {
+    checkClosed();
+    if (getConnection().getParser().isUpdateStatement(sql)) {
+      // Return metadata for an empty result set as DML statements do not return any results (as a
+      // result set).
+      com.google.cloud.spanner.ResultSet resultSet =
+          ResultSets.forRows(Type.struct(), ImmutableList.of());
+      resultSet.next();
+      return new JdbcResultSetMetaData(JdbcResultSet.of(resultSet), this);
+    }
+    try (ResultSet rs = analyzeQuery(createStatement(), QueryAnalyzeMode.PLAN)) {
+      return rs.getMetaData();
+    }
+  }
+
+  @Override
+  public ResultSet partitionQuery(PartitionOptions partitionOptions, QueryOption... options)
+      throws SQLException {
+    return runWithStatementTimeout(
+        connection ->
+            JdbcResultSet.of(
+                this, connection.partitionQuery(createStatement(), partitionOptions, options)));
+  }
+
+  @Override
+  public ResultSet runPartition() throws SQLException {
+    return runWithStatementTimeout(
+        connection -> {
+          // The partition ID to execute must be supplied as query parameter 1.
+          if (getParameters().getHighestIndex() < 1 || getParameters().getParameter(1) == null) {
+            throw JdbcSqlExceptionFactory.of(
+                "No query parameter has been set. runPartition() requires the partition ID to be set as a query parameter with index 1. Call PreparedStatement#setString(1, \"some-partition-id\") before calling runPartition().",
+                Code.FAILED_PRECONDITION);
+          }
+          String partitionId = getParameters().getParameter(1).toString();
+          return JdbcResultSet.of(this, connection.runPartition(partitionId));
+        });
+  }
+
+  @Override
+  public CloudSpannerJdbcPartitionedQueryResultSet runPartitionedQuery(
+      PartitionOptions partitionOptions, QueryOption... options) throws SQLException {
+    return runWithStatementTimeout(
+        connection ->
+            JdbcPartitionedQueryResultSet.of(
+                this,
+                connection.runPartitionedQuery(createStatement(), partitionOptions, options)));
+  }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcResultSet.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcResultSet.java
new file mode 100644
index 000000000000..da0620ee6f9f
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcResultSet.java
@@ -0,0 +1,1249 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.getMainTypeCode; + +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.Code; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.PartitionedQueryResultSet; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.Reader; +import java.io.StringReader; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.UUID; +import javax.annotation.Nonnull; + +/** Implementation of {@link ResultSet} for Cloud Spanner */ +class JdbcResultSet extends AbstractJdbcResultSet { + + static JdbcResultSet of(com.google.cloud.spanner.ResultSet resultSet) { + return of(resultSet, ImmutableSet.of()); + } 
+ + static JdbcResultSet of( + com.google.cloud.spanner.ResultSet resultSet, + ImmutableSet columnsAllowedUncheckedLongCastToShort) { + return new JdbcResultSet( + null, + Preconditions.checkNotNull(resultSet), + Preconditions.checkNotNull(columnsAllowedUncheckedLongCastToShort)); + } + + static JdbcResultSet of(Statement statement, com.google.cloud.spanner.ResultSet resultSet) { + // Return a JDBC version of a PartitionedQueryResultSet if the Cloud Spanner Java client + // returned a PartitionedQueryResultSet. + if (resultSet instanceof PartitionedQueryResultSet) { + return JdbcPartitionedQueryResultSet.of(statement, (PartitionedQueryResultSet) resultSet); + } + return new JdbcResultSet( + Preconditions.checkNotNull(statement), Preconditions.checkNotNull(resultSet)); + } + + /** + * Creates a JDBC result set by copying the given Spanner {@link + * com.google.cloud.spanner.ResultSet}. This can be used for result sets that are known not to be + * too large. This type of result set should be preferred for results that are unlikely to be + * closed by the client application, such as the returned generated keys of an update statement. + * The copy will not hold on to a reference to a Cloud Spanner session or result stream. All the + * data in the given Spanner {@link com.google.cloud.spanner.ResultSet} have been consumed after + * calling this method. The {@link com.google.cloud.spanner.ResultSet} is not closed by this + * method. + */ + static JdbcResultSet copyOf(@Nonnull com.google.cloud.spanner.ResultSet resultSet) { + Preconditions.checkNotNull(resultSet); + // Make the copy first. This ensures that ResultSet#next() has been called at least once, which + // is necessary to get the type of the result set. + ImmutableList rows = ImmutableList.copyOf(new ResultSetIterator(resultSet)); + return of(ResultSets.forRows(resultSet.getType(), rows)); + } + + /** + * {@link Iterator} implementation for {@link com.google.cloud.spanner.ResultSet}. 
This is used to + * create a copy of an existing result set without the need to iterate the rows more than once. + */ + private static class ResultSetIterator implements Iterator { + private final com.google.cloud.spanner.ResultSet resultSet; + private boolean calculatedHasNext = false; + private boolean hasNext = false; + + ResultSetIterator(com.google.cloud.spanner.ResultSet resultSet) { + this.resultSet = resultSet; + } + + @Override + public boolean hasNext() { + if (!calculatedHasNext) { + calculatedHasNext = true; + hasNext = resultSet.next(); + } + return hasNext; + } + + @Override + public Struct next() { + if (hasNext()) { + // Indicate that the next call to hasNext() must re-check whether there are more results. + calculatedHasNext = false; + return resultSet.getCurrentRowAsStruct(); + } + throw new NoSuchElementException(); + } + } + + private boolean closed = false; + private final Statement statement; + private boolean wasNull = false; + private boolean nextReturnedFalse = false; + private boolean nextCalledForMetaData = false; + private boolean nextCalledForMetaDataResult = false; + private long currentRow = 0L; + private final ImmutableSet columnsAllowedUncheckedLongCastToShort; + + JdbcResultSet(Statement statement, com.google.cloud.spanner.ResultSet spanner) { + this(statement, spanner, ImmutableSet.of()); + } + + JdbcResultSet( + Statement statement, + com.google.cloud.spanner.ResultSet spanner, + ImmutableSet columnsAllowedUncheckedLongCastToShort) { + super(spanner); + this.statement = statement; + this.columnsAllowedUncheckedLongCastToShort = columnsAllowedUncheckedLongCastToShort; + } + + void checkClosedAndValidRow() throws SQLException { + checkClosed(); + if (currentRow == 0L) { + throw JdbcSqlExceptionFactory.of( + "ResultSet is before first row. Call next() first.", + com.google.rpc.Code.FAILED_PRECONDITION); + } + if (nextReturnedFalse) { + throw JdbcSqlExceptionFactory.of( + "ResultSet is after last row. 
There is no more data available.", + com.google.rpc.Code.FAILED_PRECONDITION); + } + } + + @Override + public boolean next() throws SQLException { + checkClosed(); + currentRow++; + if (nextCalledForMetaData) { + nextReturnedFalse = !nextCalledForMetaDataResult; + nextCalledForMetaData = false; + } else { + nextReturnedFalse = !spanner.next(); + } + + return !nextReturnedFalse; + } + + @Override + public void close() { + spanner.close(); + this.closed = true; + } + + @Override + public boolean wasNull() throws SQLException { + checkClosedAndValidRow(); + return wasNull; + } + + private boolean isNull(int columnIndex) { + wasNull = spanner.isNull(columnIndex - 1); + return wasNull; + } + + SQLException createInvalidToGetAs(String sqlType, Code type) { + return JdbcSqlExceptionFactory.of( + String.format("Invalid column type to get as %s: %s", sqlType, type.name()), + com.google.rpc.Code.INVALID_ARGUMENT); + } + + SQLException createCastException(String sqlType, Object value) { + return JdbcSqlExceptionFactory.of( + String.format("Cannot cast to %s: %s", sqlType, value), + com.google.rpc.Code.INVALID_ARGUMENT); + } + + @Override + public String getString(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return isNull ? null : String.valueOf(spanner.getBoolean(spannerIndex)); + case BYTES: + case PROTO: + return isNull ? null : spanner.getBytes(spannerIndex).toBase64(); + case DATE: + return isNull ? null : spanner.getDate(spannerIndex).toString(); + case FLOAT32: + return isNull ? null : Float.toString(spanner.getFloat(spannerIndex)); + case FLOAT64: + return isNull ? null : Double.toString(spanner.getDouble(spannerIndex)); + case INT64: + case ENUM: + return isNull ? null : Long.toString(spanner.getLong(spannerIndex)); + case NUMERIC: + return isNull ? 
null : spanner.getBigDecimal(spannerIndex).toString(); + case PG_NUMERIC: + return isNull ? null : spanner.getString(spannerIndex); + case STRING: + return isNull ? null : spanner.getString(spannerIndex); + case JSON: + return isNull ? null : spanner.getJson(spannerIndex); + case PG_JSONB: + return isNull ? null : spanner.getPgJsonb(spannerIndex); + case TIMESTAMP: + return isNull ? null : spanner.getTimestamp(spannerIndex).toString(); + case STRUCT: + case ARRAY: + default: + throw createInvalidToGetAs("string", type); + } + } + + @Override + public boolean getBoolean(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return !isNull && spanner.getBoolean(spannerIndex); + case FLOAT32: + return !isNull && spanner.getFloat(spannerIndex) != 0f; + case FLOAT64: + return !isNull && spanner.getDouble(spannerIndex) != 0D; + case INT64: + case ENUM: + return !isNull && spanner.getLong(spannerIndex) != 0L; + case NUMERIC: + return !isNull && !spanner.getBigDecimal(spannerIndex).equals(BigDecimal.ZERO); + case PG_NUMERIC: + return !isNull && !spanner.getString(spannerIndex).equals("0"); + case STRING: + return !isNull && Boolean.parseBoolean(spanner.getString(spannerIndex)); + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case STRUCT: + case TIMESTAMP: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("boolean", type); + } + } + + @Override + public byte getByte(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return isNull ? (byte) 0 : (spanner.getBoolean(spannerIndex) ? (byte) 1 : 0); + case FLOAT32: + return isNull + ? 
(byte) 0 + : checkedCastToByte(Float.valueOf(spanner.getFloat(spannerIndex)).longValue()); + case FLOAT64: + return isNull + ? (byte) 0 + : checkedCastToByte(Double.valueOf(spanner.getDouble(spannerIndex)).longValue()); + case INT64: + case ENUM: + return isNull ? (byte) 0 : checkedCastToByte(spanner.getLong(spannerIndex)); + case NUMERIC: + return isNull + ? (byte) 0 + : checkedCastToByte(spanner.getBigDecimal(spannerIndex).toBigInteger()); + case PG_NUMERIC: + return isNull + ? (byte) 0 + : checkedCastToByte(parseBigDecimal(spanner.getString(spannerIndex)).toBigInteger()); + case STRING: + return isNull ? (byte) 0 : checkedCastToByte(parseLong(spanner.getString(spannerIndex))); + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case STRUCT: + case TIMESTAMP: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("byte", type); + } + } + + @Override + public short getShort(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return isNull ? 0 : (spanner.getBoolean(spannerIndex) ? (short) 1 : 0); + case FLOAT32: + return isNull + ? 0 + : checkedCastToShort(Float.valueOf(spanner.getFloat(spannerIndex)).longValue()); + case FLOAT64: + return isNull + ? 0 + : checkedCastToShort(Double.valueOf(spanner.getDouble(spannerIndex)).longValue()); + case INT64: + case ENUM: + if (this.columnsAllowedUncheckedLongCastToShort.contains(columnIndex)) { + // This is used to allow frameworks that call getShort(int) on the ResultSet that is + // returned by DatabaseMetadata#getTypeInfo() to get the type code as a short, even when + // the value is out of range for a short. + return isNull ? 0 : (short) spanner.getLong(spannerIndex); + } + return isNull ? 0 : checkedCastToShort(spanner.getLong(spannerIndex)); + case NUMERIC: + return isNull + ? 
(short) 0 + : checkedCastToShort(spanner.getBigDecimal(spannerIndex).toBigInteger()); + case PG_NUMERIC: + return isNull + ? 0 + : checkedCastToShort(parseBigDecimal(spanner.getString(spannerIndex)).toBigInteger()); + case STRING: + return isNull ? 0 : checkedCastToShort(parseLong(spanner.getString(spannerIndex))); + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case STRUCT: + case TIMESTAMP: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("short", type); + } + } + + @Override + public int getInt(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return isNull ? 0 : (spanner.getBoolean(spannerIndex) ? 1 : 0); + case FLOAT32: + return isNull + ? 0 + : checkedCastToInt(Float.valueOf(spanner.getFloat(spannerIndex)).longValue()); + case FLOAT64: + return isNull + ? 0 + : checkedCastToInt(Double.valueOf(spanner.getDouble(spannerIndex)).longValue()); + case INT64: + case ENUM: + return isNull ? 0 : checkedCastToInt(spanner.getLong(spannerIndex)); + case NUMERIC: + return isNull ? 0 : checkedCastToInt(spanner.getBigDecimal(spannerIndex).toBigInteger()); + case PG_NUMERIC: + return isNull + ? 0 + : checkedCastToInt(parseBigDecimal(spanner.getString(spannerIndex)).toBigInteger()); + case STRING: + return isNull ? 0 : checkedCastToInt(parseLong(spanner.getString(spannerIndex))); + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case STRUCT: + case TIMESTAMP: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("int", type); + } + } + + @Override + public long getLong(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return isNull ? 
0L : (spanner.getBoolean(spannerIndex) ? 1L : 0L); + case FLOAT32: + return isNull ? 0L : Float.valueOf(spanner.getFloat(spannerIndex)).longValue(); + case FLOAT64: + return isNull ? 0L : Double.valueOf(spanner.getDouble(spannerIndex)).longValue(); + case INT64: + case ENUM: + return isNull ? 0L : spanner.getLong(spannerIndex); + case NUMERIC: + return isNull ? 0L : checkedCastToLong(spanner.getBigDecimal(spannerIndex).toBigInteger()); + case PG_NUMERIC: + return isNull + ? 0L + : checkedCastToLong(parseBigDecimal(spanner.getString(spannerIndex)).toBigInteger()); + case STRING: + return isNull ? 0L : parseLong(spanner.getString(spannerIndex)); + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case STRUCT: + case TIMESTAMP: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("long", type); + } + } + + @Override + public float getFloat(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return isNull ? 0 : (spanner.getBoolean(spannerIndex) ? (float) 1 : 0); + case FLOAT32: + return isNull ? 0 : spanner.getFloat(spannerIndex); + case FLOAT64: + return isNull ? 0 : checkedCastToFloat(spanner.getDouble(spannerIndex)); + case INT64: + case ENUM: + return isNull ? 0 : checkedCastToFloat(spanner.getLong(spannerIndex)); + case NUMERIC: + return isNull ? 0 : spanner.getBigDecimal(spannerIndex).floatValue(); + case PG_NUMERIC: + return isNull ? 0 : parseFloat(spanner.getString(spannerIndex)); + case STRING: + return isNull ? 
0 : checkedCastToFloat(parseDouble(spanner.getString(spannerIndex))); + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case STRUCT: + case TIMESTAMP: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("float", type); + } + } + + @Override + public double getDouble(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case BOOL: + return isNull ? 0 : (spanner.getBoolean(spannerIndex) ? (double) 1 : 0); + case FLOAT32: + return isNull ? 0 : spanner.getFloat(spannerIndex); + case FLOAT64: + return isNull ? 0 : spanner.getDouble(spannerIndex); + case INT64: + case ENUM: + return isNull ? 0 : spanner.getLong(spannerIndex); + case NUMERIC: + return isNull ? 0 : spanner.getBigDecimal(spannerIndex).doubleValue(); + case PG_NUMERIC: + return isNull ? 0 : parseDouble(spanner.getString(spannerIndex)); + case STRING: + return isNull ? 0 : parseDouble(spanner.getString(spannerIndex)); + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case STRUCT: + case TIMESTAMP: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("double", type); + } + } + + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + final boolean isNull = isNull(columnIndex); + final int spannerIndex = columnIndex - 1; + return isNull ? null : spanner.getBytes(spannerIndex).toByteArray(); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case DATE: + return isNull ? null : JdbcTypeConverter.toSqlDate(spanner.getDate(spannerIndex)); + case STRING: + return isNull ? 
null : parseDate(spanner.getString(spannerIndex)); + case TIMESTAMP: + return isNull + ? null + : new Date(spanner.getTimestamp(spannerIndex).toSqlTimestamp().getTime()); + case BOOL: + case FLOAT32: + case FLOAT64: + case INT64: + case NUMERIC: + case PG_NUMERIC: + case BYTES: + case JSON: + case PG_JSONB: + case STRUCT: + case PROTO: + case ENUM: + case ARRAY: + default: + throw createInvalidToGetAs("date", type); + } + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case STRING: + return isNull ? null : parseTime(spanner.getString(spannerIndex)); + case TIMESTAMP: + return isNull ? null : JdbcTypeConverter.toSqlTime(spanner.getTimestamp(spannerIndex)); + case BOOL: + case DATE: + case FLOAT32: + case FLOAT64: + case INT64: + case NUMERIC: + case PG_NUMERIC: + case BYTES: + case JSON: + case PG_JSONB: + case STRUCT: + case PROTO: + case ENUM: + case ARRAY: + default: + throw createInvalidToGetAs("time", type); + } + } + + @Override + public Timestamp getTimestamp(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case DATE: + return isNull ? null : JdbcTypeConverter.toSqlTimestamp(spanner.getDate(spannerIndex)); + case STRING: + return isNull ? null : parseTimestamp(spanner.getString(spannerIndex)); + case TIMESTAMP: + return isNull ? 
null : JdbcTypeConverter.toSqlTimestamp(spanner.getTimestamp(spannerIndex)); + case BOOL: + case FLOAT32: + case FLOAT64: + case INT64: + case NUMERIC: + case PG_NUMERIC: + case BYTES: + case JSON: + case PG_JSONB: + case STRUCT: + case PROTO: + case ENUM: + case ARRAY: + default: + throw createInvalidToGetAs("timestamp", type); + } + } + + public UUID getUUID(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + if (isNull(columnIndex)) { + return null; + } + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case UUID: + return spanner.getUuid(spannerIndex); + case STRING: + return UUID.fromString(spanner.getString(spannerIndex)); + case BYTES: + case DATE: + case TIMESTAMP: + case BOOL: + case FLOAT32: + case FLOAT64: + case INT64: + case NUMERIC: + case PG_NUMERIC: + case JSON: + case PG_JSONB: + case STRUCT: + case PROTO: + case ENUM: + case ARRAY: + default: + throw createInvalidToGetAs("uuid", type); + } + } + + private InputStream getInputStream(String val, Charset charset) { + if (val == null) return null; + byte[] b = val.getBytes(charset); + return new ByteArrayInputStream(b); + } + + @Override + public InputStream getAsciiStream(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + return getInputStream(getString(columnIndex), StandardCharsets.US_ASCII); + } + + @Override + @Deprecated + public InputStream getUnicodeStream(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + return getInputStream(getString(columnIndex), StandardCharsets.UTF_16LE); + } + + @Override + public InputStream getBinaryStream(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + byte[] val = getBytes(columnIndex); + return val == null ? 
null : new ByteArrayInputStream(val); + } + + @Override + public String getString(String columnLabel) throws SQLException { + return getString(findColumn(columnLabel)); + } + + @Override + public boolean getBoolean(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getBoolean(findColumn(columnLabel)); + } + + @Override + public byte getByte(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getByte(findColumn(columnLabel)); + } + + @Override + public short getShort(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getShort(findColumn(columnLabel)); + } + + @Override + public int getInt(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getInt(findColumn(columnLabel)); + } + + @Override + public long getLong(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getLong(findColumn(columnLabel)); + } + + @Override + public float getFloat(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getFloat(findColumn(columnLabel)); + } + + @Override + public double getDouble(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getDouble(findColumn(columnLabel)); + } + + @Override + public byte[] getBytes(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getBytes(findColumn(columnLabel)); + } + + @Override + public Date getDate(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getDate(findColumn(columnLabel)); + } + + @Override + public Time getTime(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getTime(findColumn(columnLabel)); + } + + @Override + public Timestamp getTimestamp(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getTimestamp(findColumn(columnLabel)); + } + + @Override + public InputStream getAsciiStream(String columnLabel) throws SQLException { + 
checkClosedAndValidRow(); + return getAsciiStream(findColumn(columnLabel)); + } + + @Override + @Deprecated + public InputStream getUnicodeStream(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getUnicodeStream(findColumn(columnLabel)); + } + + @Override + public InputStream getBinaryStream(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getBinaryStream(findColumn(columnLabel)); + } + + @Override + public JdbcResultSetMetaData getMetaData() throws SQLException { + checkClosed(); + if (isBeforeFirst() && !nextCalledForMetaData) { + // do a call to next() on the underlying resultset to initialize metadata + nextCalledForMetaData = true; + nextCalledForMetaDataResult = spanner.next(); + } + return new JdbcResultSetMetaData(this, statement); + } + + @Override + public Object getObject(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getObject(findColumn(columnLabel)); + } + + @Override + public Object getObject(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + Type type = spanner.getColumnType(columnIndex - 1); + return isNull(columnIndex) ? 
null : getObject(type, columnIndex); + } + + private Object getObject(Type type, int columnIndex) throws SQLException { + JdbcPreconditions.checkArgument(type != null, "type is null"); + switch (type.getCode()) { + case BOOL: + return getBoolean(columnIndex); + case BYTES: + case PROTO: + return getBytes(columnIndex); + case DATE: + return getDate(columnIndex); + case FLOAT32: + return getFloat(columnIndex); + case FLOAT64: + return getDouble(columnIndex); + case INT64: + case PG_OID: + case ENUM: + return getLong(columnIndex); + case NUMERIC: + return getBigDecimal(columnIndex); + case PG_NUMERIC: + final String value = getString(columnIndex); + try { + return parseBigDecimal(value); + } catch (Exception e) { + return parseDouble(value); + } + case STRING: + case JSON: + case PG_JSONB: + return getString(columnIndex); + case TIMESTAMP: + return getTimestamp(columnIndex); + case UUID: + return getUUID(columnIndex); + case ARRAY: + return getArray(columnIndex); + default: + throw JdbcSqlExceptionFactory.of( + "Unknown type: " + type, com.google.rpc.Code.INVALID_ARGUMENT); + } + } + + @Override + public int findColumn(String columnLabel) throws SQLException { + checkClosed(); + try { + return spanner.getColumnIndex(columnLabel) + 1; + } catch (IllegalArgumentException e) { + throw JdbcSqlExceptionFactory.of( + "no column with label " + columnLabel + " found", com.google.rpc.Code.INVALID_ARGUMENT); + } + } + + @Override + public Reader getCharacterStream(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + String val = getString(columnIndex); + return val == null ? 
null : new StringReader(val); + } + + @Override + public Reader getCharacterStream(String columnLabel) throws SQLException { + return getCharacterStream(findColumn(columnLabel)); + } + + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + return getBigDecimal(columnIndex, false, 0); + } + + @Override + public BigDecimal getBigDecimal(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getBigDecimal(findColumn(columnLabel), false, 0); + } + + @Override + @Deprecated + public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { + checkClosedAndValidRow(); + return getBigDecimal(columnIndex, true, scale); + } + + @Override + @Deprecated + public BigDecimal getBigDecimal(String columnLabel, int scale) throws SQLException { + checkClosedAndValidRow(); + return getBigDecimal(findColumn(columnLabel), true, scale); + } + + private BigDecimal getBigDecimal(int columnIndex, boolean fixedScale, int scale) + throws SQLException { + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + boolean isNull = isNull(columnIndex); + BigDecimal res; + switch (type) { + case BOOL: + res = + isNull + ? null + : (spanner.getBoolean(columnIndex - 1) ? BigDecimal.ONE : BigDecimal.ZERO); + break; + case FLOAT32: + res = isNull ? null : BigDecimal.valueOf(spanner.getFloat(spannerIndex)); + break; + case FLOAT64: + res = isNull ? null : BigDecimal.valueOf(spanner.getDouble(spannerIndex)); + break; + case INT64: + case ENUM: + res = isNull ? null : BigDecimal.valueOf(spanner.getLong(spannerIndex)); + break; + case NUMERIC: + res = isNull ? null : spanner.getBigDecimal(spannerIndex); + break; + case PG_NUMERIC: + res = isNull ? null : parseBigDecimal(spanner.getString(spannerIndex)); + break; + case STRING: + try { + res = isNull ? 
null : new BigDecimal(spanner.getString(spannerIndex)); + break; + } catch (NumberFormatException e) { + throw JdbcSqlExceptionFactory.of( + "The column does not contain a valid BigDecimal", + com.google.rpc.Code.INVALID_ARGUMENT, + e); + } + case BYTES: + case JSON: + case PG_JSONB: + case DATE: + case TIMESTAMP: + case STRUCT: + case PROTO: + case ARRAY: + default: + throw createInvalidToGetAs("BigDecimal", type); + } + if (res != null && fixedScale) { + res = res.setScale(scale, RoundingMode.HALF_UP); + } + return res; + } + + @Override + public boolean isBeforeFirst() throws SQLException { + checkClosed(); + return currentRow == 0L; + } + + @Override + public boolean isAfterLast() throws SQLException { + checkClosed(); + return nextReturnedFalse; + } + + @Override + public boolean isFirst() throws SQLException { + checkClosed(); + return currentRow == 1L; + } + + @Override + public int getRow() throws SQLException { + checkClosed(); + return checkedCastToInt(currentRow); + } + + @Override + public Statement getStatement() throws SQLException { + checkClosed(); + return statement; + } + + @Override + public Array getArray(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getArray(findColumn(columnLabel)); + } + + @Override + public Array getArray(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + if (isNull(columnIndex)) return null; + Type type = spanner.getColumnType(columnIndex - 1); + if (type.getCode() != Code.ARRAY) + throw JdbcSqlExceptionFactory.of( + "Column with index " + columnIndex + " does not contain an array", + com.google.rpc.Code.INVALID_ARGUMENT); + final Code elementCode = getMainTypeCode(type.getArrayElementType()); + final JdbcDataType dataType = JdbcDataType.getType(elementCode); + try { + List elements = dataType.getArrayElements(spanner, columnIndex - 1); + return JdbcArray.createArray(dataType, elements); + } catch (NumberFormatException e) { + final String sqlType = "ARRAY<" + 
type.getArrayElementType() + ">"; + final Value value = spanner.getValue(columnIndex - 1); + throw createCastException(sqlType, value); + } + } + + @Override + public Date getDate(int columnIndex, Calendar cal) throws SQLException { + checkClosedAndValidRow(); + if (isNull(columnIndex)) { + return null; + } + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case DATE: + return JdbcTypeConverter.toSqlDate(spanner.getDate(spannerIndex), cal); + case STRING: + return parseDate(spanner.getString(spannerIndex), cal); + case TIMESTAMP: + return new Date( + JdbcTypeConverter.getAsSqlTimestamp(spanner.getTimestamp(spannerIndex), cal).getTime()); + case BOOL: + case FLOAT32: + case FLOAT64: + case INT64: + case NUMERIC: + case BYTES: + case JSON: + case PG_JSONB: + case STRUCT: + case PROTO: + case ENUM: + case ARRAY: + default: + throw createInvalidToGetAs("date", type); + } + } + + @Override + public Date getDate(String columnLabel, Calendar cal) throws SQLException { + return getDate(findColumn(columnLabel), cal); + } + + @Override + public Time getTime(int columnIndex, Calendar cal) throws SQLException { + checkClosedAndValidRow(); + boolean isNull = isNull(columnIndex); + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case STRING: + return isNull ? null : parseTime(spanner.getString(spannerIndex), cal); + case TIMESTAMP: + return isNull ? 
null : JdbcTypeConverter.toSqlTime(spanner.getTimestamp(spannerIndex), cal); + case BOOL: + case DATE: + case FLOAT32: + case FLOAT64: + case INT64: + case NUMERIC: + case BYTES: + case JSON: + case PG_JSONB: + case STRUCT: + case PROTO: + case ENUM: + case ARRAY: + default: + throw createInvalidToGetAs("time", type); + } + } + + @Override + public Time getTime(String columnLabel, Calendar cal) throws SQLException { + return getTime(findColumn(columnLabel), cal); + } + + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + checkClosedAndValidRow(); + if (isNull(columnIndex)) { + return null; + } + int spannerIndex = columnIndex - 1; + Code type = getMainTypeCode(spanner.getColumnType(spannerIndex)); + switch (type) { + case DATE: + return JdbcTypeConverter.toSqlTimestamp(spanner.getDate(spannerIndex), cal); + case STRING: + return parseTimestamp(spanner.getString(spannerIndex), cal); + case TIMESTAMP: + return JdbcTypeConverter.getAsSqlTimestamp(spanner.getTimestamp(spannerIndex), cal); + case BOOL: + case FLOAT32: + case FLOAT64: + case INT64: + case NUMERIC: + case BYTES: + case JSON: + case PG_JSONB: + case STRUCT: + case PROTO: + case ENUM: + case ARRAY: + default: + throw createInvalidToGetAs("timestamp", type); + } + } + + @Override + public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { + return getTimestamp(findColumn(columnLabel), cal); + } + + @Override + public URL getURL(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + try { + return isNull(columnIndex) ? 
null : new URL(getString(columnIndex)); + } catch (MalformedURLException e) { + throw JdbcSqlExceptionFactory.of( + "Invalid URL: " + spanner.getString(columnIndex - 1), + com.google.rpc.Code.INVALID_ARGUMENT); + } + } + + @Override + public URL getURL(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getURL(findColumn(columnLabel)); + } + + @Override + public int getHoldability() throws SQLException { + checkClosed(); + return CLOSE_CURSORS_AT_COMMIT; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public String getNString(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + return getString(columnIndex); + } + + @Override + public String getNString(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getString(columnLabel); + } + + @Override + public Reader getNCharacterStream(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + return getCharacterStream(columnIndex); + } + + @Override + public Reader getNCharacterStream(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + return getCharacterStream(columnLabel); + } + + @Override + public T getObject(int columnIndex, Class type) throws SQLException { + checkClosedAndValidRow(); + return convertObject(getObject(columnIndex), type, spanner.getColumnType(columnIndex - 1)); + } + + @Override + public T getObject(String columnLabel, Class type) throws SQLException { + checkClosedAndValidRow(); + return convertObject(getObject(columnLabel), type, spanner.getColumnType(columnLabel)); + } + + @Override + public Object getObject(int columnIndex, Map> map) throws SQLException { + checkClosedAndValidRow(); + return convertObject(getObject(columnIndex), map, spanner.getColumnType(columnIndex - 1)); + } + + @Override + public Object getObject(String columnLabel, Map> map) throws SQLException { + checkClosedAndValidRow(); + return convertObject(getObject(columnLabel), map, 
spanner.getColumnType(columnLabel)); + } + + @Override + public Blob getBlob(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + byte[] val = getBytes(columnIndex); + return val == null ? null : new JdbcBlob(val); + } + + @Override + public Blob getBlob(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + byte[] val = getBytes(columnLabel); + return val == null ? null : new JdbcBlob(val); + } + + @Override + public Clob getClob(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + String val = getString(columnIndex); + return val == null ? null : new JdbcClob(val); + } + + @Override + public Clob getClob(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + String val = getString(columnLabel); + return val == null ? null : new JdbcClob(val); + } + + @Override + public NClob getNClob(int columnIndex) throws SQLException { + checkClosedAndValidRow(); + String val = getString(columnIndex); + return val == null ? null : new JdbcClob(val); + } + + @Override + public NClob getNClob(String columnLabel) throws SQLException { + checkClosedAndValidRow(); + String val = getString(columnLabel); + return val == null ? 
null : new JdbcClob(val); + } + + @SuppressWarnings("unchecked") + private T convertObject(Object o, Class javaType, Type type) throws SQLException { + return (T) JdbcTypeConverter.convert(o, type, javaType); + } + + private Object convertObject(Object o, Map> map, Type type) throws SQLException { + if (map == null) + throw JdbcSqlExceptionFactory.of("Map may not be null", com.google.rpc.Code.INVALID_ARGUMENT); + if (o == null) return null; + Class javaType = map.get(type.getCode().name()); + if (javaType == null) return o; + return JdbcTypeConverter.convert(o, type, javaType); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcResultSetMetaData.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcResultSetMetaData.java new file mode 100644 index 000000000000..1502a97031ab --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcResultSetMetaData.java @@ -0,0 +1,235 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.connection.ConnectionProperties; +import com.google.common.base.Preconditions; +import java.sql.Connection; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Types; + +/** Implementation of {@link ResultSetMetaData} for Cloud Spanner */ +class JdbcResultSetMetaData extends AbstractJdbcWrapper implements ResultSetMetaData { + private final ResultSet spannerResultSet; + private final Statement statement; + + JdbcResultSetMetaData(JdbcResultSet jdbcResultSet, Statement statement) { + Preconditions.checkNotNull(jdbcResultSet); + this.spannerResultSet = jdbcResultSet.spanner; + this.statement = statement; + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public int getColumnCount() { + return spannerResultSet.getColumnCount(); + } + + @Override + public boolean isAutoIncrement(int column) { + return false; + } + + @Override + public boolean isCaseSensitive(int column) { + int type = getColumnType(column); + return type == Types.NVARCHAR || type == Types.BINARY; + } + + @Override + public boolean isSearchable(int column) { + return true; + } + + @Override + public boolean isCurrency(int column) { + return false; + } + + @Override + public int isNullable(int column) { + return columnNullableUnknown; + } + + @Override + public boolean isSigned(int column) { + int type = getColumnType(column); + return type == Types.DOUBLE || type == Types.BIGINT || type == Types.NUMERIC; + } + + @Override + public int getColumnDisplaySize(int column) throws SQLException { + int colType = getColumnType(column); + switch (colType) { + case Types.ARRAY: + return getUnknownLength(); + case Types.BOOLEAN: + return 5; + case Types.BINARY: + return getPrecision(column); + case Types.DATE: + return 
10; + case Types.REAL: + return JdbcDataType.FLOAT32.getDefaultColumnDisplaySize(); + case Types.FLOAT: + case Types.DOUBLE: + return 14; + case Types.BIGINT: + return 10; + case Types.NUMERIC: + return 14; + case Types.NVARCHAR: + return getPrecision(column); + case Types.TIMESTAMP: + return 16; + default: + return 10; + } + } + + @Override + public String getColumnLabel(int column) { + return spannerResultSet.getType().getStructFields().get(column - 1).getName(); + } + + @Override + public String getColumnName(int column) { + return spannerResultSet.getType().getStructFields().get(column - 1).getName(); + } + + @Override + public String getSchemaName(int column) throws SQLException { + return statement.getConnection().getSchema(); + } + + @Override + public int getPrecision(int column) throws SQLException { + int colType = getColumnType(column); + switch (colType) { + case Types.BOOLEAN: + return 1; + case Types.DATE: + return 10; + case Types.REAL: + return JdbcDataType.FLOAT32.getPrecision(); + case Types.FLOAT: + case Types.DOUBLE: + return 14; + case Types.BIGINT: + case Types.INTEGER: + return 10; + case Types.NUMERIC: + return 14; + case Types.TIMESTAMP: + return 24; + default: + // For column types with variable size, such as text columns, we should return the length + // in characters. We could try to fetch it from INFORMATION_SCHEMA, but that would mean + // parsing the SQL statement client side in order to figure out which column it actually + // is. Instead, we return a configurable fixed length. This is also consistent with for + // example the PostgreSQL JDBC driver. 
See the 'unknownLength' connection property: + // https://jdbc.postgresql.org/documentation/use/#connection-parameters + return getUnknownLength(); + } + } + + private int getUnknownLength() throws SQLException { + Connection connection = statement.getConnection(); + if (connection instanceof JdbcConnection) { + JdbcConnection jdbcConnection = (JdbcConnection) connection; + return jdbcConnection.getColumnTypeUnknownLength(); + } + return ConnectionProperties.UNKNOWN_LENGTH.getDefaultValue(); + } + + @Override + public int getScale(int column) { + int colType = getColumnType(column); + if (colType == Types.REAL) { + return JdbcDataType.FLOAT32.getScale(); + } + if (colType == Types.DOUBLE || colType == Types.NUMERIC) { + return 15; + } + return 0; + } + + @Override + public String getTableName(int column) { + return ""; + } + + @Override + public String getCatalogName(int column) throws SQLException { + return statement.getConnection().getCatalog(); + } + + @Override + public int getColumnType(int column) { + return extractColumnType(spannerResultSet.getColumnType(column - 1)); + } + + @Override + public String getColumnTypeName(int column) { + Type columnType = spannerResultSet.getColumnType(column - 1); + if (statement instanceof JdbcStatement) { + Dialect dialect = ((JdbcStatement) statement).getConnection().getDialect(); + return getSpannerColumnTypeName(columnType, dialect); + } + return columnType.getCode().name(); + } + + @Override + public boolean isReadOnly(int column) { + return false; + } + + @Override + public boolean isWritable(int column) { + return !isReadOnly(column); + } + + @Override + public boolean isDefinitelyWritable(int column) { + return false; + } + + @Override + public String getColumnClassName(int column) { + return getClassName(spannerResultSet.getColumnType(column - 1)); + } + + @Override + public String toString() { + StringBuilder res = new StringBuilder(); + for (int col = 1; col <= getColumnCount(); col++) { + res.append("Col 
").append(col).append(": "); + res.append(getColumnName(col)).append(" ").append(getColumnTypeName(col)); + res.append("\n"); + } + return res.toString(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSavepoint.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSavepoint.java new file mode 100644 index 000000000000..3cd4c41377ef --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSavepoint.java @@ -0,0 +1,58 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import java.sql.SQLException; +import java.sql.Savepoint; +import java.util.concurrent.atomic.AtomicInteger; + +class JdbcSavepoint implements Savepoint { + private static final AtomicInteger COUNTER = new AtomicInteger(); + + static JdbcSavepoint named(String name) { + return new JdbcSavepoint(-1, name); + } + + static JdbcSavepoint unnamed() { + int id = COUNTER.incrementAndGet(); + return new JdbcSavepoint(id, String.format("s_%d", id)); + } + + private final int id; + private final String name; + + private JdbcSavepoint(int id, String name) { + this.id = id; + this.name = name; + } + + @Override + public int getSavepointId() throws SQLException { + JdbcPreconditions.checkState(this.id >= 0, "This is a named savepoint"); + return id; + } + + @Override + public String getSavepointName() throws SQLException { + JdbcPreconditions.checkState(this.id < 0, "This is an unnamed savepoint"); + return name; + } + + String internalGetSavepointName() { + return name; + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSimpleParameterMetaData.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSimpleParameterMetaData.java new file mode 100644 index 000000000000..b458573d0d39 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSimpleParameterMetaData.java @@ -0,0 +1,102 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.api.core.BetaApi; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParametersInfo; +import java.sql.ParameterMetaData; +import java.sql.SQLException; +import java.sql.Types; + +/** + * {@link JdbcSimpleParameterMetaData} implements {@link ParameterMetaData} without a round-trip to + * Spanner. This is an experimental feature that can be removed in a future version without prior + * warning. + */ +@BetaApi +class JdbcSimpleParameterMetaData implements ParameterMetaData { + static final String USE_SIMPLE_PARAMETER_METADATA_KEY = + "spanner.jdbc.use_simple_parameter_metadata"; + private final ParametersInfo parametersInfo; + + /** + * This is an experimental feature that can be removed in a future version without prior warning. + */ + @BetaApi + static boolean useSimpleParameterMetadata() { + return Boolean.parseBoolean(System.getProperty(USE_SIMPLE_PARAMETER_METADATA_KEY, "false")); + } + + JdbcSimpleParameterMetaData(ParametersInfo parametersInfo) { + this.parametersInfo = parametersInfo; + } + + @Override + public int getParameterCount() throws SQLException { + return this.parametersInfo.numberOfParameters; + } + + @Override + public int isNullable(int param) throws SQLException { + return ParameterMetaData.parameterNullableUnknown; + } + + @Override + public boolean isSigned(int param) throws SQLException { + return false; + } + + @Override + public int getPrecision(int param) throws SQLException { + return 0; + } + + @Override + public int getScale(int param) throws SQLException { + return 0; + } + + @Override + public int getParameterType(int param) throws SQLException { + return Types.OTHER; + } + + @Override + public String getParameterTypeName(int param) throws SQLException { + return "unknown"; + } + + @Override + public String getParameterClassName(int param) throws 
SQLException { + return Object.class.getName(); + } + + @Override + public int getParameterMode(int param) throws SQLException { + return ParameterMetaData.parameterModeIn; + } + + @Override + public T unwrap(Class iface) throws SQLException { + throw new SQLException("This is not a wrapper for " + iface.getName()); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return false; + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSqlException.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSqlException.java new file mode 100644 index 000000000000..90f6df0434bc --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSqlException.java @@ -0,0 +1,48 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.rpc.Code; +import java.sql.SQLException; + +/** + * Base interface for all Cloud Spanner {@link SQLException}s. All {@link SQLException}s that are + * thrown by the Cloud Spanner JDBC driver implement this interface. 
+ */ +public interface JdbcSqlException { + + /** + * @see Throwable#getMessage() + */ + String getMessage(); + + /** + * @see Throwable#getCause() + */ + Throwable getCause(); + + /** + * @see SQLException#getSQLState() + */ + String getSQLState(); + + /** Returns the gRPC error code as an int */ + int getErrorCode(); + + /** Returns the corresponding gRPC code for this exception */ + Code getCode(); +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSqlExceptionFactory.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSqlExceptionFactory.java new file mode 100644 index 000000000000..6ddc7d683d8f --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcSqlExceptionFactory.java @@ -0,0 +1,333 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.rpc.Code; +import java.sql.BatchUpdateException; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLTimeoutException; +import java.util.Collections; + +/** Factory class for creating {@link SQLException}s for Cloud Spanner */ +public final class JdbcSqlExceptionFactory { + + /** Base {@link SQLException} for Cloud Spanner */ + public static class JdbcSqlExceptionImpl extends SQLException implements JdbcSqlException { + private static final long serialVersionUID = 235381453830069910L; + private final Code code; + + private JdbcSqlExceptionImpl(String message, Code code) { + super(message, null, code.getNumber(), null); + this.code = code; + } + + private JdbcSqlExceptionImpl(String message, Code code, Throwable cause) { + super(message, null, code.getNumber(), cause); + this.code = code; + } + + private JdbcSqlExceptionImpl(SpannerException e) { + super(e.getMessage(), null, e.getCode(), e); + this.code = Code.forNumber(e.getCode()); + } + + private JdbcSqlExceptionImpl(String message, SpannerException e) { + super(message, null, e.getCode(), e); + this.code = Code.forNumber(e.getCode()); + } + + @Override + public Code getCode() { + return code; + } + } + + /** Specific {@link SQLException} that is thrown when a statement times out */ + public static class JdbcSqlTimeoutException extends SQLTimeoutException + implements JdbcSqlException { + private static final long serialVersionUID = 2363793358642102814L; + + private JdbcSqlTimeoutException(SpannerException e) { + super(e.getMessage(), "Timed out", Code.DEADLINE_EXCEEDED_VALUE, e); + } + + private 
JdbcSqlTimeoutException(String message) { + super(message, "Timed out", Code.DEADLINE_EXCEEDED_VALUE); + } + + @Override + public Code getCode() { + return Code.DEADLINE_EXCEEDED; + } + } + + /** Specific {@link SQLException} that is thrown when setting client info on a connection */ + public static class JdbcSqlClientInfoException extends SQLClientInfoException + implements JdbcSqlException { + private static final long serialVersionUID = 5341238042343668540L; + private final Code code; + + private JdbcSqlClientInfoException(String message, Code code) { + super(message, Collections.emptyMap()); + this.code = code; + } + + @Override + public Code getCode() { + return code; + } + } + + /** Specific {@link SQLException} that is thrown for unsupported methods and values */ + public static class JdbcSqlFeatureNotSupportedException extends SQLFeatureNotSupportedException + implements JdbcSqlException { + private static final long serialVersionUID = 2363793358642102814L; + + private JdbcSqlFeatureNotSupportedException(String message) { + super(message, "not supported", Code.UNIMPLEMENTED_VALUE); + } + + @Override + public Code getCode() { + return Code.UNIMPLEMENTED; + } + } + + /** + * Specific {@link SQLException} that is thrown when a {@link SpannerBatchUpdateException} occurs. 
+ */ + public static class JdbcSqlBatchUpdateException extends BatchUpdateException + implements JdbcSqlException { + private static final long serialVersionUID = 8894995110837971444L; + private final Code code; + + private JdbcSqlBatchUpdateException(int[] updateCounts, SpannerBatchUpdateException cause) { + super(cause.getMessage(), updateCounts, cause); + this.code = Code.forNumber(cause.getCode()); + } + + private JdbcSqlBatchUpdateException(long[] updateCounts, SpannerBatchUpdateException cause) { + super( + cause.getMessage(), + cause.getErrorCode().toString(), + cause.getCode(), + updateCounts, + cause); + this.code = Code.forNumber(cause.getCode()); + } + + @Override + public Code getCode() { + return code; + } + } + + /** + * Specific {@link SQLException} that is thrown when a transaction was aborted and could not be + * successfully retried. + */ + public static class JdbcAbortedException extends JdbcSqlExceptionImpl { + private JdbcAbortedException(AbortedException cause) { + super(cause); + } + + private JdbcAbortedException(String message) { + super(message, Code.ABORTED); + } + + private JdbcAbortedException(String message, AbortedException cause) { + super(message, cause); + } + + @Override + public synchronized AbortedException getCause() { + return (AbortedException) super.getCause(); + } + } + + /** + * Specific {@link SQLException} that is thrown when a transaction was aborted and could not be + * retried due to a concurrent modification. 
+ */ + public static class JdbcAbortedDueToConcurrentModificationException extends JdbcAbortedException { + private JdbcAbortedDueToConcurrentModificationException( + AbortedDueToConcurrentModificationException cause) { + super(cause); + } + + private JdbcAbortedDueToConcurrentModificationException( + String message, AbortedDueToConcurrentModificationException cause) { + super(message, cause); + } + + @Override + public synchronized AbortedDueToConcurrentModificationException getCause() { + return (AbortedDueToConcurrentModificationException) super.getCause(); + } + + public SpannerException getDatabaseErrorDuringRetry() { + return getCause().getDatabaseErrorDuringRetry(); + } + } + + /** Creates a {@link JdbcSqlException} from the given {@link SpannerException}. */ + static SQLException of(SpannerException e) { + switch (e.getErrorCode()) { + case ABORTED: + if (e instanceof AbortedDueToConcurrentModificationException) { + return new JdbcAbortedDueToConcurrentModificationException( + (AbortedDueToConcurrentModificationException) e); + } else if (e instanceof AbortedException) { + return new JdbcAbortedException((AbortedException) e); + } + case DEADLINE_EXCEEDED: + return new JdbcSqlTimeoutException(e); + case ALREADY_EXISTS: + case CANCELLED: + case DATA_LOSS: + case FAILED_PRECONDITION: + case INTERNAL: + case INVALID_ARGUMENT: + case NOT_FOUND: + case OUT_OF_RANGE: + case PERMISSION_DENIED: + case RESOURCE_EXHAUSTED: + case UNAUTHENTICATED: + case UNAVAILABLE: + case UNIMPLEMENTED: + case UNKNOWN: + default: + } + return new JdbcSqlExceptionImpl(e); + } + + /** Creates a {@link JdbcSqlException} with the given message and error code. 
*/ + static SQLException of(String message, Code code) { + switch (code) { + case ABORTED: + return new JdbcAbortedException(code.name() + ": " + message); + case DEADLINE_EXCEEDED: + return new JdbcSqlTimeoutException(code.name() + ": " + message); + case ALREADY_EXISTS: + case CANCELLED: + case DATA_LOSS: + case FAILED_PRECONDITION: + case INTERNAL: + case INVALID_ARGUMENT: + case NOT_FOUND: + case OUT_OF_RANGE: + case PERMISSION_DENIED: + case RESOURCE_EXHAUSTED: + case UNAUTHENTICATED: + case UNAVAILABLE: + case UNIMPLEMENTED: + case UNKNOWN: + default: + } + return new JdbcSqlExceptionImpl(code.name() + ": " + message, code); + } + + /** Creates a {@link JdbcSqlException} with the given message and cause. */ + static SQLException of(String message, SpannerException e) { + switch (e.getErrorCode()) { + case ABORTED: + if (e instanceof AbortedDueToConcurrentModificationException) { + return new JdbcAbortedDueToConcurrentModificationException( + message, (AbortedDueToConcurrentModificationException) e); + } else if (e instanceof AbortedException) { + return new JdbcAbortedException(message, (AbortedException) e); + } + case DEADLINE_EXCEEDED: + return new JdbcSqlTimeoutException(e.getErrorCode().name() + ": " + message); + case ALREADY_EXISTS: + case CANCELLED: + case DATA_LOSS: + case FAILED_PRECONDITION: + case INTERNAL: + case INVALID_ARGUMENT: + case NOT_FOUND: + case OUT_OF_RANGE: + case PERMISSION_DENIED: + case RESOURCE_EXHAUSTED: + case UNAUTHENTICATED: + case UNAVAILABLE: + case UNIMPLEMENTED: + case UNKNOWN: + default: + } + return new JdbcSqlExceptionImpl(e.getErrorCode().name() + ": " + message, e); + } + + /** Creates a {@link JdbcSqlException} with the given message, error code and cause. 
*/ + static SQLException of(String message, Code code, Throwable cause) { + switch (code) { + case ABORTED: + if (cause instanceof AbortedDueToConcurrentModificationException) { + return new JdbcAbortedDueToConcurrentModificationException( + message, (AbortedDueToConcurrentModificationException) cause); + } else if (cause instanceof AbortedException) { + return new JdbcAbortedException(message, (AbortedException) cause); + } + case DEADLINE_EXCEEDED: + return new JdbcSqlTimeoutException(code.name() + ": " + message); + case ALREADY_EXISTS: + case CANCELLED: + case DATA_LOSS: + case FAILED_PRECONDITION: + case INTERNAL: + case INVALID_ARGUMENT: + case NOT_FOUND: + case OUT_OF_RANGE: + case PERMISSION_DENIED: + case RESOURCE_EXHAUSTED: + case UNAUTHENTICATED: + case UNAVAILABLE: + case UNIMPLEMENTED: + case UNKNOWN: + default: + } + return new JdbcSqlExceptionImpl(code.name() + ": " + message, code, cause); + } + + /** Creates a {@link JdbcSqlException} for unsupported methods/values. */ + static SQLFeatureNotSupportedException unsupported(String message) { + return new JdbcSqlFeatureNotSupportedException(message); + } + + /** Creates a {@link JdbcSqlException} for client info exceptions. */ + static SQLClientInfoException clientInfoException(String message, Code code) { + return new JdbcSqlClientInfoException(code.name() + ": " + message, code); + } + + /** Creates a {@link JdbcSqlException} for batch update exceptions. */ + static BatchUpdateException batchException( + int[] updateCounts, SpannerBatchUpdateException cause) { + return new JdbcSqlBatchUpdateException(updateCounts, cause); + } + + /** Creates a {@link JdbcSqlException} for large batch update exceptions. 
*/ + static BatchUpdateException batchException( + long[] updateCounts, SpannerBatchUpdateException cause) { + return new JdbcSqlBatchUpdateException(updateCounts, cause); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcStatement.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcStatement.java new file mode 100644 index 000000000000..0a764b351b4d --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcStatement.java @@ -0,0 +1,619 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcConnection.NO_GENERATED_KEY_COLUMNS; + +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParsedStatement; +import com.google.cloud.spanner.connection.StatementResult; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.rpc.Code; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +/** Implementation of {@link java.sql.Statement} for Google Cloud Spanner. 
*/ +class JdbcStatement extends AbstractJdbcStatement implements CloudSpannerJdbcStatement { + static final ImmutableList ALL_COLUMNS = ImmutableList.of("*"); + + enum BatchType { + NONE, + DML, + DDL + } + + private ResultSet currentResultSet; + private ResultSet currentGeneratedKeys; + private long currentUpdateCount; + private int fetchSize; + private BatchType currentBatchType = BatchType.NONE; + final List batchedStatements = new ArrayList<>(); + + JdbcStatement(JdbcConnection connection) throws SQLException { + super(connection); + } + + @Override + public void close() throws SQLException { + setCurrentResultSet(null); + super.close(); + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + checkClosed(); + return executeQuery(Statement.of(sql)); + } + + /** + * @see java.sql.Statement#executeUpdate(String) + *

This method allows both DML and DDL statements to be executed. It assumes that the user + * knows what kind of statement is being executed, and the method will therefore return 0 for + * both DML statements that changed 0 rows as well as for all DDL statements. + */ + @Override + public int executeUpdate(String sql) throws SQLException { + return executeUpdate(sql, NO_GENERATED_KEY_COLUMNS); + } + + private int executeUpdate(String sql, ImmutableList generatedKeysColumns) + throws SQLException { + return checkedCast(executeLargeUpdate(sql, generatedKeysColumns)); + } + + /** + * @see java.sql.Statement#executeLargeUpdate(String) + *

This method allows both DML and DDL statements to be executed. It assumes that the user + * knows what kind of statement is being executed, and the method will therefore return 0 for + * both DML statements that changed 0 rows as well as for all DDL statements. + */ + @Override + public long executeLargeUpdate(String sql) throws SQLException { + return executeLargeUpdate(sql, NO_GENERATED_KEY_COLUMNS); + } + + private long executeLargeUpdate(String sql, ImmutableList generatedKeysColumns) + throws SQLException { + return executeLargeUpdate(Statement.of(sql), generatedKeysColumns); + } + + protected long executeLargeUpdate(Statement statement, ImmutableList generatedKeysColumns) + throws SQLException { + Preconditions.checkNotNull(generatedKeysColumns); + checkClosed(); + Statement statementWithReturningClause = + addReturningToStatement(statement, generatedKeysColumns); + StatementResult result = execute(statementWithReturningClause); + switch (result.getResultType()) { + case RESULT_SET: + if (generatedKeysColumns.isEmpty()) { + // Close the result set as we are not going to return it to the user. This prevents the + // underlying session from potentially being leaked. + throw closeResultSetAndCreateInvalidQueryException(result); + } + // Make a copy of the result set as it does not matter if the user does not close the result + // set. This also consumes all rows of the result set, which again means that it is safe to + // extract the update count. + this.currentGeneratedKeys = JdbcResultSet.copyOf(result.getResultSet()); + return extractUpdateCountAndClose(result.getResultSet()); + case UPDATE_COUNT: + return result.getUpdateCount(); + case NO_RESULT: + return 0L; + default: + throw JdbcSqlExceptionFactory.of( + "unknown result: " + result.getResultType(), Code.FAILED_PRECONDITION); + } + } + + /** + * Extracts the update count from the given result set and then closes the result set. 
This method + * may only be called for a {@link com.google.cloud.spanner.ResultSet} where all rows have been + * fetched. That is; {@link com.google.cloud.spanner.ResultSet#next()} must have returned false. + */ + private long extractUpdateCountAndClose(com.google.cloud.spanner.ResultSet resultSet) + throws SQLException { + try { + if (resultSet.getStats() == null) { + throw JdbcSqlExceptionFactory.of( + "Result does not contain any stats", Code.FAILED_PRECONDITION); + } + long updateCount; + if (resultSet.getStats().hasRowCountExact()) { + updateCount = resultSet.getStats().getRowCountExact(); + } else if (resultSet.getStats().hasRowCountLowerBound()) { + // This is returned by Cloud Spanner if the user had set the autocommit_dml_mode to + // 'partitioned_non_atomic' (i.e. PDML). + updateCount = resultSet.getStats().getRowCountLowerBound(); + } else { + throw JdbcSqlExceptionFactory.of( + "Result does not contain an update count", Code.FAILED_PRECONDITION); + } + return updateCount; + } catch (UnsupportedOperationException unsupportedOperationException) { + throw JdbcSqlExceptionFactory.of( + unsupportedOperationException.getMessage(), + Code.FAILED_PRECONDITION, + unsupportedOperationException); + } finally { + resultSet.close(); + } + } + + private SQLException closeResultSetAndCreateInvalidQueryException(StatementResult result) { + //noinspection finally + try { + result.getResultSet().close(); + } finally { + //noinspection ReturnInsideFinallyBlock + return JdbcSqlExceptionFactory.of( + "The statement is not a non-returning DML or DDL statement", Code.INVALID_ARGUMENT); + } + } + + /** + * Adds a THEN RETURN/RETURNING clause to the given statement if the following conditions are all + * met: + * + *

    + *
  1. The generatedKeysColumns is not empty + *
  2. The statement is a DML statement + *
  3. The DML statement does not already contain a THEN RETURN/RETURNING clause + *
+ */ + Statement addReturningToStatement(Statement statement, ImmutableList generatedKeysColumns) + throws SQLException { + if (Preconditions.checkNotNull(generatedKeysColumns).isEmpty()) { + return statement; + } + // Check if the statement is a DML statement or not. + ParsedStatement parsedStatement = getConnection().getParser().parse(statement); + if (parsedStatement.isUpdate() && !parsedStatement.hasReturningClause()) { + if (generatedKeysColumns.size() == 1 + && ALL_COLUMNS.get(0).equals(generatedKeysColumns.get(0))) { + // Add a 'THEN RETURN/RETURNING *' clause to the statement. + return statement.toBuilder() + .replace(statement.getSql() + getReturningAllColumnsClause()) + .build(); + } + // Add a 'THEN RETURN/RETURNING col1, col2, ...' to the statement. + // The column names will be quoted using the dialect-specific identifier quoting character. + return statement.toBuilder() + .replace( + generatedKeysColumns.stream() + .map(this::quoteColumn) + .collect( + Collectors.joining( + ", ", statement.getSql() + getReturningClause() + " ", ""))) + .build(); + } + return statement; + } + + /** Returns the dialect-specific clause for returning values from a DML statement. */ + String getReturningAllColumnsClause() { + switch (getConnection().getDialect()) { + case POSTGRESQL: + return "\nRETURNING *"; + case GOOGLE_STANDARD_SQL: + default: + return "\nTHEN RETURN *"; + } + } + + /** Returns the dialect-specific clause for returning values from a DML statement. */ + String getReturningClause() { + switch (getConnection().getDialect()) { + case POSTGRESQL: + return "\nRETURNING"; + case GOOGLE_STANDARD_SQL: + default: + return "\nTHEN RETURN"; + } + } + + /** Adds dialect-specific quotes to the given column name. 
*/ + String quoteColumn(String column) { + switch (getConnection().getDialect()) { + case POSTGRESQL: + return "\"" + column + "\""; + case GOOGLE_STANDARD_SQL: + default: + return "`" + column + "`"; + } + } + + @Override + public boolean execute(String sql) throws SQLException { + return executeStatement(Statement.of(sql), NO_GENERATED_KEY_COLUMNS); + } + + boolean executeStatement(Statement statement, ImmutableList generatedKeysColumns) + throws SQLException { + checkClosed(); + // This will return the same Statement instance if no THEN RETURN clause is added to the + // statement. + Statement statementWithReturning = addReturningToStatement(statement, generatedKeysColumns); + StatementResult result = execute(statementWithReturning); + switch (result.getResultType()) { + case RESULT_SET: + // Check whether the statement was modified to include a RETURNING clause for generated + // keys. If so, then we return the result as an update count and the rows as the generated + // keys. We can safely use '==', as the addReturningToStatement(..) method returns the same + // instance if no generated keys were requested. 
+ if (statementWithReturning == statement) { + setCurrentResultSet(JdbcResultSet.of(this, result.getResultSet())); + currentUpdateCount = JdbcConstants.STATEMENT_RESULT_SET; + return true; + } + this.currentGeneratedKeys = JdbcResultSet.copyOf(result.getResultSet()); + this.currentUpdateCount = extractUpdateCountAndClose(result.getResultSet()); + return false; + case UPDATE_COUNT: + setCurrentResultSet(null); + currentUpdateCount = result.getUpdateCount(); + return false; + case NO_RESULT: + setCurrentResultSet(null); + currentUpdateCount = JdbcConstants.STATEMENT_NO_RESULT; + return false; + default: + throw JdbcSqlExceptionFactory.of( + "unknown result: " + result.getResultType(), Code.FAILED_PRECONDITION); + } + } + + @Override + public ResultSet getResultSet() throws SQLException { + checkClosed(); + return currentResultSet; + } + + void setCurrentResultSet(ResultSet resultSet) throws SQLException { + if (this.currentResultSet != null) { + this.currentResultSet.close(); + } + this.currentResultSet = resultSet; + } + + /** + * Returns the update count of the last update statement. Will return {@link + * JdbcConstants#STATEMENT_RESULT_SET} if the last statement returned a {@link ResultSet} and will + * return {@link JdbcConstants#STATEMENT_NO_RESULT} if the last statement did not have any return + * value, such as for example DDL statements. + */ + @Override + public int getUpdateCount() throws SQLException { + checkClosed(); + return checkedCast(currentUpdateCount); + } + + /** + * Returns the update count of the last update statement as a {@link Long}. Will return {@link + * JdbcConstants#STATEMENT_RESULT_SET} if the last statement returned a {@link ResultSet} and will + * return {@link JdbcConstants#STATEMENT_NO_RESULT} if the last statement did not have any return + * value, such as for example DDL statements. 
+ */ + @Override + public long getLargeUpdateCount() throws SQLException { + checkClosed(); + return currentUpdateCount; + } + + @Override + public boolean getMoreResults() throws SQLException { + checkClosed(); + return getMoreResults(CLOSE_CURRENT_RESULT); + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + checkClosed(); + if (currentResultSet != null + && !currentResultSet.isClosed() + && (current == CLOSE_CURRENT_RESULT || current == CLOSE_ALL_RESULTS)) { + currentResultSet.close(); + } + currentResultSet = null; + currentUpdateCount = -1L; + return false; + } + + /** This value is set as the value for {@link Options#prefetchChunks(int)} */ + @Override + public void setFetchSize(int rows) throws SQLException { + checkClosed(); + this.fetchSize = rows; + } + + /** This value is set as the value for {@link Options#prefetchChunks(int)} */ + @Override + public int getFetchSize() throws SQLException { + checkClosed(); + return fetchSize; + } + + /** + * Determine the batch type (DML/DDL) based on the sql statement. + * + * @throws SQLException if the sql statement is not allowed for batching. + */ + private BatchType determineStatementBatchType(String sql) throws SQLException { + String sqlWithoutComments = parser.removeCommentsAndTrim(sql); + if (parser.isDdlStatement(sqlWithoutComments)) { + return BatchType.DDL; + } else if (parser.isUpdateStatement(sqlWithoutComments)) { + return BatchType.DML; + } + throw JdbcSqlExceptionFactory.of( + "The statement is not suitable for batching. Only DML and DDL statements are allowed for batching.", + Code.INVALID_ARGUMENT); + } + + /** + * Check that the sql statement is of the same type as the current batch on this statement. If + * there is no active batch on this statement, a batch will be started with the type that is + * determined from the sql statement (DML/DDL). 
 * @throws SQLException if the sql statement is of a different type than the already active batch
 *     on this statement, if the statement is not allowed for batching (i.e. it is a query or a
 *     client side statement) or if the connection of this statement has an active batch.
 */
void checkAndSetBatchType(String sql) throws SQLException {
  BatchType type = determineStatementBatchType(sql);
  if (this.currentBatchType == BatchType.NONE) {
    // The first statement that is added determines the type of the entire batch.
    this.currentBatchType = type;
  } else if (this.currentBatchType != type) {
    throw JdbcSqlExceptionFactory.of(
        "Mixing DML and DDL statements in a batch is not allowed.", Code.INVALID_ARGUMENT);
  }
}

@Override
public void addBatch(String sql) throws SQLException {
  checkClosed();
  // Verifies that the statement is batchable and consistent with the current batch type.
  checkAndSetBatchType(sql);
  batchedStatements.add(Statement.of(sql));
}

@Override
public void clearBatch() throws SQLException {
  checkClosed();
  batchedStatements.clear();
  this.currentBatchType = BatchType.NONE;
}

@Override
public int[] executeBatch() throws SQLException {
  // Delegates to the long[] variant and narrows the update counts to int.
  return convertUpdateCounts(executeBatch(false));
}

public long[] executeLargeBatch() throws SQLException {
  return executeBatch(true);
}

// Executes the currently buffered batch. The 'large' flag only determines whether the update
// counts in a potential batch exception are reported as long[] or narrowed to int[].
private long[] executeBatch(boolean large) throws SQLException {
  checkClosed();
  StatementTimeout originalTimeout = setTemporaryStatementTimeout();
  try {
    switch (this.currentBatchType) {
      case DML:
        try {
          return getConnection().getSpannerConnection().executeBatchUpdate(batchedStatements);
        } catch (SpannerBatchUpdateException e) {
          if (large) {
            throw JdbcSqlExceptionFactory.batchException(e.getUpdateCounts(), e);
          } else {
            throw JdbcSqlExceptionFactory.batchException(
                convertUpdateCounts(e.getUpdateCounts()), e);
          }
        } catch (SpannerException e) {
          throw JdbcSqlExceptionFactory.of(e);
        }
      case DDL:
        try {
          // DDL statements are buffered in a DDL batch on the connection and sent to Spanner as
          // one batch when runBatch() is called.
          getConnection().getSpannerConnection().startBatchDdl();
          for (Statement statement : batchedStatements) {
            execute(statement);
          }
          getConnection().getSpannerConnection().runBatch();
          // DDL statements have no update count; report SUCCESS_NO_INFO for every statement.
          long[] res = new long[batchedStatements.size()];
          Arrays.fill(res, java.sql.Statement.SUCCESS_NO_INFO);
          return res;
        } catch (SpannerBatchUpdateException e) {
          // Mark the statements that did succeed as SUCCESS_NO_INFO; the rest stay EXECUTE_FAILED.
          long[] res = new long[batchedStatements.size()];
          Arrays.fill(res, java.sql.Statement.EXECUTE_FAILED);
          convertUpdateCountsToSuccessNoInfo(e.getUpdateCounts(), res);
          if (large) {
            throw JdbcSqlExceptionFactory.batchException(res, e);
          } else {
            throw JdbcSqlExceptionFactory.batchException(convertUpdateCounts(res), e);
          }
        } catch (SpannerException e) {
          throw JdbcSqlExceptionFactory.of(e);
        }
      case NONE:
        // There is no batch on this statement, this is a no-op.
        return new long[0];
      default:
        throw JdbcSqlExceptionFactory.unsupported(
            String.format("Unknown batch type: %s", this.currentBatchType.name()));
    }
  } finally {
    // The batch is always cleared, also when execution failed.
    resetStatementTimeout(originalTimeout);
    batchedStatements.clear();
    this.currentBatchType = BatchType.NONE;
  }
}

// Narrows long update counts to int, throwing an SQLException on overflow (see checkedCast).
@VisibleForTesting
int[] convertUpdateCounts(long[] updateCounts) throws SQLException {
  int[] res = new int[updateCounts.length];
  for (int index = 0; index < updateCounts.length; index++) {
    res[index] = checkedCast(updateCounts[index]);
  }
  return res;
}

// Maps each positive update count in updateCounts to SUCCESS_NO_INFO in res, and all others to
// EXECUTE_FAILED. res may be longer than updateCounts; trailing entries are left untouched
// (the caller pre-fills them).
@VisibleForTesting
void convertUpdateCountsToSuccessNoInfo(long[] updateCounts, long[] res) {
  Preconditions.checkNotNull(updateCounts);
  Preconditions.checkNotNull(res);
  Preconditions.checkArgument(res.length >= updateCounts.length);
  for (int index = 0; index < updateCounts.length; index++) {
    if (updateCounts[index] > 0L) {
      res[index] = java.sql.Statement.SUCCESS_NO_INFO;
    } else {
      res[index] = java.sql.Statement.EXECUTE_FAILED;
    }
  }
}

@Override
public ResultSet getGeneratedKeys() throws SQLException {
  checkClosed();
  if (this.currentGeneratedKeys == null) {
    // Return an empty result set instead of throwing an exception, as that is what the JDBC spec
    // says we should do. Note that we need to create a new instance every time, as users could in
    // theory call close() on the returned result set.
    this.currentGeneratedKeys =
        JdbcResultSet.of(
            ResultSets.forRows(
                Type.struct(
                    StructField.of("COLUMN_NAME", Type.string()),
                    StructField.of("VALUE", Type.int64())),
                Collections.emptyList()));
  }
  return this.currentGeneratedKeys;
}

@Override
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
  return executeUpdate(
      sql,
      autoGeneratedKeys == java.sql.Statement.RETURN_GENERATED_KEYS
          ? ALL_COLUMNS
          : NO_GENERATED_KEY_COLUMNS);
}

@Override
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
  // This should preferably have returned an error, but the initial version of the driver just
  // accepted and ignored this. Starting to throw an error now would be a breaking change.
  // TODO: Consider throwing an Unsupported error for the next major version bump.
  return executeUpdate(sql, NO_GENERATED_KEY_COLUMNS);
}

@Override
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
  return executeUpdate(
      sql,
      isNullOrEmpty(columnNames) ? NO_GENERATED_KEY_COLUMNS : ImmutableList.copyOf(columnNames));
}

@Override
public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
  return executeLargeUpdate(
      sql,
      autoGeneratedKeys == java.sql.Statement.RETURN_GENERATED_KEYS
          ? ALL_COLUMNS
          : NO_GENERATED_KEY_COLUMNS);
}

@Override
public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException {
  // This should preferably have returned an error, but the initial version of the driver just
  // accepted and ignored this. Starting to throw an error now would be a breaking change.
  // TODO: Consider throwing an Unsupported error for the next major version bump.
  return executeLargeUpdate(sql, NO_GENERATED_KEY_COLUMNS);
}

@Override
public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException {
  return executeLargeUpdate(
      sql,
      isNullOrEmpty(columnNames) ? NO_GENERATED_KEY_COLUMNS : ImmutableList.copyOf(columnNames));
}

@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
  return executeStatement(
      Statement.of(sql),
      autoGeneratedKeys == java.sql.Statement.RETURN_GENERATED_KEYS
          ? ALL_COLUMNS
          : NO_GENERATED_KEY_COLUMNS);
}

@Override
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
  // This should preferably have returned an error, but the initial version of the driver just
  // accepted and ignored this. Starting to throw an error now would be a breaking change.
  // TODO: Consider throwing an Unsupported error for the next major version bump.
  return executeStatement(Statement.of(sql), NO_GENERATED_KEY_COLUMNS);
}

@Override
public boolean execute(String sql, String[] columnNames) throws SQLException {
  return executeStatement(
      Statement.of(sql),
      isNullOrEmpty(columnNames) ? NO_GENERATED_KEY_COLUMNS : ImmutableList.copyOf(columnNames));
}

// Helper for the generated-key overloads: true if no explicit column names were requested.
static boolean isNullOrEmpty(String[] columnNames) {
  return columnNames == null || columnNames.length == 0;
}

@Override
public ResultSet partitionQuery(
    String query, PartitionOptions partitionOptions, QueryOption... options) throws SQLException {
  return runWithStatementTimeout(
      connection ->
          JdbcResultSet.of(
              this, connection.partitionQuery(Statement.of(query), partitionOptions, options)));
}

@Override
public ResultSet runPartition(String encodedPartitionId) throws SQLException {
  return runWithStatementTimeout(
      connection -> JdbcResultSet.of(this, connection.runPartition(encodedPartitionId)));
}

@Override
public CloudSpannerJdbcPartitionedQueryResultSet runPartitionedQuery(
    String query, PartitionOptions partitionOptions, QueryOption... options) throws SQLException {
  return runWithStatementTimeout(
      connection ->
          JdbcPartitionedQueryResultSet.of(
              this,
              connection.runPartitionedQuery(Statement.of(query), partitionOptions, options)));
}
}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcTypeConverter.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcTypeConverter.java
new file mode 100644
index 000000000000..8aee7692d6a4
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcTypeConverter.java
@@ -0,0 +1,633 @@
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.jdbc;

import com.google.cloud.ByteArray;
import com.google.cloud.Date;
import com.google.cloud.Timestamp;
import com.google.cloud.spanner.Type;
import com.google.cloud.spanner.Type.Code;
import com.google.cloud.spanner.Value;
import com.google.common.base.Preconditions;
import com.google.protobuf.AbstractMessage;
import com.google.protobuf.ProtocolMessageEnum;
import java.lang.reflect.Method;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.sql.Array;
import java.sql.SQLException;
import java.sql.Time;
import java.time.Instant;
import java.time.LocalDate;
import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.List;
import java.util.TimeZone;
import java.util.concurrent.TimeUnit;

/** Convenience class for converting values between Java, JDBC and Cloud Spanner. */
class JdbcTypeConverter {
  private static final DateTimeFormatter TIMESTAMP_FORMAT = DateTimeFormatter.ISO_OFFSET_DATE_TIME;
  private static final Charset UTF8 = StandardCharsets.UTF_8;

  // Maps PG_OID to its underlying INT64 representation; all other codes map to themselves.
  // TODO: Remove when this is supported in the Java client library.
  static Code getMainTypeCode(Type type) {
    Preconditions.checkNotNull(type);
    switch (type.getCode()) {
      case PG_OID:
        return Code.INT64;
      default:
        return type.getCode();
    }
  }

  /**
   * Converts the given value from the Google {@link Type} to the Java {@link Class} type. The input
   * value and the {@link Type} must be consistent with each other.
   *
   * @param value The value to convert. This value must be in the default type used for a Cloud
   *     Spanner database type. I.e. if the type argument is {@link Type#string()}, then the input
   *     value must be an instance of {@link java.lang.String}.
   * @param type The type in the database.
   * @param targetType The java class target type to convert to.
   * @return The converted value.
   * @throws SQLException if the given value cannot be converted to the specified type, or if the
   *     input value and input type are not consistent with each other.
   */
  static Object convert(Object value, Type type, Class targetType) throws SQLException {
    JdbcPreconditions.checkArgument(type != null, "type may not be null");
    JdbcPreconditions.checkArgument(targetType != null, "targetType may not be null");
    checkValidTypeAndValueForConvert(type, value);

    if (value == null) {
      return null;
    }
    // No conversion needed if the value is already of the requested type.
    if (value.getClass().equals(targetType)) {
      return value;
    }
    try {
      if (targetType.equals(Value.class)) {
        return convertToSpannerValue(value, type);
      }
      if (targetType.equals(String.class)) {
        if (type.getCode() == Code.BYTES || type.getCode() == Code.PROTO)
          return new String((byte[]) value, UTF8);
        if (type.getCode() == Code.TIMESTAMP) {
          // Timestamps are rendered as ISO offset date-time in the default JVM time zone.
          Timestamp timestamp = Timestamp.of((java.sql.Timestamp) value);
          return TIMESTAMP_FORMAT.format(
              ZonedDateTime.ofInstant(
                  Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos()),
                  ZoneId.systemDefault()));
        }
        return value.toString();
      }
      if (targetType.equals(byte[].class)) {
        if (type.getCode() == Code.BYTES || type.getCode() == Code.PROTO) return value;
        if (type.getCode() == Code.STRING
            || type.getCode() == Code.JSON
            || type.getCode() == Code.PG_JSONB) return ((String) value).getBytes(UTF8);
      }
      if (targetType.equals(Boolean.class)) {
        if (type.getCode() == Code.BOOL) return value;
        if (type.getCode() == Code.INT64 || type.getCode() == Code.ENUM) return (Long) value != 0;
        if (type.getCode() == Code.FLOAT32) {
          return (Float) value != 0f;
        }
        if (type.getCode() == Code.FLOAT64) return (Double) value != 0d;
        // NOTE(review): BigDecimal#equals is scale-sensitive, so e.g. 0.00 converts to true here
        // while 0 converts to false — confirm this is intended (compareTo would treat both as 0).
        if (type.getCode() == Code.NUMERIC) return !value.equals(BigDecimal.ZERO);
        if (type.getCode() == Code.PG_NUMERIC)
          return !AbstractJdbcWrapper.parseBigDecimal((String) value).equals(BigDecimal.ZERO);
      }
      if (targetType.equals(BigDecimal.class)) {
        if (type.getCode() == Code.BOOL) return (Boolean) value ? BigDecimal.ONE : BigDecimal.ZERO;
        if (type.getCode() == Code.INT64 || type.getCode() == Code.ENUM)
          return BigDecimal.valueOf((Long) value);
        if (type.getCode() == Code.NUMERIC) return value;
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.parseBigDecimal((String) value);
      }
      if (targetType.equals(Long.class)) {
        if (type.getCode() == Code.BOOL) return (Boolean) value ? 1L : 0L;
        if (type.getCode() == Code.INT64 || type.getCode() == Code.ENUM) return value;
        if (type.getCode() == Code.NUMERIC)
          return AbstractJdbcWrapper.checkedCastToLong(((BigDecimal) value).toBigInteger());
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.checkedCastToLong(
              AbstractJdbcWrapper.parseBigDecimal((String) value).toBigInteger());
      }
      if (targetType.equals(Integer.class)) {
        if (type.getCode() == Code.BOOL) return (Boolean) value ? 1 : 0;
        if (type.getCode() == Code.INT64 || type.getCode() == Code.ENUM)
          return AbstractJdbcWrapper.checkedCastToInt((Long) value);
        if (type.getCode() == Code.NUMERIC)
          return AbstractJdbcWrapper.checkedCastToInt(((BigDecimal) value).toBigInteger());
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.checkedCastToInt(
              AbstractJdbcWrapper.parseBigDecimal((String) value).toBigInteger());
      }
      if (targetType.equals(Short.class)) {
        // NOTE(review): this BOOL branch returns an Integer (1 : 0), not a Short — confirm
        // callers accept that, or whether this should be (short) 1 : (short) 0.
        if (type.getCode() == Code.BOOL) return (Boolean) value ? 1 : 0;
        if (type.getCode() == Code.INT64 || type.getCode() == Code.ENUM)
          return AbstractJdbcWrapper.checkedCastToShort((Long) value);
        if (type.getCode() == Code.NUMERIC)
          return AbstractJdbcWrapper.checkedCastToShort(((BigDecimal) value).toBigInteger());
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.checkedCastToShort(
              AbstractJdbcWrapper.parseBigDecimal((String) value).toBigInteger());
      }
      if (targetType.equals(Byte.class)) {
        // NOTE(review): as with Short above, the BOOL branch returns an Integer — confirm.
        if (type.getCode() == Code.BOOL) return (Boolean) value ? 1 : 0;
        if (type.getCode() == Code.INT64 || type.getCode() == Code.ENUM)
          return AbstractJdbcWrapper.checkedCastToByte((Long) value);
        if (type.getCode() == Code.NUMERIC)
          return AbstractJdbcWrapper.checkedCastToByte(((BigDecimal) value).toBigInteger());
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.checkedCastToByte(
              AbstractJdbcWrapper.parseBigDecimal((String) value).toBigInteger());
      }
      if (targetType.equals(BigInteger.class)) {
        if (type.getCode() == Code.BOOL) return (Boolean) value ? BigInteger.ONE : BigInteger.ZERO;
        if (type.getCode() == Code.INT64 || type.getCode() == Code.ENUM)
          return BigInteger.valueOf((Long) value);
        if (type.getCode() == Code.NUMERIC)
          return AbstractJdbcWrapper.checkedCastToBigInteger((BigDecimal) value);
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.checkedCastToBigInteger(
              AbstractJdbcWrapper.parseBigDecimal((String) value));
      }
      if (targetType.equals(Float.class)) {
        if (type.getCode() == Code.BOOL)
          return (Boolean) value ? Float.valueOf(1f) : Float.valueOf(0f);
        if (type.getCode() == Code.FLOAT32) {
          return value;
        }
        if (type.getCode() == Code.FLOAT64)
          return AbstractJdbcWrapper.checkedCastToFloat((Double) value);
        if (type.getCode() == Code.NUMERIC) return ((BigDecimal) value).floatValue();
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.parseFloat((String) value);
      }
      if (targetType.equals(Double.class)) {
        if (type.getCode() == Code.BOOL)
          return (Boolean) value ? Double.valueOf(1d) : Double.valueOf(0d);
        if (type.getCode() == Code.FLOAT64 || type.getCode() == Code.FLOAT32) {
          return value;
        }
        if (type.getCode() == Code.NUMERIC) return ((BigDecimal) value).doubleValue();
        if (type.getCode() == Code.PG_NUMERIC)
          return AbstractJdbcWrapper.parseDouble((String) value);
      }
      if (targetType.equals(java.sql.Date.class)) {
        if (type.getCode() == Code.DATE) return value;
      }
      if (targetType.equals(LocalDate.class)) {
        if (type.getCode() == Code.DATE) {
          return ((java.sql.Date) value).toLocalDate();
        }
      }
      if (targetType.equals(java.sql.Timestamp.class)) {
        if (type.getCode() == Code.TIMESTAMP) return value;
      }
      if (targetType.equals(OffsetDateTime.class)) {
        if (type.getCode() == Code.TIMESTAMP) {
          Timestamp timestamp = Timestamp.of((java.sql.Timestamp) value);
          return OffsetDateTime.ofInstant(
              Instant.ofEpochSecond(timestamp.getSeconds(), timestamp.getNanos()),
              ZoneId.systemDefault());
        }
      }
      if (AbstractMessage.class.isAssignableFrom(targetType)) {
        if (type.getCode() == Code.PROTO || type.getCode() == Code.BYTES) {
          // Invokes the generated static parseFrom(byte[]) on the concrete message class.
          Method parseMethodParseFrom = targetType.getMethod("parseFrom", byte[].class);
          return targetType.cast(parseMethodParseFrom.invoke(null, value));
        }
      }
      if (ProtocolMessageEnum.class.isAssignableFrom(targetType)) {
        if (type.getCode() == Code.ENUM || type.getCode() == Code.INT64) {
          // Invokes the generated static forNumber(int) on the concrete enum class.
          Method parseMethodForNumber = targetType.getMethod("forNumber", int.class);
          return targetType.cast(
              parseMethodForNumber.invoke(
                  null, AbstractJdbcWrapper.checkedCastToInt((Long) value)));
        }
      }
      if (targetType.equals(java.sql.Array.class)) {
        if (type.getCode() == Code.ARRAY) return value;
      }
      if (targetType.isArray() && type.getCode() == Code.ARRAY) {
        if (type.getArrayElementType().getCode() == Code.PROTO
            || type.getArrayElementType().getCode() == Code.BYTES) {
          Object res = convertArrayOfProtoMessage(value, targetType);
          if (res != null) {
            return res;
          }
        }
        if (type.getArrayElementType().getCode() == Code.ENUM
            || type.getArrayElementType().getCode() == Code.INT64) {
          Object res = convertArrayOfProtoEnum(value, targetType);
          if (res != null) {
            return res;
          }
        }
      }
    } catch (SQLException e) {
      // Pass well-formed conversion errors through unchanged.
      throw e;
    } catch (Exception e) {
      throw JdbcSqlExceptionFactory.of(
          "Cannot convert " + value + " to " + targetType.getName(),
          com.google.rpc.Code.INVALID_ARGUMENT,
          e);
    }
    // Falling through all branches means the (type, targetType) combination is unsupported.
    throw JdbcSqlExceptionFactory.of(
        "Cannot convert " + type.getCode().name() + " to " + targetType.getName(),
        com.google.rpc.Code.INVALID_ARGUMENT);
  }

  /**
   * Converts the given value to the Java {@link Class} type. The targetType {@link Class} must be
   * an array of {@link AbstractMessage}.
   */
  static Object convertArrayOfProtoMessage(Object value, Class targetType) throws Exception {
    Class componentType = targetType.getComponentType();
    if (AbstractMessage.class.isAssignableFrom(componentType)) {
      byte[][] result = (byte[][]) ((JdbcArray) value).getArray();
      Object obj = java.lang.reflect.Array.newInstance(componentType, result.length);
      Method parseMethodParseFrom = componentType.getMethod("parseFrom", byte[].class);
      for (int i = 0; i < result.length; i++) {
        // Null elements in the source array stay null in the target array.
        if (result[i] != null) {
          java.lang.reflect.Array.set(
              obj, i, componentType.cast(parseMethodParseFrom.invoke(null, result[i])));
        }
      }
      return obj;
    }
    return null;
  }

  /**
   * Converts the given value to the Java {@link Class} type. The targetType {@link Class} must be
   * an array of {@link ProtocolMessageEnum}.
   */
  static Object convertArrayOfProtoEnum(Object value, Class targetType) throws Exception {
    Class componentType = targetType.getComponentType();
    if (ProtocolMessageEnum.class.isAssignableFrom(componentType)) {
      Long[] result = (Long[]) ((JdbcArray) value).getArray();
      Object obj = java.lang.reflect.Array.newInstance(componentType, result.length);
      Method parseMethodForNumber = componentType.getMethod("forNumber", int.class);
      for (int i = 0; i < result.length; i++) {
        // Null elements in the source array stay null in the target array.
        if (result[i] != null) {
          java.lang.reflect.Array.set(
              obj,
              i,
              componentType.cast(
                  parseMethodForNumber.invoke(
                      null, AbstractJdbcWrapper.checkedCastToInt(result[i]))));
        }
      }
      return obj;
    }
    return null;
  }

  // Wraps a default-typed Java value into a Cloud Spanner Value of the given database type.
  private static Value convertToSpannerValue(Object value, Type type) throws SQLException {
    switch (type.getCode()) {
      case ARRAY:
        switch (type.getArrayElementType().getCode()) {
          case BOOL:
            return Value.boolArray(Arrays.asList((Boolean[]) ((java.sql.Array) value).getArray()));
          case BYTES:
            return Value.bytesArray(toGoogleBytes((byte[][]) ((java.sql.Array) value).getArray()));
          case PROTO:
            return Value.protoMessageArray(
                toGoogleBytes((byte[][]) ((java.sql.Array) value).getArray()),
                type.getArrayElementType().getProtoTypeFqn());
          case DATE:
            return Value.dateArray(
                toGoogleDates((java.sql.Date[]) ((java.sql.Array) value).getArray()));
          case FLOAT32:
            return Value.float32Array(Arrays.asList((Float[]) ((java.sql.Array) value).getArray()));
          case FLOAT64:
            return Value.float64Array(
                Arrays.asList((Double[]) ((java.sql.Array) value).getArray()));
          case INT64:
            return Value.int64Array(Arrays.asList((Long[]) ((java.sql.Array) value).getArray()));
          case ENUM:
            return Value.protoEnumArray(
                Arrays.asList((Long[]) ((java.sql.Array) value).getArray()),
                type.getArrayElementType().getProtoTypeFqn());
          case PG_OID:
            return Value.pgOidArray(Arrays.asList((Long[]) ((java.sql.Array) value).getArray()));
          case NUMERIC:
            return Value.numericArray(
                Arrays.asList((BigDecimal[]) ((java.sql.Array) value).getArray()));
          case PG_NUMERIC:
            return Value.pgNumericArray(
                Arrays.asList((String[]) ((java.sql.Array) value).getArray()));
          case STRING:
            return Value.stringArray(Arrays.asList((String[]) ((java.sql.Array) value).getArray()));
          case TIMESTAMP:
            return Value.timestampArray(
                toGoogleTimestamps((java.sql.Timestamp[]) ((java.sql.Array) value).getArray()));
          case JSON:
            return Value.jsonArray(Arrays.asList((String[]) ((java.sql.Array) value).getArray()));
          case PG_JSONB:
            return Value.pgJsonbArray(
                Arrays.asList((String[]) ((java.sql.Array) value).getArray()));
          case STRUCT:
          default:
            throw JdbcSqlExceptionFactory.of(
                "invalid argument: " + value, com.google.rpc.Code.INVALID_ARGUMENT);
        }
      case BOOL:
        return Value.bool((Boolean) value);
      case BYTES:
        return Value.bytes(ByteArray.copyFrom((byte[]) value));
      case DATE:
        return Value.date(toGoogleDate((java.sql.Date) value));
      case FLOAT32:
        return Value.float32((Float) value);
      case FLOAT64:
        return Value.float64((Double) value);
      case INT64:
        return Value.int64((Long) value);
      case PG_OID:
        return Value.pgOid((Long) value);
      case NUMERIC:
        return Value.numeric((BigDecimal) value);
      case PG_NUMERIC:
        return Value.pgNumeric(value == null ? null : value.toString());
      case STRING:
        return Value.string((String) value);
      case TIMESTAMP:
        return Value.timestamp(toGoogleTimestamp((java.sql.Timestamp) value));
      case JSON:
        return Value.json((String) value);
      case PG_JSONB:
        return Value.pgJsonb((String) value);
      case PROTO:
        return Value.protoMessage(ByteArray.copyFrom((byte[]) value), type.getProtoTypeFqn());
      case ENUM:
        return Value.protoEnum((Long) value, type.getProtoTypeFqn());
      case STRUCT:
      default:
        throw JdbcSqlExceptionFactory.of(
            "invalid argument: " + value, com.google.rpc.Code.INVALID_ARGUMENT);
    }
  }

  // Validates that the runtime class of value matches the default Java class for the given
  // database type. A null value is always valid.
  // NOTE(review): JSON, PG_JSONB, PG_NUMERIC and PG_OID are not checked here — confirm that is
  // intentional (their default representations overlap with STRING/INT64).
  private static void checkValidTypeAndValueForConvert(Type type, Object value)
      throws SQLException {
    if (value == null) {
      return;
    }
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.ARRAY || Array.class.isAssignableFrom(value.getClass()),
        "input type is array, but input value is not an instance of java.sql.Array");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.BOOL || value.getClass().equals(Boolean.class),
        "input type is bool, but input value is not an instance of Boolean");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.BYTES || value.getClass().equals(byte[].class),
        "input type is bytes, but input value is not an instance of byte[]");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.DATE || value.getClass().equals(java.sql.Date.class),
        "input type is date, but input value is not an instance of java.sql.Date");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.FLOAT32 || value.getClass().equals(Float.class),
        "input type is float32, but input value is not an instance of Float");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.FLOAT64 || value.getClass().equals(Double.class),
        "input type is float64, but input value is not an instance of Double");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.INT64 || value.getClass().equals(Long.class),
        "input type is int64, but input value is not an instance of Long");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.STRING || value.getClass().equals(String.class),
        "input type is string, but input value is not an instance of String");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.TIMESTAMP || value.getClass().equals(java.sql.Timestamp.class),
        "input type is timestamp, but input value is not an instance of java.sql.Timestamp");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.NUMERIC || value.getClass().equals(BigDecimal.class),
        "input type is numeric, but input value is not an instance of BigDecimal");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.PROTO || value.getClass().equals(byte[].class),
        "input type is proto, but input value is not an instance of byte[]");
    JdbcPreconditions.checkArgument(
        type.getCode() != Code.ENUM || value.getClass().equals(Long.class),
        "input type is enum, but input value is not an instance of Long");
  }

  @SuppressWarnings("deprecation")
  static Date toGoogleDate(java.sql.Date date) {
    return date == null
        ? null
        : Date.fromYearMonthDay(date.getYear() + 1900, date.getMonth() + 1, date.getDate());
  }

  // A Time carries no date component; it is pinned to the epoch date 1970-01-01.
  static Date toGoogleDate(java.sql.Time date) {
    return Date.fromYearMonthDay(1970, 1, 1);
  }

  @SuppressWarnings("deprecation")
  static Date toGoogleDate(java.sql.Timestamp date) {
    return date == null
        ? null
        : Date.fromYearMonthDay(date.getYear() + 1900, date.getMonth() + 1, date.getDate());
  }

  static List toGoogleDates(java.sql.Date[] dates) {
    List res = new ArrayList<>(dates.length);
    for (java.sql.Date date : dates) {
      res.add(toGoogleDate(date));
    }
    return res;
  }

  static java.sql.Date toSqlDate(Date date) {
    return toSqlDate(date, Calendar.getInstance());
  }

  // Interprets the (year, month, day) of the Google Date at midnight in the time zone of the
  // given Calendar.
  static java.sql.Date toSqlDate(Date date, Calendar cal) {
    if (date != null) {
      //noinspection MagicConstant
      cal.set(date.getYear(), date.getMonth() - 1, date.getDayOfMonth(), 0, 0, 0);
      cal.clear(Calendar.MILLISECOND);
      return new java.sql.Date(cal.getTimeInMillis());
    }
    return null;
  }

  static List toSqlDates(List dates) {
    List res = new ArrayList<>(dates.size());
    for (Date date : dates) res.add(toSqlDate(date));
    return res;
  }

  static java.sql.Timestamp toSqlTimestamp(Timestamp ts) {
    return ts == null ? null : ts.toSqlTimestamp();
  }

  static java.sql.Timestamp toSqlTimestamp(Date date) {
    return date == null ? null : new java.sql.Timestamp(toSqlDate(date).getTime());
  }

  static java.sql.Timestamp toSqlTimestamp(Date date, Calendar cal) {
    return date == null ? null : new java.sql.Timestamp(toSqlDate(date, cal).getTime());
  }

  static java.sql.Timestamp getAsSqlTimestamp(Timestamp ts, Calendar cal) {
    return ts == null ? null : getTimestampInCalendar(ts.toSqlTimestamp(), cal);
  }

  static java.sql.Timestamp getTimestampInCalendar(java.sql.Timestamp sqlTs, Calendar cal) {
    return getOrSetTimestampInCalendar(sqlTs, cal, GetOrSetTimestampInCalendar.GET);
  }

  static java.sql.Timestamp setTimestampInCalendar(java.sql.Timestamp sqlTs, Calendar cal) {
    return getOrSetTimestampInCalendar(sqlTs, cal, GetOrSetTimestampInCalendar.SET);
  }

  // Direction of the time zone shift applied in getOrSetTimestampInCalendar.
  private enum GetOrSetTimestampInCalendar {
    GET,
    SET
  }

  private static java.sql.Timestamp getOrSetTimestampInCalendar(
      java.sql.Timestamp sqlTs, Calendar cal, GetOrSetTimestampInCalendar getOrSet) {
    if (sqlTs != null) {
      // Get a calendar in the requested timezone
      Calendar newCal = Calendar.getInstance(cal.getTimeZone());
      // set the millisecond time on this calendar from the timestamp
      newCal.setTimeInMillis(sqlTs.getTime());

      TimeZone timeZone = newCal.getTimeZone();
      long totalMillis = newCal.getTimeInMillis();
      // to calculate the offset for DST correctly, we need to add DST savings and check if
      // given epoch milli is in daylight savings time.
      if (getOrSet == GetOrSetTimestampInCalendar.GET) {
        totalMillis += timeZone.getRawOffset() + timeZone.getDSTSavings();
      }

      // then shift the time of the calendar by the difference between UTC and the timezone of the
      // given calendar
      int offset = newCal.getTimeZone().getOffset(totalMillis);
      newCal.add(
          Calendar.MILLISECOND, getOrSet == GetOrSetTimestampInCalendar.GET ? offset : -offset);
      // then use that to create a sql timestamp
      java.sql.Timestamp res = new java.sql.Timestamp(newCal.getTimeInMillis());
      // set the nanosecond value that will also set the millisecond value of the timestamp
      // as the nanosecond value contains all fraction of a second information
      res.setNanos(sqlTs.getNanos());
      return res;
    }
    return null;
  }

  static List toSqlTimestamps(List timestamps) {
    List res = new ArrayList<>(timestamps.size());
    for (Timestamp timestamp : timestamps) {
      res.add(toSqlTimestamp(timestamp));
    }
    return res;
  }

  // NOTE(review): for instants before the epoch the truncating division yields negative nanos
  // here — verify behavior for pre-1970 java.util.Date values.
  static Timestamp toGoogleTimestamp(java.util.Date ts) {
    if (ts != null) {
      long milliseconds = ts.getTime();
      long seconds = milliseconds / 1000L;
      long nanos = (milliseconds - (seconds * 1000)) * 1000000;
      return com.google.cloud.Timestamp.ofTimeSecondsAndNanos(seconds, (int) nanos);
    }
    return null;
  }

  static Timestamp toGoogleTimestamp(java.sql.Timestamp ts) {
    if (ts != null) {
      long milliseconds = ts.getTime();
      long seconds = milliseconds / 1000L;
      // java.sql.Timestamp keeps sub-second precision in its nanos field.
      int nanos = ts.getNanos();
      return com.google.cloud.Timestamp.ofTimeSecondsAndNanos(seconds, nanos);
    }
    return null;
  }

  static List toGoogleTimestamps(java.sql.Timestamp[] timestamps) {
    List res = new ArrayList<>(timestamps.length);
    for (java.sql.Timestamp timestamp : timestamps) {
      res.add(toGoogleTimestamp(timestamp));
    }
    return res;
  }

  @SuppressWarnings("deprecation")
  static Time toSqlTime(Timestamp ts) {
    if (ts != null) {
      java.sql.Timestamp sqlTs = toSqlTimestamp(ts);
      Time time = new Time(sqlTs.getHours(), sqlTs.getMinutes(), sqlTs.getSeconds());
      // Add back the sub-second fraction, which the Time constructor does not accept.
      time.setTime(
          time.getTime() + TimeUnit.MILLISECONDS.convert(sqlTs.getNanos(), TimeUnit.NANOSECONDS));
      return time;
    }
    return null;
  }

  @SuppressWarnings("deprecation")
  static Time toSqlTime(Timestamp ts, Calendar cal) {
    if (ts != null) {
      java.sql.Timestamp sqlTs = getAsSqlTimestamp(ts, cal);
      Time time = new Time(sqlTs.getHours(), sqlTs.getMinutes(), sqlTs.getSeconds());
      // Add back the sub-second fraction, which the Time constructor does not accept.
      time.setTime(
          time.getTime() + TimeUnit.MILLISECONDS.convert(sqlTs.getNanos(), TimeUnit.NANOSECONDS));
      return time;
    }
    return null;
  }

  @SuppressWarnings("deprecation")
  static java.sql.Time parseSqlTime(String val, Calendar cal) {
    if (val != null) {
      Time time = Time.valueOf(val);
      // Pin the parsed time-of-day to the epoch date in the calendar's time zone.
      cal.set(1970, Calendar.JANUARY, 1, time.getHours(), time.getMinutes(), time.getSeconds());
      cal.clear(Calendar.MILLISECOND);
      return new java.sql.Time(cal.getTimeInMillis());
    }
    return null;
  }

  static List toGoogleBytes(byte[][] bytes) {
    List res = new ArrayList<>(bytes.length);
    for (byte[] aByte : bytes) {
      res.add(aByte == null ? null : ByteArray.copyFrom(aByte));
    }
    return res;
  }

  static List toJavaByteArrays(List bytes) {
    List res = new ArrayList<>(bytes.size());
    for (ByteArray ba : bytes) {
      res.add(ba == null ? null : ba.toByteArray());
    }
    return res;
  }
}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JsonType.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JsonType.java
new file mode 100644
index 000000000000..747f8732ead3
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JsonType.java
@@ -0,0 +1,63 @@
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.spanner.v1.TypeCode; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.SQLType; + +/** + * Custom SQL type for Spanner JSON data type. This type (or the vendor type number) must be used + * when setting a JSON parameter using {@link PreparedStatement#setObject(int, Object, SQLType)}. + */ +public class JsonType implements SQLType { + public static final JsonType INSTANCE = new JsonType(); + + /** + * Spanner does not have any type numbers, but the code values are unique. Add 100,000 to avoid + * conflicts with the type numbers in java.sql.Types. + */ + public static final int VENDOR_TYPE_NUMBER = 100_000 + TypeCode.JSON_VALUE; + + /** + * Define a short type number as well, as this is what is expected to be returned in {@link + * DatabaseMetaData#getTypeInfo()}. + */ + public static final short SHORT_VENDOR_TYPE_NUMBER = (short) VENDOR_TYPE_NUMBER; + + private JsonType() {} + + @Override + public String getName() { + return "JSON"; + } + + @Override + public String getVendor() { + return JsonType.class.getPackage().getName(); + } + + @Override + public Integer getVendorTypeNumber() { + return VENDOR_TYPE_NUMBER; + } + + public String toString() { + return getName(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/Metrics.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/Metrics.java new file mode 100644 index 000000000000..77f238466ebe --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/Metrics.java @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.spanner.jdbc; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.util.Arrays; +import java.util.List; + +class Metrics { + static final String INSTRUMENTATION_SCOPE = "cloud.google.com/java"; + static final String SPANNER_CLIENT_LIB_LATENCY = "spanner/jdbc/client_lib_latencies"; + static final String SPANNER_CLIENT_LIB_LATENCY_DESCRIPTION = + "Latency when the client library receives a call and returns a response"; + + private final LongHistogram spannerClientLibLatencies; + + Metrics(OpenTelemetry openTelemetry) { + Meter meter = openTelemetry.getMeter(INSTRUMENTATION_SCOPE); + List RPC_MILLIS_BUCKET_BOUNDARIES = + Arrays.asList( + 1L, 2L, 3L, 4L, 5L, 6L, 8L, 10L, 13L, 16L, 20L, 25L, 30L, 40L, 50L, 65L, 80L, 100L, + 130L, 160L, 200L, 250L, 300L, 400L, 500L, 650L, 800L, 1000L, 2000L, 5000L, 10000L, + 20000L, 50000L, 100000L); + spannerClientLibLatencies = + meter + .histogramBuilder(SPANNER_CLIENT_LIB_LATENCY) + .ofLongs() + .setDescription(SPANNER_CLIENT_LIB_LATENCY_DESCRIPTION) + .setUnit("ms") + .setExplicitBucketBoundariesAdvice(RPC_MILLIS_BUCKET_BOUNDARIES) + .build(); + } + + void recordClientLibLatency(long value, Attributes attributes) { + spannerClientLibLatencies.record(value, attributes); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/PgJsonbType.java 
b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/PgJsonbType.java new file mode 100644 index 000000000000..d447233ca7c0 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/PgJsonbType.java @@ -0,0 +1,59 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.spanner.v1.TypeCode; +import java.sql.DatabaseMetaData; +import java.sql.SQLType; + +public class PgJsonbType implements SQLType { + public static final PgJsonbType INSTANCE = new PgJsonbType(); + + /** + * Spanner/Spangres does not have any type numbers, but the code values are unique. Add 200,000 to + * avoid conflicts with the type numbers in java.sql.Types. Native Cloud Spanner types already use + * the range starting at 100,000 (see {@link JsonType}). + */ + public static final int VENDOR_TYPE_NUMBER = 200_000 + TypeCode.JSON_VALUE; + + /** + * Define a short type number as well, as this is what is expected to be returned in {@link + * DatabaseMetaData#getTypeInfo()}. 
+ */ + public static final short SHORT_VENDOR_TYPE_NUMBER = (short) VENDOR_TYPE_NUMBER; + + private PgJsonbType() {} + + @Override + public String getName() { + return "JSONB"; + } + + @Override + public String getVendor() { + return PgJsonbType.class.getPackage().getName(); + } + + @Override + public Integer getVendorTypeNumber() { + return VENDOR_TYPE_NUMBER; + } + + public String toString() { + return getName(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ProtoEnumType.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ProtoEnumType.java new file mode 100644 index 000000000000..7c710bc90d71 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ProtoEnumType.java @@ -0,0 +1,63 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.spanner.v1.TypeCode; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.SQLType; + +/** + * Custom SQL type for Spanner PROTO data type. This type (or the vendor type number) must be used + * when setting a PROTO parameter using {@link PreparedStatement#setObject(int, Object, SQLType)}. + */ +public class ProtoEnumType implements SQLType { + public static final ProtoEnumType INSTANCE = new ProtoEnumType(); + + /** + * Spanner does not have any type numbers, but the code values are unique. 
Add 100,000 to avoid + * conflicts with the type numbers in java.sql.Types. + */ + public static final int VENDOR_TYPE_NUMBER = 100_000 + TypeCode.ENUM_VALUE; + + /** + * Define a short type number as well, as this is what is expected to be returned in {@link + * DatabaseMetaData#getTypeInfo()}. + */ + public static final short SHORT_VENDOR_TYPE_NUMBER = (short) VENDOR_TYPE_NUMBER; + + private ProtoEnumType() {} + + @Override + public String getName() { + return "ENUM"; + } + + @Override + public String getVendor() { + return ProtoEnumType.class.getPackage().getName(); + } + + @Override + public Integer getVendorTypeNumber() { + return VENDOR_TYPE_NUMBER; + } + + public String toString() { + return getName(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ProtoMessageType.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ProtoMessageType.java new file mode 100644 index 000000000000..df5fd98a2036 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ProtoMessageType.java @@ -0,0 +1,63 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.spanner.v1.TypeCode; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.SQLType; + +/** + * Custom SQL type for Spanner PROTO data type. 
This type (or the vendor type number) must be used + * when setting a PROTO parameter using {@link PreparedStatement#setObject(int, Object, SQLType)}. + */ +public class ProtoMessageType implements SQLType { + public static final ProtoMessageType INSTANCE = new ProtoMessageType(); + + /** + * Spanner does not have any type numbers, but the code values are unique. Add 100,000 to avoid + * conflicts with the type numbers in java.sql.Types. + */ + public static final int VENDOR_TYPE_NUMBER = 100_000 + TypeCode.PROTO_VALUE; + + /** + * Define a short type number as well, as this is what is expected to be returned in {@link + * DatabaseMetaData#getTypeInfo()}. + */ + public static final short SHORT_VENDOR_TYPE_NUMBER = (short) VENDOR_TYPE_NUMBER; + + private ProtoMessageType() {} + + @Override + public String getName() { + return "PROTO"; + } + + @Override + public String getVendor() { + return ProtoMessageType.class.getPackage().getName(); + } + + @Override + public Integer getVendorTypeNumber() { + return VENDOR_TYPE_NUMBER; + } + + public String toString() { + return getName(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/SpannerPool.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/SpannerPool.java new file mode 100644 index 000000000000..2122d7dc4028 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/SpannerPool.java @@ -0,0 +1,33 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +/** + * @see com.google.cloud.spanner.connection.SpannerPool + */ +@Deprecated +public class SpannerPool { + private SpannerPool() {} + + /** + * @see com.google.cloud.spanner.connection.SpannerPool#closeSpannerPool() + */ + @Deprecated + public static void closeSpannerPool() { + com.google.cloud.spanner.connection.SpannerPool.closeSpannerPool(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/TransactionRetryListener.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/TransactionRetryListener.java new file mode 100644 index 000000000000..d837e7364dfa --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/TransactionRetryListener.java @@ -0,0 +1,80 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import com.google.api.core.InternalApi; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.AbortedException; + +/** Use {@link com.google.cloud.spanner.connection.TransactionRetryListener} */ +@InternalApi +@Deprecated +public interface TransactionRetryListener { + /** Use {@link com.google.cloud.spanner.connection.TransactionRetryListener.RetryResult} */ + @InternalApi + @Deprecated + enum RetryResult { + /** The retry executed successfully and the transaction will continue. */ + RETRY_SUCCESSFUL, + /** The retry was aborted by Spanner and another retry attempt will be started. */ + RETRY_ABORTED_AND_RESTARTING, + /** + * The retry was aborted by the {@link java.sql.Connection} because of a concurrent + * modification. The transaction cannot continue and will throw an {@link + * AbortedDueToConcurrentModificationException}. + */ + RETRY_ABORTED_DUE_TO_CONCURRENT_MODIFICATION, + /** + * The retry was aborted by Spanner and the maximum number of retry attempts allowed has been + * exceeded. The transaction cannot continue and will throw an {@link AbortedException}. + */ + RETRY_ABORTED_AND_MAX_ATTEMPTS_EXCEEDED, + /** + * An unexpected error occurred during transaction retry, the transaction cannot continue and + * will throw an exception. + */ + RETRY_ERROR + } + + /** + * This method is called when a retry is about to start. + * + * @param transactionStarted The start date/time of the transaction that is retrying. + * @param transactionId An internally assigned ID of the transaction (unique during the lifetime + * of the JVM) that can be used to identify the transaction for logging purposes. + * @param retryAttempt The number of retry attempts the current transaction has executed, + * including the current retry attempt. 
+ */ + void retryStarting(Timestamp transactionStarted, long transactionId, int retryAttempt); + + /** + * This method is called when a retry has finished. + * + * @param transactionStarted The start date/time of the transaction that is retrying. + * @param transactionId An internally assigned ID of the transaction (unique during the lifetime + * of the JVM) that can be used to identify the transaction for logging purposes. + * @param retryAttempt The number of retry attempts the current transaction has executed, + * including the current retry attempt. + * @param result The result of the retry indicating whether the retry was successful or not. + */ + void retryFinished( + Timestamp transactionStarted, + long transactionId, + int retryAttempt, + TransactionRetryListener.RetryResult result); +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/UuidType.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/UuidType.java new file mode 100644 index 000000000000..22bf3666b8b7 --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/UuidType.java @@ -0,0 +1,63 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.spanner.v1.TypeCode; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.SQLType; + +/** + * Custom SQL type for Spanner UUID data type. 
This type (or the vendor type number) must be used + * when setting a UUID parameter using {@link PreparedStatement#setObject(int, Object, SQLType)}. + */ +public class UuidType implements SQLType { + public static final UuidType INSTANCE = new UuidType(); + + /** + * Spanner does not have any type numbers, but the code values are unique. Add 100,000 to avoid + * conflicts with the type numbers in java.sql.Types. + */ + public static final int VENDOR_TYPE_NUMBER = 100_000 + TypeCode.UUID_VALUE; + + /** + * Define a short type number as well, as this is what is expected to be returned in {@link + * DatabaseMetaData#getTypeInfo()}. + */ + public static final short SHORT_VENDOR_TYPE_NUMBER = (short) VENDOR_TYPE_NUMBER; + + private UuidType() {} + + @Override + public String getName() { + return "UUID"; + } + + @Override + public String getVendor() { + return UuidType.class.getPackage().getName(); + } + + @Override + public Integer getVendorTypeNumber() { + return VENDOR_TYPE_NUMBER; + } + + public String toString() { + return getName(); + } +} diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/package-info.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/package-info.java new file mode 100644 index 000000000000..27e15386b49d --- /dev/null +++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/package-info.java @@ -0,0 +1,48 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A JDBC driver for Cloud Spanner - A no-compromise relational database service. + * + *

Example for creating a JDBC connection to Cloud Spanner. + * + *

{@code
+ * String projectId = "my-project";
+ * String instanceId = "my-instance";
+ * String databaseId = "my-database";
+ *
+ * try (Connection connection =
+ *     DriverManager.getConnection(
+ *         String.format(
+ *             "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s",
+ *             projectId, instanceId, databaseId))) {
+ *   try (Statement statement = connection.createStatement()) {
+ *     try (ResultSet rs = statement.executeQuery("SELECT CURRENT_TIMESTAMP()")) {
+ *       while (rs.next()) {
+ *         System.out.printf(
+ *             "Connected to Cloud Spanner at [%s]%n", rs.getTimestamp(1).toString());
+ *       }
+ *     }
+ *   }
+ * }
+ * }
+ * + * @see JdbcDriver + * java doc for all supported connection URL properties. + * @see Cloud Spanner JDBC Driver + */ +package com.google.cloud.spanner.jdbc; diff --git a/java-spanner-jdbc/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner-jdbc/native-image.properties b/java-spanner-jdbc/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner-jdbc/native-image.properties new file mode 100644 index 000000000000..f9e42bad8bff --- /dev/null +++ b/java-spanner-jdbc/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner-jdbc/native-image.properties @@ -0,0 +1,19 @@ +Args = --initialize-at-build-time=com.google.cloud.spanner.IntegrationTestEnv,\ + com.google.cloud.spanner.jdbc.it.JdbcIntegrationTestEnv,\ + com.google.cloud.spanner.jdbc.it.DialectTestParameter,\ + com.google.common.collect.RegularImmutableMap,\ + com.google.common.collect.ImmutableMapEntry,\ + com.google.common.collect.ImmutableMapEntry$NonTerminalImmutableMapEntry,\ + com.google.common.collect.SingletonImmutableBiMap,\ + com.google.cloud.spanner.Dialect,\ + com.google.cloud.spanner.Dialect$2,\ + com.google.cloud.spanner.Dialect$1,\ + com.google.spanner.admin.database.v1.DatabaseDialect,\ + com.google.spanner.admin.database.v1.DatabaseDialect$1,\ + org.junit.runners.MethodSorters,\ + org.junit.runners.Parameterized,\ + org.junit.runner.RunWith,\ + org.junit.FixMethodOrder,\ + org.junit.experimental.categories.Category,\ + org.junit.runners.parameterized.BlockJUnit4ClassRunnerWithParameters,\ + org.junit.runners.model.FrameworkField \ No newline at end of file diff --git a/java-spanner-jdbc/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner-jdbc/resource-config.json b/java-spanner-jdbc/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner-jdbc/resource-config.json new file mode 100644 index 000000000000..2a215e4434c5 --- /dev/null +++ 
b/java-spanner-jdbc/src/main/resources/META-INF/native-image/com.google.cloud/google-cloud-spanner-jdbc/resource-config.json @@ -0,0 +1,9 @@ +{ + "resources":{ + "includes":[ + {"pattern":".*.sql"}, + {"pattern":".*.json"}, + {"pattern":".*.txt"} + ] + } +} \ No newline at end of file diff --git a/java-spanner-jdbc/src/main/resources/META-INF/services/java.sql.Driver b/java-spanner-jdbc/src/main/resources/META-INF/services/java.sql.Driver new file mode 100644 index 000000000000..5a8873f221ee --- /dev/null +++ b/java-spanner-jdbc/src/main/resources/META-INF/services/java.sql.Driver @@ -0,0 +1 @@ +com.google.cloud.spanner.jdbc.JdbcDriver diff --git a/java-spanner-jdbc/src/main/resources/com/google/cloud/spanner/jdbc/DatabaseMetaData_GetColumns.sql b/java-spanner-jdbc/src/main/resources/com/google/cloud/spanner/jdbc/DatabaseMetaData_GetColumns.sql new file mode 100644 index 000000000000..818b45ae3b92 --- /dev/null +++ b/java-spanner-jdbc/src/main/resources/com/google/cloud/spanner/jdbc/DatabaseMetaData_GetColumns.sql @@ -0,0 +1,93 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +SELECT TABLE_CATALOG AS TABLE_CAT, TABLE_SCHEMA AS TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, + CASE + WHEN SPANNER_TYPE LIKE 'ARRAY%' THEN 2003 + WHEN SPANNER_TYPE = 'BOOL' THEN 16 + WHEN SPANNER_TYPE LIKE 'BYTES%' THEN -2 + WHEN SPANNER_TYPE = 'DATE' THEN 91 + WHEN SPANNER_TYPE = 'FLOAT64' THEN 8 + WHEN SPANNER_TYPE = 'FLOAT32' THEN 7 + WHEN SPANNER_TYPE = 'INT64' THEN -5 + WHEN SPANNER_TYPE = 'NUMERIC' THEN 2 + WHEN SPANNER_TYPE LIKE 'STRING%' THEN -9 + WHEN SPANNER_TYPE = 'JSON' THEN -9 + WHEN SPANNER_TYPE = 'TIMESTAMP' THEN 93 + WHEN SPANNER_TYPE = 'UUID' THEN 1111 + ELSE 1111 + END AS DATA_TYPE, + SPANNER_TYPE AS TYPE_NAME, + CASE + WHEN STRPOS(SPANNER_TYPE, '(')=0 THEN + CASE + WHEN SPANNER_TYPE = 'INT64' OR SPANNER_TYPE = 'ARRAY' THEN 19 + WHEN SPANNER_TYPE = 'NUMERIC' OR SPANNER_TYPE = 'ARRAY' THEN 15 + WHEN SPANNER_TYPE = 'FLOAT64' OR SPANNER_TYPE = 'ARRAY' THEN 15 + WHEN SPANNER_TYPE = 'FLOAT32' OR SPANNER_TYPE = 'ARRAY' THEN 15 + WHEN SPANNER_TYPE = 'BOOL' OR SPANNER_TYPE = 'ARRAY' THEN NULL + WHEN SPANNER_TYPE = 'DATE' OR SPANNER_TYPE = 'ARRAY' THEN 10 + WHEN SPANNER_TYPE = 'TIMESTAMP' OR SPANNER_TYPE = 'ARRAY' THEN 35 + WHEN SPANNER_TYPE = 'JSON' OR SPANNER_TYPE = 'ARRAY' THEN 2621440 + ELSE 0 + END + ELSE CAST(REPLACE(SUBSTR(SPANNER_TYPE, STRPOS(SPANNER_TYPE, '(')+1, STRPOS(SPANNER_TYPE, ')')-STRPOS(SPANNER_TYPE, '(')-1), 'MAX', CASE WHEN UPPER(SPANNER_TYPE) LIKE '%STRING%' THEN '2621440' ELSE '10485760' END) AS INT64) + END AS COLUMN_SIZE, + 0 AS BUFFER_LENGTH, + CASE + WHEN SPANNER_TYPE LIKE '%FLOAT64%' THEN 16 + WHEN SPANNER_TYPE LIKE '%FLOAT32%' THEN 16 + ELSE NULL + END AS DECIMAL_DIGITS, + CASE + WHEN SPANNER_TYPE LIKE '%INT64%' THEN 10 + WHEN SPANNER_TYPE LIKE '%NUMERIC%' THEN 10 + WHEN SPANNER_TYPE LIKE '%FLOAT64%' THEN 2 + WHEN SPANNER_TYPE LIKE '%FLOAT32%' THEN 2 + ELSE NULL + END AS NUM_PREC_RADIX, + CASE + WHEN IS_NULLABLE = 'YES' THEN 1 + WHEN IS_NULLABLE = 'NO' THEN 0 + ELSE 2 + END AS NULLABLE, + NULL AS REMARKS, + COLUMN_DEFAULT AS 
COLUMN_DEF, + 0 AS SQL_DATA_TYPE, + 0 AS SQL_DATETIME_SUB, + CASE + WHEN (SPANNER_TYPE LIKE 'STRING%' OR SPANNER_TYPE LIKE 'ARRAY supported = Sets.newHashSet(ResultSet.FETCH_FORWARD); + for (int direction : + new int[] {ResultSet.FETCH_FORWARD, ResultSet.FETCH_REVERSE, ResultSet.FETCH_UNKNOWN}) { + try { + rs.setFetchDirection(direction); + assertTrue(supported.contains(direction)); + } catch (JdbcSqlExceptionImpl e) { + assertFalse(supported.contains(direction)); + assertEquals(Code.INVALID_ARGUMENT, e.getCode()); + } + } + } + + @Test + public void testUnsupportedFeatures() { + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.getCursorName()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.isLast()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.beforeFirst()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.afterLast()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.first()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.last()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.absolute(1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.relative(1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.previous()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateNull(1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateNull("test")); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateBoolean(1, Boolean.TRUE)); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBoolean("test", Boolean.TRUE)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateByte(1, (byte) 1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateByte("test", (byte) 1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateShort(1, (short) 1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateShort("test", (short) 
1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateInt(1, 1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateInt("test", 1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateLong(1, 1L)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateLong("test", 1L)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateFloat(1, 1F)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateFloat("test", 1F)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateDouble(1, 1D)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateDouble("test", 1D)); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBigDecimal(1, BigDecimal.ONE)); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBigDecimal("test", BigDecimal.ONE)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateString(1, "value")); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateString("test", "value")); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBytes(1, "value".getBytes())); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBytes("test", "value".getBytes())); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateDate(1, new Date(System.currentTimeMillis()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateDate("test", new Date(System.currentTimeMillis()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateTimestamp(1, new Timestamp(System.currentTimeMillis()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateTimestamp("test", new Timestamp(System.currentTimeMillis()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateTime(1, new Time(System.currentTimeMillis()))); + assertThrows( + SQLFeatureNotSupportedException.class, + 
() -> rs.updateTime("test", new Time(System.currentTimeMillis()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateAsciiStream(1, new ByteArrayInputStream("value".getBytes()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateAsciiStream("test", new ByteArrayInputStream("value".getBytes()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateAsciiStream(1, new ByteArrayInputStream("value".getBytes()), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateAsciiStream("test", new ByteArrayInputStream("value".getBytes()), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateAsciiStream(1, new ByteArrayInputStream("value".getBytes()), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateAsciiStream("test", new ByteArrayInputStream("value".getBytes()), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBinaryStream(1, new ByteArrayInputStream("value".getBytes()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBinaryStream("test", new ByteArrayInputStream("value".getBytes()))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBinaryStream(1, new ByteArrayInputStream("value".getBytes()), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBinaryStream("test", new ByteArrayInputStream("value".getBytes()), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBinaryStream(1, new ByteArrayInputStream("value".getBytes()), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBinaryStream("test", new ByteArrayInputStream("value".getBytes()), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateCharacterStream(1, new StringReader("value"))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateCharacterStream("test", new 
StringReader("value"))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateCharacterStream(1, new StringReader("value"), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateCharacterStream("test", new StringReader("value"), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateCharacterStream(1, new StringReader("value"), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateCharacterStream("test", new StringReader("value"), 1L)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateObject(1, new Object())); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateObject(1, new Object(), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateObject(1, new Object(), mock(SQLType.class))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateObject(1, new Object(), mock(SQLType.class), 0)); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateObject("test", new Object())); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateObject("test", new Object(), 1)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateObject("test", new Object(), mock(SQLType.class))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateObject("test", new Object(), mock(SQLType.class), 1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.insertRow()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateRow()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.deleteRow()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.refreshRow()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.cancelRowUpdates()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.moveToInsertRow()); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.moveToCurrentRow()); 
+ assertThrows(SQLFeatureNotSupportedException.class, () -> rs.getRef(1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.getRef("test")); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateRef(1, mock(Ref.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateRef("test", mock(Ref.class))); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateBlob(1, mock(Blob.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBlob("test", mock(Blob.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBlob(1, mock(InputStream.class))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBlob("test", mock(InputStream.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateBlob(1, mock(InputStream.class), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateBlob("test", mock(InputStream.class), 1L)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateClob(1, mock(Clob.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateClob("test", mock(Clob.class))); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateClob(1, mock(Reader.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateClob("test", mock(Reader.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateClob(1, mock(Reader.class), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateClob("test", mock(Reader.class), 1L)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateArray(1, mock(Array.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateArray("test", mock(Array.class))); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.getRowId(1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.getRowId("test")); + 
assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateRowId(1, mock(RowId.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateRowId("test", mock(RowId.class))); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateNString(1, "value")); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateNString("test", "value")); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.updateNClob(1, mock(NClob.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateNClob("test", mock(NClob.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateNClob(1, mock(Reader.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateNClob("test", mock(Reader.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateNClob(1, mock(Reader.class), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateNClob("test", mock(Reader.class), 1L)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.getSQLXML(1)); + assertThrows(SQLFeatureNotSupportedException.class, () -> rs.getSQLXML("test")); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateSQLXML(1, mock(SQLXML.class))); + assertThrows( + SQLFeatureNotSupportedException.class, () -> rs.updateSQLXML("test", mock(SQLXML.class))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateNCharacterStream(1, new StringReader("value"))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateNCharacterStream("test", new StringReader("value"))); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateNCharacterStream(1, new StringReader("value"), 1L)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> rs.updateNCharacterStream("test", new StringReader("value"), 1L)); + } +} diff --git 
a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AbstractJdbcWrapperTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AbstractJdbcWrapperTest.java new file mode 100644 index 000000000000..f381d56586d1 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AbstractJdbcWrapperTest.java @@ -0,0 +1,525 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.AbstractJdbcWrapper.getSpannerColumnTypeName; +import static com.google.cloud.spanner.jdbc.AbstractJdbcWrapper.getSpannerTypeName; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Type; +import com.google.rpc.Code; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.sql.Date; +import java.sql.SQLException; +import java.sql.Time; +import java.util.Calendar; +import java.util.TimeZone; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AbstractJdbcWrapperTest { + /** Create a concrete sub class to use for testing. 
*/ + private static class TestWrapper extends AbstractJdbcWrapper { + @Override + public boolean isClosed() { + return false; + } + } + + /** Add a sub class for the test class for testing wrapping. */ + private static class SubTestWrapper extends TestWrapper {} + + @Test + public void testIsWrapperFor() { + TestWrapper subject = new TestWrapper(); + assertThat(subject.isWrapperFor(TestWrapper.class)).isTrue(); + assertThat(subject.isWrapperFor(SubTestWrapper.class)).isFalse(); + assertThat(subject.isWrapperFor(Object.class)).isTrue(); + assertThat(subject.isWrapperFor(getClass())).isFalse(); + + subject = new SubTestWrapper(); + assertThat(subject.isWrapperFor(TestWrapper.class)).isTrue(); + assertThat(subject.isWrapperFor(SubTestWrapper.class)).isTrue(); + assertThat(subject.isWrapperFor(Object.class)).isTrue(); + assertThat(subject.isWrapperFor(getClass())).isFalse(); + } + + @Test + public void testUnwrap() { + TestWrapper subject = new TestWrapper(); + assertThat(unwrapSucceeds(subject, TestWrapper.class)).isTrue(); + assertThat(unwrapSucceeds(subject, SubTestWrapper.class)).isFalse(); + assertThat(unwrapSucceeds(subject, Object.class)).isTrue(); + assertThat(unwrapSucceeds(subject, getClass())).isFalse(); + } + + @FunctionalInterface + private interface SqlFunction { + R apply(T value) throws SQLException; + } + + private static final class CheckedCastChecker { + + private final SqlFunction checker; + + public CheckedCastChecker(SqlFunction checker) { + this.checker = checker; + } + + public boolean cast(T value) { + try { + checker.apply(value); + return true; + } catch (SQLException e) { + return false; + } + } + } + + @Test + public void testCheckedCastToByte() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToByte); + assertThat(checker.cast(0L)).isTrue(); + assertThat(checker.cast(1L)).isTrue(); + assertThat(checker.cast((long) Byte.MAX_VALUE)).isTrue(); + assertThat(checker.cast((long) Byte.MAX_VALUE + 
1L)).isFalse(); + assertThat(checker.cast(Long.MAX_VALUE)).isFalse(); + assertThat(checker.cast(-1L)).isTrue(); + assertThat(checker.cast((long) Byte.MIN_VALUE)).isTrue(); + assertThat(checker.cast((long) Byte.MIN_VALUE - 1L)).isFalse(); + assertThat(checker.cast(Long.MIN_VALUE)).isFalse(); + } + + @Test + public void testCheckedCastFromBigDecimalToByte() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToByte); + assertTrue(checker.cast(BigDecimal.ZERO)); + assertTrue(checker.cast(BigDecimal.ONE)); + assertTrue(checker.cast(BigDecimal.valueOf(-1))); + assertTrue(checker.cast(BigDecimal.valueOf(Byte.MIN_VALUE))); + assertTrue(checker.cast(BigDecimal.valueOf(Byte.MAX_VALUE))); + + assertFalse(checker.cast(BigDecimal.valueOf(Byte.MAX_VALUE).add(BigDecimal.ONE))); + assertFalse(checker.cast(BigDecimal.valueOf(Byte.MIN_VALUE).subtract(BigDecimal.ONE))); + } + + @Test + public void testCheckedCastFromBigIntegerToByte() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToByte); + assertTrue(checker.cast(BigInteger.ZERO)); + assertTrue(checker.cast(BigInteger.ONE)); + assertTrue(checker.cast(BigInteger.valueOf(-1))); + assertTrue(checker.cast(BigInteger.valueOf(Byte.MIN_VALUE))); + assertTrue(checker.cast(BigInteger.valueOf(Byte.MAX_VALUE))); + + assertFalse(checker.cast(BigInteger.valueOf(Byte.MAX_VALUE).add(BigInteger.ONE))); + assertFalse(checker.cast(BigInteger.valueOf(Byte.MIN_VALUE).subtract(BigInteger.ONE))); + } + + @Test + public void testCheckedCastToShort() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToShort); + assertThat(checker.cast(0L)).isTrue(); + assertThat(checker.cast(1L)).isTrue(); + assertThat(checker.cast((long) Short.MAX_VALUE)).isTrue(); + assertThat(checker.cast((long) Short.MAX_VALUE + 1L)).isFalse(); + assertThat(checker.cast(Long.MAX_VALUE)).isFalse(); + assertThat(checker.cast(-1L)).isTrue(); + 
assertThat(checker.cast((long) Short.MIN_VALUE)).isTrue(); + assertThat(checker.cast((long) Short.MIN_VALUE - 1L)).isFalse(); + assertThat(checker.cast(Long.MIN_VALUE)).isFalse(); + } + + @Test + public void testCheckedCastFromBigDecimalToShort() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToShort); + assertTrue(checker.cast(BigDecimal.ZERO)); + assertTrue(checker.cast(BigDecimal.ONE)); + assertTrue(checker.cast(BigDecimal.valueOf(-1))); + assertTrue(checker.cast(BigDecimal.valueOf(Short.MIN_VALUE))); + assertTrue(checker.cast(BigDecimal.valueOf(Short.MAX_VALUE))); + + assertFalse(checker.cast(BigDecimal.valueOf(Short.MAX_VALUE).add(BigDecimal.ONE))); + assertFalse(checker.cast(BigDecimal.valueOf(Short.MIN_VALUE).subtract(BigDecimal.ONE))); + } + + @Test + public void testCheckedCastFromBigIntegerToShort() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToShort); + assertTrue(checker.cast(BigInteger.ZERO)); + assertTrue(checker.cast(BigInteger.ONE)); + assertTrue(checker.cast(BigInteger.valueOf(-1))); + assertTrue(checker.cast(BigInteger.valueOf(Short.MIN_VALUE))); + assertTrue(checker.cast(BigInteger.valueOf(Short.MAX_VALUE))); + + assertFalse(checker.cast(BigInteger.valueOf(Short.MAX_VALUE).add(BigInteger.ONE))); + assertFalse(checker.cast(BigInteger.valueOf(Short.MIN_VALUE).subtract(BigInteger.ONE))); + } + + @Test + public void testCheckedCastToInt() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToInt); + assertThat(checker.cast(0L)).isTrue(); + assertThat(checker.cast(1L)).isTrue(); + assertThat(checker.cast((long) Integer.MAX_VALUE)).isTrue(); + assertThat(checker.cast((long) Integer.MAX_VALUE + 1L)).isFalse(); + assertThat(checker.cast(Long.MAX_VALUE)).isFalse(); + assertThat(checker.cast(-1L)).isTrue(); + assertThat(checker.cast((long) Integer.MIN_VALUE)).isTrue(); + assertThat(checker.cast((long) 
Integer.MIN_VALUE - 1L)).isFalse(); + assertThat(checker.cast(Long.MIN_VALUE)).isFalse(); + } + + @Test + public void testCheckedCastFromBigDecimalToInt() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToInt); + assertTrue(checker.cast(BigDecimal.ZERO)); + assertTrue(checker.cast(BigDecimal.ONE)); + assertTrue(checker.cast(BigDecimal.valueOf(-1))); + assertTrue(checker.cast(BigDecimal.valueOf(Integer.MIN_VALUE))); + assertTrue(checker.cast(BigDecimal.valueOf(Integer.MAX_VALUE))); + + assertFalse(checker.cast(BigDecimal.valueOf(Integer.MAX_VALUE).add(BigDecimal.ONE))); + assertFalse(checker.cast(BigDecimal.valueOf(Integer.MIN_VALUE).subtract(BigDecimal.ONE))); + } + + @Test + public void testCheckedCastFromBigIntegerToInt() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToInt); + assertTrue(checker.cast(BigInteger.ZERO)); + assertTrue(checker.cast(BigInteger.ONE)); + assertTrue(checker.cast(BigInteger.valueOf(-1))); + assertTrue(checker.cast(BigInteger.valueOf(Integer.MIN_VALUE))); + assertTrue(checker.cast(BigInteger.valueOf(Integer.MAX_VALUE))); + + assertFalse(checker.cast(BigInteger.valueOf(Integer.MAX_VALUE).add(BigInteger.ONE))); + assertFalse(checker.cast(BigInteger.valueOf(Integer.MIN_VALUE).subtract(BigInteger.ONE))); + } + + @Test + public void testCheckedCastFromBigDecimalToLong() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToLong); + assertTrue(checker.cast(BigDecimal.ZERO)); + assertTrue(checker.cast(BigDecimal.ONE)); + assertTrue(checker.cast(BigDecimal.valueOf(-1))); + assertTrue(checker.cast(BigDecimal.valueOf(Long.MIN_VALUE))); + assertTrue(checker.cast(BigDecimal.valueOf(Long.MAX_VALUE))); + + assertFalse(checker.cast(BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE))); + assertFalse(checker.cast(BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE))); + } + + @Test + public void 
testCheckedCastFromBigIntegerToLong() { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToLong); + assertTrue(checker.cast(BigInteger.ZERO)); + assertTrue(checker.cast(BigInteger.ONE)); + assertTrue(checker.cast(BigInteger.valueOf(-1))); + assertTrue(checker.cast(BigInteger.valueOf(Long.MIN_VALUE))); + assertTrue(checker.cast(BigInteger.valueOf(Long.MAX_VALUE))); + + assertFalse(checker.cast(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE))); + assertFalse(checker.cast(BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE))); + } + + @Test + public void testCheckedCastToFloat() throws SQLException { + final CheckedCastChecker checker = + new CheckedCastChecker<>(AbstractJdbcWrapper::checkedCastToFloat); + assertThat(checker.cast(0D)).isTrue(); + assertThat(checker.cast(1D)).isTrue(); + assertThat(checker.cast((double) Float.MAX_VALUE)).isTrue(); + assertThat(checker.cast((double) Float.MAX_VALUE * 2.0D)).isFalse(); + assertThat(checker.cast(Double.MAX_VALUE)).isFalse(); + assertThat(checker.cast(-1D)).isTrue(); + assertThat(checker.cast((double) Float.MIN_VALUE)).isTrue(); + assertThat(checker.cast(-Float.MAX_VALUE * 2d)).isFalse(); + assertThat(checker.cast(-Double.MAX_VALUE)).isFalse(); + + assertEquals( + Float.POSITIVE_INFINITY, + AbstractJdbcWrapper.checkedCastToFloat(Double.POSITIVE_INFINITY), + 0.0d); + assertEquals( + Float.NEGATIVE_INFINITY, + AbstractJdbcWrapper.checkedCastToFloat(Double.NEGATIVE_INFINITY), + 0.0d); + assertEquals(Float.NaN, AbstractJdbcWrapper.checkedCastToFloat(Double.NaN), 0.0d); + } + + @Test + public void testParseBigDecimal() throws SQLException { + assertEquals(BigDecimal.valueOf(123, 2), AbstractJdbcWrapper.parseBigDecimal("1.23")); + try { + AbstractJdbcWrapper.parseBigDecimal("NaN"); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT.getNumber(), e.getErrorCode()); + } + 
} + + @Test + public void testParseFloat() throws SQLException { + assertEquals(3.14F, AbstractJdbcWrapper.parseFloat("3.14"), 0.001F); + try { + AbstractJdbcWrapper.parseFloat("invalid number"); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT.getNumber(), e.getErrorCode()); + } + } + + private boolean unwrapSucceeds(AbstractJdbcWrapper subject, Class iface) { + try { + subject.unwrap(iface); + return true; + } catch (SQLException e) { + return false; + } + } + + @Test + public void testParseDouble() throws SQLException { + assertThat(AbstractJdbcWrapper.parseDouble("3.14")).isEqualTo(3.14D); + try { + AbstractJdbcWrapper.parseDouble("not a number"); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + assertThat(((JdbcSqlException) e).getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testParseDate() throws SQLException { + assertThat(AbstractJdbcWrapper.parseDate("2020-06-01")).isEqualTo(new Date(2020 - 1900, 5, 1)); + try { + AbstractJdbcWrapper.parseDate("01-06-2020"); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + assertThat(((JdbcSqlException) e).getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + } + + @Test + public void testParseDateWithCalendar() throws SQLException { + assertThat( + AbstractJdbcWrapper.parseDate( + "2020-06-01", Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles")))) + .isEqualTo( + new Date( + Timestamp.parseTimestamp("2020-06-01T00:00:00-07:00").toSqlTimestamp().getTime())); + assertThat( + AbstractJdbcWrapper.parseDate( + "2020-06-01", Calendar.getInstance(TimeZone.getTimeZone("Europe/Amsterdam")))) + .isEqualTo( + new Date( + 
Timestamp.parseTimestamp("2020-06-01T00:00:00+02:00").toSqlTimestamp().getTime())); + try { + AbstractJdbcWrapper.parseDate("01-06-2020", Calendar.getInstance()); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + assertThat(((JdbcSqlException) e).getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testParseTime() throws SQLException { + assertThat(AbstractJdbcWrapper.parseTime("10:31:05")).isEqualTo(new Time(10, 31, 5)); + try { + AbstractJdbcWrapper.parseTime("10.31.05"); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + assertThat(((JdbcSqlException) e).getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + } + + @Test + public void testParseTimeWithCalendar() throws SQLException { + assertThat( + AbstractJdbcWrapper.parseTime( + "10:31:05", Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles")))) + .isEqualTo( + new Time( + Timestamp.parseTimestamp("1970-01-01T10:31:05-08:00").toSqlTimestamp().getTime())); + assertThat( + AbstractJdbcWrapper.parseTime( + "10:31:05", Calendar.getInstance(TimeZone.getTimeZone("Pacific/Auckland")))) + .isEqualTo( + new Time( + Timestamp.parseTimestamp("1970-01-01T10:31:05+12:00").toSqlTimestamp().getTime())); + try { + AbstractJdbcWrapper.parseTime("10.31.05", Calendar.getInstance()); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + assertThat(((JdbcSqlException) e).getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + } + + @Test + public void testParseTimestamp() throws SQLException { + assertThat(AbstractJdbcWrapper.parseTimestamp("2020-06-01T10:31:05Z")) + .isEqualTo(Timestamp.parseTimestamp("2020-06-01T10:31:05Z").toSqlTimestamp()); + 
assertThat(AbstractJdbcWrapper.parseTimestamp("2020-06-01T10:31:05.123Z")) + .isEqualTo(Timestamp.parseTimestamp("2020-06-01T10:31:05.123Z").toSqlTimestamp()); + assertThat(AbstractJdbcWrapper.parseTimestamp("2020-06-01T10:31Z")) + .isEqualTo(Timestamp.parseTimestamp("2020-06-01T10:31:00Z").toSqlTimestamp()); + assertThat(AbstractJdbcWrapper.parseTimestamp("2020-06-01T10:31")) + .isEqualTo(Timestamp.parseTimestamp("2020-06-01T10:31:00Z").toSqlTimestamp()); + assertThat(AbstractJdbcWrapper.parseTimestamp("1970-01-01T00:00:00Z")) + .isEqualTo(Timestamp.ofTimeMicroseconds(0L).toSqlTimestamp()); + try { + AbstractJdbcWrapper.parseTimestamp("2020-06-01T10"); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + assertThat(((JdbcSqlException) e).getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + } + + @Test + public void testParseTimestampWithCalendar() throws SQLException { + assertThat( + AbstractJdbcWrapper.parseTimestamp( + "2020-02-01T10:31:05Z", + Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles")))) + .isEqualTo(Timestamp.parseTimestamp("2020-02-01T10:31:05-08:00").toSqlTimestamp()); + assertThat( + AbstractJdbcWrapper.parseTimestamp( + "2020-06-01T10:31:05Z", + Calendar.getInstance(TimeZone.getTimeZone("Europe/Amsterdam")))) + .isEqualTo(Timestamp.parseTimestamp("2020-06-01T10:31:05+02:00").toSqlTimestamp()); + try { + AbstractJdbcWrapper.parseTimestamp( + "2020-06-01T10", Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles"))); + fail("missing expected SQLException"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + assertThat(((JdbcSqlException) e).getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + } + + @Test + public void testGoogleSQLTypeNames() { + assertEquals("INT64", getSpannerTypeName(Type.int64(), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("BOOL", getSpannerTypeName(Type.bool(), 
Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("FLOAT64", getSpannerTypeName(Type.float64(), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("STRING", getSpannerTypeName(Type.string(), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("BYTES", getSpannerTypeName(Type.bytes(), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("DATE", getSpannerTypeName(Type.date(), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("TIMESTAMP", getSpannerTypeName(Type.timestamp(), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("JSON", getSpannerTypeName(Type.json(), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals("NUMERIC", getSpannerTypeName(Type.numeric(), Dialect.GOOGLE_STANDARD_SQL)); + + assertEquals( + "ARRAY", getSpannerTypeName(Type.array(Type.int64()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", getSpannerTypeName(Type.array(Type.bool()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + getSpannerTypeName(Type.array(Type.float64()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + getSpannerTypeName(Type.array(Type.string()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", getSpannerTypeName(Type.array(Type.bytes()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", getSpannerTypeName(Type.array(Type.date()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + getSpannerTypeName(Type.array(Type.timestamp()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", getSpannerTypeName(Type.array(Type.json()), Dialect.GOOGLE_STANDARD_SQL)); + assertEquals( + "ARRAY", + getSpannerTypeName(Type.array(Type.numeric()), Dialect.GOOGLE_STANDARD_SQL)); + } + + @Test + public void testPostgreSQLTypeNames() { + assertEquals("bigint", getSpannerTypeName(Type.int64(), Dialect.POSTGRESQL)); + assertEquals("boolean", getSpannerTypeName(Type.bool(), Dialect.POSTGRESQL)); + assertEquals("double precision", getSpannerTypeName(Type.float64(), Dialect.POSTGRESQL)); + assertEquals("character varying", getSpannerTypeName(Type.string(), 
Dialect.POSTGRESQL)); + assertEquals("bytea", getSpannerTypeName(Type.bytes(), Dialect.POSTGRESQL)); + assertEquals("date", getSpannerTypeName(Type.date(), Dialect.POSTGRESQL)); + assertEquals( + "timestamp with time zone", getSpannerTypeName(Type.timestamp(), Dialect.POSTGRESQL)); + assertEquals("jsonb", getSpannerTypeName(Type.pgJsonb(), Dialect.POSTGRESQL)); + assertEquals("numeric", getSpannerTypeName(Type.pgNumeric(), Dialect.POSTGRESQL)); + + assertEquals("bigint[]", getSpannerTypeName(Type.array(Type.int64()), Dialect.POSTGRESQL)); + assertEquals("boolean[]", getSpannerTypeName(Type.array(Type.bool()), Dialect.POSTGRESQL)); + assertEquals( + "double precision[]", getSpannerTypeName(Type.array(Type.float64()), Dialect.POSTGRESQL)); + assertEquals( + "character varying[]", getSpannerTypeName(Type.array(Type.string()), Dialect.POSTGRESQL)); + assertEquals("bytea[]", getSpannerTypeName(Type.array(Type.bytes()), Dialect.POSTGRESQL)); + assertEquals("date[]", getSpannerTypeName(Type.array(Type.date()), Dialect.POSTGRESQL)); + assertEquals( + "timestamp with time zone[]", + getSpannerTypeName(Type.array(Type.timestamp()), Dialect.POSTGRESQL)); + assertEquals("jsonb[]", getSpannerTypeName(Type.array(Type.pgJsonb()), Dialect.POSTGRESQL)); + assertEquals("numeric[]", getSpannerTypeName(Type.array(Type.pgNumeric()), Dialect.POSTGRESQL)); + + assertEquals("_bigint", getSpannerColumnTypeName(Type.array(Type.int64()), Dialect.POSTGRESQL)); + assertEquals("_boolean", getSpannerColumnTypeName(Type.array(Type.bool()), Dialect.POSTGRESQL)); + assertEquals( + "_double precision", + getSpannerColumnTypeName(Type.array(Type.float64()), Dialect.POSTGRESQL)); + assertEquals( + "_character varying", + getSpannerColumnTypeName(Type.array(Type.string()), Dialect.POSTGRESQL)); + assertEquals("_bytea", getSpannerColumnTypeName(Type.array(Type.bytes()), Dialect.POSTGRESQL)); + assertEquals("_date", getSpannerColumnTypeName(Type.array(Type.date()), Dialect.POSTGRESQL)); + assertEquals( + 
"_timestamp with time zone", + getSpannerColumnTypeName(Type.array(Type.timestamp()), Dialect.POSTGRESQL)); + assertEquals( + "_jsonb", getSpannerColumnTypeName(Type.array(Type.pgJsonb()), Dialect.POSTGRESQL)); + assertEquals( + "_numeric", getSpannerColumnTypeName(Type.array(Type.pgNumeric()), Dialect.POSTGRESQL)); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AllTypesMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AllTypesMockServerTest.java new file mode 100644 index 000000000000..7cb9729dc9f3 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AllTypesMockServerTest.java @@ -0,0 +1,423 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Value; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.Date; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.Base64; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +@RunWith(Parameterized.class) +public class AllTypesMockServerTest + extends com.google.cloud.spanner.connection.AllTypesMockServerTest { + + @Override + @Test + public void testSelectAllTypes() { + try (Connection connection = createJdbcConnection()) { + try (ResultSet resultSet = + connection.createStatement().executeQuery(SELECT_STATEMENT.getSql())) { + assertTrue(resultSet.next()); + + int col = 0; + assertEquals(BOOL_VALUE, resultSet.getBoolean(++col)); + assertEquals(INT64_VALUE, resultSet.getLong(++col)); + assertEquals(FLOAT32_VALUE, resultSet.getFloat(++col), 0.0f); + assertEquals(FLOAT64_VALUE, resultSet.getDouble(++col), 0.0d); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(PG_NUMERIC_VALUE, resultSet.getString(++col)); + } else { + assertEquals(NUMERIC_VALUE, resultSet.getBigDecimal(++col)); + } + assertEquals(STRING_VALUE, resultSet.getString(++col)); + assertEquals(JSON_VALUE, 
resultSet.getString(++col)); + assertArrayEquals(BYTES_VALUE, resultSet.getBytes(++col)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + resultSet.getDate(++col)); + assertEquals(UUID_VALUE, resultSet.getObject(++col, UUID.class)); + assertEquals(TIMESTAMP_VALUE.toSqlTimestamp(), resultSet.getTimestamp(++col)); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(PG_OID_VALUE, resultSet.getLong(++col)); + } + + assertEquals( + BOOL_ARRAY_VALUE, Arrays.asList((Boolean[]) resultSet.getArray(++col).getArray())); + assertEquals( + INT64_ARRAY_VALUE, Arrays.asList((Long[]) resultSet.getArray(++col).getArray())); + assertEquals( + FLOAT32_ARRAY_VALUE, Arrays.asList((Float[]) resultSet.getArray(++col).getArray())); + assertEquals( + FLOAT64_ARRAY_VALUE, Arrays.asList((Double[]) resultSet.getArray(++col).getArray())); + if (dialect == Dialect.POSTGRESQL) { + // TODO: Fix getting an ARRAY. This currently tries to cast it to a + // BigDecimal[], which is not supported for NaN values. + // assertEquals(PG_NUMERIC_ARRAY_VALUE, Arrays.asList((String[]) + // resultSet.getArray(++col).getArray())); + ++col; + } else { + assertEquals( + NUMERIC_ARRAY_VALUE, + Arrays.asList((BigDecimal[]) resultSet.getArray(++col).getArray())); + } + assertEquals( + STRING_ARRAY_VALUE, Arrays.asList((String[]) resultSet.getArray(++col).getArray())); + assertEquals( + JSON_ARRAY_VALUE, Arrays.asList((String[]) resultSet.getArray(++col).getArray())); + // Convert bytes to base64 strings, as the equals(..) method for byte[] uses ==. + assertEquals( + BYTES_ARRAY_VALUE.stream() + .map(bytes -> bytes == null ? null : bytes.toBase64()) + .collect(Collectors.toList()), + Arrays.stream((byte[][]) resultSet.getArray(++col).getArray()) + .map(bytes -> bytes == null ? null : Base64.getEncoder().encodeToString(bytes)) + .collect(Collectors.toList())); + assertEquals( + DATE_ARRAY_VALUE.stream() + .map( + date -> + date == null + ? 
null + : new Date( + date.getYear() - 1900, date.getMonth() - 1, date.getDayOfMonth())) + .collect(Collectors.toList()), + Arrays.asList((Date[]) resultSet.getArray(++col).getArray())); + assertEquals( + UUID_ARRAY_VALUE, Arrays.asList((UUID[]) resultSet.getArray(++col).getArray())); + assertEquals( + TIMESTAMP_ARRAY_VALUE.stream() + .map(timestamp -> timestamp == null ? null : timestamp.toSqlTimestamp()) + .collect(Collectors.toList()), + Arrays.asList((Timestamp[]) resultSet.getArray(++col).getArray())); + if (dialect == Dialect.POSTGRESQL) { + assertEquals( + PG_OID_ARRAY_VALUE, Arrays.asList((Long[]) resultSet.getArray(++col).getArray())); + } + + assertFalse(resultSet.next()); + } + } catch (SQLException sqlException) { + throw new RuntimeException(sqlException); + } + } + + @Override + @Test + public void testInsertAllTypes() { + Statement insertStatement = createInsertStatement(dialect); + if (dialect == Dialect.POSTGRESQL) { + // TODO: Remove when PG_NUMERIC NaN is supported. + insertStatement = + insertStatement.toBuilder() + .replace(insertStatement.getSql().replaceAll("@p", "\\$")) + .bind("p17") + .to( + com.google.cloud.spanner.Value.pgNumericArray( + NUMERIC_ARRAY_VALUE.stream() + .map( + bigDecimal -> + bigDecimal == null ? null : bigDecimal.toEngineeringString()) + .collect(Collectors.toList()))) + .build(); + } + // The JDBC driver binds UUID values as untyped strings, so we need to add it as 'partial' + // result, meaning that the match will only be made based on the SQL string. 
+ mockSpanner.putPartialStatementResult(StatementResult.update(insertStatement, 1L)); + try (Connection connection = createJdbcConnection()) { + try (PreparedStatement statement = + connection.prepareStatement( + createInsertStatement(dialect).getSql().replaceAll("@p\\d+", "?"))) { + int param = 0; + statement.setBoolean(++param, BOOL_VALUE); + statement.setLong(++param, INT64_VALUE); + statement.setFloat(++param, FLOAT32_VALUE); + statement.setDouble(++param, FLOAT64_VALUE); + if (dialect == Dialect.POSTGRESQL) { + statement.setBigDecimal(++param, new BigDecimal(PG_NUMERIC_VALUE)); + } else { + statement.setBigDecimal(++param, NUMERIC_VALUE); + } + statement.setString(++param, STRING_VALUE); + if (dialect == Dialect.POSTGRESQL) { + statement.setObject(++param, JSON_VALUE, JdbcDataType.PG_JSONB.getSqlType()); + } else { + statement.setObject(++param, JSON_VALUE, JdbcDataType.JSON.getSqlType()); + } + statement.setBytes(++param, BYTES_VALUE); + statement.setDate( + ++param, + new Date( + DATE_VALUE.getYear() - 1900, + DATE_VALUE.getMonth() - 1, + DATE_VALUE.getDayOfMonth())); + statement.setObject(++param, UUID_VALUE); + statement.setTimestamp(++param, TIMESTAMP_VALUE.toSqlTimestamp()); + if (dialect == Dialect.POSTGRESQL) { + statement.setLong(++param, PG_OID_VALUE); + } + + // TODO: Support PostgreSQL type names for creating arrays. + statement.setArray( + ++param, connection.createArrayOf("BOOL", BOOL_ARRAY_VALUE.toArray(new Boolean[0]))); + statement.setArray( + ++param, connection.createArrayOf("INT64", INT64_ARRAY_VALUE.toArray(new Long[0]))); + statement.setArray( + ++param, + connection.createArrayOf("FLOAT32", FLOAT32_ARRAY_VALUE.toArray(new Float[0]))); + statement.setArray( + ++param, + connection.createArrayOf("FLOAT64", FLOAT64_ARRAY_VALUE.toArray(new Double[0]))); + + // TODO: Make dialect-specific when NaN in arrays for PG_NUMERIC is supported. 
+ // statement.setArray( + // ++param, + // connection.createArrayOf( + // "NUMERIC", + // dialect == Dialect.POSTGRESQL + // ? PG_NUMERIC_ARRAY_VALUE.toArray(new String[0]) + // : NUMERIC_ARRAY_VALUE.toArray(new BigDecimal[0]))); + statement.setArray( + ++param, + connection.createArrayOf( + dialect == Dialect.POSTGRESQL ? "PG_NUMERIC" : "NUMERIC", + NUMERIC_ARRAY_VALUE.toArray(new BigDecimal[0]))); + + statement.setArray( + ++param, connection.createArrayOf("STRING", STRING_ARRAY_VALUE.toArray(new String[0]))); + statement.setArray( + ++param, + connection.createArrayOf( + dialect == Dialect.POSTGRESQL ? "JSONB" : "JSON", + JSON_ARRAY_VALUE.toArray(new String[0]))); + statement.setArray( + ++param, + connection.createArrayOf( + "BYTES", + BYTES_ARRAY_VALUE.stream() + .map(bytes -> bytes == null ? null : bytes.toByteArray()) + .toArray(byte[][]::new))); + statement.setArray( + ++param, + connection.createArrayOf( + "DATE", + DATE_ARRAY_VALUE.stream() + .map( + date -> + date == null + ? null + : new Date( + date.getYear() - 1900, + date.getMonth() - 1, + date.getDayOfMonth())) + .toArray(Date[]::new))); + statement.setArray( + ++param, connection.createArrayOf("UUID", UUID_ARRAY_VALUE.toArray(new UUID[0]))); + statement.setArray( + ++param, + connection.createArrayOf( + "TIMESTAMP", + TIMESTAMP_ARRAY_VALUE.stream() + .map(timestamp -> timestamp == null ? 
null : timestamp.toSqlTimestamp()) + .toArray(Timestamp[]::new))); + if (dialect == Dialect.POSTGRESQL) { + statement.setArray( + ++param, connection.createArrayOf("INT64", PG_OID_ARRAY_VALUE.toArray(new Long[0]))); + } + + assertEquals(1, statement.executeUpdate()); + } + + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + Map paramTypes = request.getParamTypesMap(); + Map params = request.getParams().getFieldsMap(); + // UUID is sent without any type information to allow it to be used with any type of column + // that accepts a STRING value. + assertEquals(dialect == Dialect.POSTGRESQL ? 23 : 21, paramTypes.size()); + assertEquals(dialect == Dialect.POSTGRESQL ? 24 : 22, params.size()); + + // Verify param types. + ImmutableList expectedTypes = + ImmutableList.of( + TypeCode.BOOL, + TypeCode.INT64, + TypeCode.FLOAT32, + TypeCode.FLOAT64, + TypeCode.NUMERIC, + TypeCode.STRING, + TypeCode.JSON, + TypeCode.BYTES, + TypeCode.DATE, + TypeCode.TYPE_CODE_UNSPECIFIED, // UUID + TypeCode.TIMESTAMP); + if (dialect == Dialect.POSTGRESQL) { + expectedTypes = + ImmutableList.builder().addAll(expectedTypes).add(TypeCode.INT64).build(); + } + for (int col = 0; col < expectedTypes.size(); col++) { + TypeCode expectedType = expectedTypes.get(col); + if (expectedType == TypeCode.TYPE_CODE_UNSPECIFIED) { + assertFalse(paramTypes.containsKey("p" + (col + 1))); + } else { + assertEquals(expectedType, paramTypes.get("p" + (col + 1)).getCode()); + } + int arrayCol = col + expectedTypes.size(); + assertEquals(TypeCode.ARRAY, paramTypes.get("p" + (arrayCol + 1)).getCode()); + if (expectedType != TypeCode.TYPE_CODE_UNSPECIFIED) { + assertEquals( + expectedType, paramTypes.get("p" + (arrayCol + 1)).getArrayElementType().getCode()); + } + } + + // Verify param values. 
+ int col = 0; + assertEquals(BOOL_VALUE, params.get("p" + ++col).getBoolValue()); + assertEquals(String.valueOf(INT64_VALUE), params.get("p" + ++col).getStringValue()); + assertEquals(FLOAT32_VALUE, params.get("p" + ++col).getNumberValue(), 0.0d); + assertEquals(FLOAT64_VALUE, params.get("p" + ++col).getNumberValue(), 0.0d); + assertEquals( + dialect == Dialect.POSTGRESQL ? PG_NUMERIC_VALUE : NUMERIC_VALUE.toEngineeringString(), + params.get("p" + ++col).getStringValue()); + assertEquals(STRING_VALUE, params.get("p" + ++col).getStringValue()); + assertEquals(JSON_VALUE, params.get("p" + ++col).getStringValue()); + assertEquals( + Base64.getEncoder().encodeToString(BYTES_VALUE), + params.get("p" + ++col).getStringValue()); + assertEquals(DATE_VALUE.toString(), params.get("p" + ++col).getStringValue()); + assertEquals(UUID_VALUE.toString(), params.get("p" + ++col).getStringValue()); + assertEquals(TIMESTAMP_VALUE.toString(), params.get("p" + ++col).getStringValue()); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(String.valueOf(PG_OID_VALUE), params.get("p" + ++col).getStringValue()); + } + + assertEquals( + BOOL_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getBoolValue()) + .collect(Collectors.toList())); + assertEquals( + INT64_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : Long.valueOf(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + FLOAT32_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : (float) value.getNumberValue()) + .collect(Collectors.toList())); + assertEquals( + FLOAT64_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? 
null : value.getNumberValue()) + .collect(Collectors.toList())); + if (dialect == Dialect.POSTGRESQL) { + // TODO: Replace with PG specific value when NaN is supported. + // assertEquals( + // PG_NUMERIC_ARRAY_VALUE, + // params.get("p" + ++col).getListValue().getValuesList().stream() + // .map(value -> value.hasNullValue() ? null : value.getStringValue()) + // .collect(Collectors.toList())); + assertEquals( + NUMERIC_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : new BigDecimal(value.getStringValue())) + .collect(Collectors.toList())); + } else { + assertEquals( + NUMERIC_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : new BigDecimal(value.getStringValue())) + .collect(Collectors.toList())); + } + assertEquals( + STRING_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getStringValue()) + .collect(Collectors.toList())); + assertEquals( + JSON_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : value.getStringValue()) + .collect(Collectors.toList())); + assertEquals( + BYTES_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map( + value -> + value.hasNullValue() ? null : ByteArray.fromBase64(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + DATE_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map( + value -> + value.hasNullValue() + ? null + : com.google.cloud.Date.parseDate(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + UUID_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? 
null : UUID.fromString(value.getStringValue())) + .collect(Collectors.toList())); + assertEquals( + TIMESTAMP_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map( + value -> + value.hasNullValue() + ? null + : com.google.cloud.Timestamp.parseTimestamp(value.getStringValue())) + .collect(Collectors.toList())); + if (dialect == Dialect.POSTGRESQL) { + assertEquals( + PG_OID_ARRAY_VALUE, + params.get("p" + ++col).getListValue().getValuesList().stream() + .map(value -> value.hasNullValue() ? null : Long.valueOf(value.getStringValue())) + .collect(Collectors.toList())); + } + } catch (SQLException sqlException) { + throw new RuntimeException(sqlException); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AutoBatchDmlMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AutoBatchDmlMockServerTest.java new file mode 100644 index 000000000000..8ba1a3c0bc8b --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/AutoBatchDmlMockServerTest.java @@ -0,0 +1,332 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.concurrent.ThreadLocalRandom; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class AutoBatchDmlMockServerTest extends AbstractMockServerTest { + private static final String NON_PARAMETERIZED_INSERT = + "insert into foo (id, value) values (1, 'One')"; + private static final String NON_PARAMETERIZED_UPDATE = "update foo set value='Zero' where id=0"; + private static final String PARAMETERIZED_INSERT = + "insert into foo (id, value) values (@p1, @p2)"; + private static final String PARAMETERIZED_UPDATE = "update foo set value=@p1 where id=@p2"; + + @BeforeClass + public static void setup() { + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.of(NON_PARAMETERIZED_INSERT), 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.of(NON_PARAMETERIZED_UPDATE), 1L)); + + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.newBuilder(PARAMETERIZED_INSERT) + .bind("p1") + .to(1L) + .bind("p2") + .to("One") + .build(), + 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.newBuilder(PARAMETERIZED_INSERT) + .bind("p1") + .to(2L) + .bind("p2") + .to("Two") + .build(), + 1L)); + 
mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.newBuilder(PARAMETERIZED_UPDATE) + .bind("p2") + .to(1L) + .bind("p1") + .to("One") + .build(), + 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.newBuilder(PARAMETERIZED_UPDATE) + .bind("p2") + .to(2L) + .bind("p1") + .to("Two") + .build(), + 1L)); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + public void testStatementExecute() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + try (Statement statement = connection.createStatement()) { + assertFalse(statement.execute(NON_PARAMETERIZED_INSERT)); + assertFalse(statement.execute(NON_PARAMETERIZED_UPDATE)); + } + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testStatementExecuteUpdate() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + try (Statement statement = connection.createStatement()) { + assertEquals(1, statement.executeUpdate(NON_PARAMETERIZED_INSERT)); + assertEquals(1, statement.executeUpdate(NON_PARAMETERIZED_UPDATE)); + } + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testStatementBatch() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + try (Statement 
statement = connection.createStatement()) { + repeat( + () -> { + statement.addBatch(NON_PARAMETERIZED_INSERT); + statement.addBatch(NON_PARAMETERIZED_UPDATE); + assertArrayEquals(new int[] {1, 1}, statement.executeBatch()); + }, + 2); + } + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testStatementCombination() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + try (Statement statement = connection.createStatement()) { + statement.executeUpdate(NON_PARAMETERIZED_UPDATE); + repeat( + () -> { + statement.addBatch(NON_PARAMETERIZED_INSERT); + statement.addBatch(NON_PARAMETERIZED_UPDATE); + assertArrayEquals(new int[] {1, 1}, statement.executeBatch()); + }, + ThreadLocalRandom.current().nextInt(1, 5)); + repeat( + () -> { + statement.execute(NON_PARAMETERIZED_INSERT); + statement.executeUpdate(NON_PARAMETERIZED_UPDATE); + }, + ThreadLocalRandom.current().nextInt(1, 5)); + } + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testPreparedStatementExecute() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_INSERT)) { + statement.setLong(1, 1L); + statement.setString(2, "One"); + assertFalse(statement.execute()); + statement.setLong(1, 2L); + statement.setString(2, "Two"); + assertFalse(statement.execute()); + } + try (PreparedStatement statement = 
connection.prepareStatement(PARAMETERIZED_UPDATE)) { + statement.setLong(2, 1L); + statement.setString(1, "One"); + assertFalse(statement.execute()); + statement.setLong(2, 2L); + statement.setString(1, "Two"); + assertFalse(statement.execute()); + } + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testPreparedStatementExecuteUpdate() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_INSERT)) { + statement.setLong(1, 1L); + statement.setString(2, "One"); + assertEquals(1, statement.executeUpdate()); + statement.setLong(1, 2L); + statement.setString(2, "Two"); + assertEquals(1, statement.executeUpdate()); + } + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_UPDATE)) { + statement.setLong(2, 1L); + statement.setString(1, "One"); + assertEquals(1, statement.executeUpdate()); + statement.setLong(2, 2L); + statement.setString(1, "Two"); + assertEquals(1, statement.executeUpdate()); + } + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testPreparedStatementBatch() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + repeat( + () -> { + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_INSERT)) { + statement.setLong(1, 1L); + statement.setString(2, "One"); + statement.addBatch(); + statement.setLong(1, 2L); + statement.setString(2, 
"Two"); + statement.addBatch(); + statement.executeBatch(); + } + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_UPDATE)) { + statement.setLong(2, 1L); + statement.setString(1, "One"); + statement.addBatch(); + statement.setLong(2, 2L); + statement.setString(1, "Two"); + statement.addBatch(); + statement.executeBatch(); + } + }, + 2); + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testPreparedStatementCombination() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + connection.unwrap(CloudSpannerJdbcConnection.class).setAutoBatchDml(true); + + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_INSERT)) { + statement.setLong(1, 1L); + statement.setString(2, "One"); + assertFalse(statement.execute()); + } + repeat( + () -> { + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_INSERT)) { + statement.setLong(1, 1L); + statement.setString(2, "One"); + statement.addBatch(); + statement.setLong(1, 2L); + statement.setString(2, "Two"); + statement.addBatch(); + assertArrayEquals(new int[] {1, 1}, statement.executeBatch()); + } + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_UPDATE)) { + statement.setLong(2, 1L); + statement.setString(1, "One"); + statement.addBatch(); + statement.setLong(2, 2L); + statement.setString(1, "Two"); + statement.addBatch(); + assertArrayEquals(new int[] {1, 1}, statement.executeBatch()); + } + }, + ThreadLocalRandom.current().nextInt(1, 5)); + repeat( + () -> { + try (PreparedStatement statement = connection.prepareStatement(PARAMETERIZED_INSERT)) { + statement.setLong(1, 1L); + statement.setString(2, "One"); + assertEquals(1, statement.executeUpdate()); + } + try (PreparedStatement statement = 
connection.prepareStatement(PARAMETERIZED_UPDATE)) { + statement.setLong(2, 2L); + statement.setString(1, "Two"); + assertFalse(statement.execute()); + } + }, + ThreadLocalRandom.current().nextInt(1, 5)); + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + interface SQLRunnable { + void run() throws SQLException; + } + + static void repeat(SQLRunnable runnable, int count) throws SQLException { + for (int i = 0; i < count; i++) { + runnable.run(); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/BatchMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/BatchMockServerTest.java new file mode 100644 index 000000000000..a6fb7a72dba3 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/BatchMockServerTest.java @@ -0,0 +1,100 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.FallbackToPartitionedDMLMockServerTest.createTransactionMutationLimitExceededException; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SpannerBatchUpdateException; +import com.google.cloud.spanner.TransactionMutationLimitExceededException; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.sql.BatchUpdateException; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class BatchMockServerTest extends AbstractMockServerTest { + private static final String NON_PARAMETERIZED_INSERT = + "insert into foo (id, value) values (1, 'One')"; + private static final String NON_PARAMETERIZED_UPDATE = "update foo set value='Zero' where id=0"; + private static final String LARGE_UPDATE = "update foo set value='Zero' where true"; + + @BeforeClass + public static void setup() { + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.of(NON_PARAMETERIZED_INSERT), 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.of(NON_PARAMETERIZED_UPDATE), 1L)); + mockSpanner.putStatementResult( + StatementResult.exception( + com.google.cloud.spanner.Statement.of(LARGE_UPDATE), + createTransactionMutationLimitExceededException())); + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + @Test + 
public void testStatementBatch() throws SQLException { + try (Connection connection = createJdbcConnection()) { + try (Statement statement = connection.createStatement()) { + statement.addBatch(NON_PARAMETERIZED_INSERT); + statement.addBatch(NON_PARAMETERIZED_UPDATE); + assertArrayEquals(new int[] {1, 1}, statement.executeBatch()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } + + @Test + public void testStatementBatchFailsDueToMutationLimit() throws SQLException { + try (Connection connection = createJdbcConnection()) { + try (Statement statement = connection.createStatement()) { + statement.addBatch(NON_PARAMETERIZED_INSERT); + statement.addBatch(LARGE_UPDATE); + + BatchUpdateException batchUpdateException = + assertThrows(BatchUpdateException.class, statement::executeBatch); + assertNotNull(batchUpdateException.getCause()); + assertEquals(SpannerBatchUpdateException.class, batchUpdateException.getCause().getClass()); + assertNotNull(batchUpdateException.getCause().getCause()); + assertEquals( + TransactionMutationLimitExceededException.class, + batchUpdateException.getCause().getCause().getClass()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ClientSideStatementHintsTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ClientSideStatementHintsTest.java new file mode 100644 index 000000000000..97bcd74b33b3 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ClientSideStatementHintsTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.RequestOptions.Priority; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class ClientSideStatementHintsTest extends AbstractMockServerTest { + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Before + public void setupDialect() { + if (this.dialect != currentDialect) { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(this.dialect)); + this.currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true", + 
getPort(), "proj", "inst", "db" + (dialect == Dialect.POSTGRESQL ? "pg" : "")); + } + + @Override + protected Connection createJdbcConnection() throws SQLException { + return DriverManager.getConnection(createUrl()); + } + + @Test + public void testStatementTagInHint() throws SQLException { + try (Connection connection = createJdbcConnection()) { + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + dialect == Dialect.POSTGRESQL + ? "/*@statement_tag='test-tag'*/SELECT 1" + : "@{statement_tag='test-tag'}SELECT 1")) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(1)); + assertFalse(resultSet.next()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertEquals("test-tag", request.getRequestOptions().getRequestTag()); + } + + @Test + public void testRpcPriorityInHint() throws SQLException { + try (Connection connection = createJdbcConnection()) { + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery( + dialect == Dialect.POSTGRESQL + ? 
"/*@rpc_priority=PRIORITY_LOW*/SELECT 1" + : "@{rpc_priority=PRIORITY_LOW}SELECT 1")) { + assertTrue(resultSet.next()); + assertEquals(1L, resultSet.getLong(1)); + assertFalse(resultSet.next()); + } + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertEquals(Priority.PRIORITY_LOW, request.getRequestOptions().getPriority()); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ConcurrentTransactionOnEmulatorTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ConcurrentTransactionOnEmulatorTest.java new file mode 100644 index 000000000000..eb38f9cc0e7d --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ConcurrentTransactionOnEmulatorTest.java @@ -0,0 +1,111 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.connection.SpannerPool; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Properties; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.testcontainers.DockerClientFactory; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.utility.DockerImageName; + +@RunWith(JUnit4.class) +public class ConcurrentTransactionOnEmulatorTest { + private static GenericContainer emulator; + + private static Properties properties; + + @BeforeClass + public static void startEmulator() { + boolean dockerAvailable = false; + try { + dockerAvailable = DockerClientFactory.instance().isDockerAvailable(); + } catch (Exception ignore) { + // Ignore, and just skip the test. 
+ } + assumeTrue(dockerAvailable); + + emulator = + new GenericContainer<>( + DockerImageName.parse("gcr.io/cloud-spanner-emulator/emulator:latest")) + .withExposedPorts(9010) + .waitingFor(Wait.forLogMessage(".*gRPC server listening at.*\\n", 1)); + emulator.start(); + properties = new Properties(); + properties.setProperty("autoConfigEmulator", "true"); + properties.setProperty( + "endpoint", String.format("%s:%d", emulator.getHost(), emulator.getMappedPort(9010))); + } + + @AfterClass + public static void cleanup() { + SpannerPool.closeSpannerPool(); + if (emulator != null) { + emulator.stop(); + } + } + + @Test + public void testRunConcurrentTransactions() throws SQLException { + String connectionUrl = + String.format( + "jdbc:cloudspanner:/projects/%s/instances/%s/databases/%s", + "emulator-project", "test-instance", "test-database"); + try (Connection connection1 = DriverManager.getConnection(connectionUrl, properties); + Connection connection2 = DriverManager.getConnection(connectionUrl, properties)) { + // Create a test table. + connection1 + .createStatement() + .execute("create table test (id int64, value string(max)) primary key (id)"); + + // Put both connections into autoCommit=false mode and verify that both connections can run + // a transaction using a single thread. + connection1.setAutoCommit(false); + connection2.setAutoCommit(false); + + connection1.createStatement().executeUpdate("insert into test (id, value) values (1, 'One')"); + connection2.createStatement().executeUpdate("insert into test (id, value) values (2, 'Two')"); + connection1.commit(); + connection2.commit(); + + // Verify that both transactions succeeded. 
+ connection1.setAutoCommit(true); + try (ResultSet resultSet = + connection1.createStatement().executeQuery("select * from test order by id")) { + assertTrue(resultSet.next()); + assertEquals("One", resultSet.getString("value")); + assertTrue(resultSet.next()); + assertEquals("Two", resultSet.getString("value")); + assertFalse(resultSet.next()); + } + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ConnectionPropertiesFileGenerator.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ConnectionPropertiesFileGenerator.java new file mode 100644 index 000000000000..8d7b7e432b40 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ConnectionPropertiesFileGenerator.java @@ -0,0 +1,73 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.connection.ConnectionProperty; +import java.io.BufferedWriter; +import java.io.FileWriter; +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Collectors; + +/** Generator for the documentation/connection_properties.md file. */ +public class ConnectionPropertiesFileGenerator { + + public static void main(String[] args) throws IOException { + String filename = args.length > 0 ? 
args[0] : "documentation/connection_properties.md"; + StringBuilder builder = + new StringBuilder("# Supported Connection Properties\n\n") + .append( + "This file contains all supported connection properties for the Spanner JDBC driver. " + + "These properties can be specified both in the connection URL and in the Properties map " + + "that is used to create a connection.\n\n" + + "The 'Context' value indicates whether the property can only be set when a connection is " + + "created (STARTUP), or whether the value can also be changed after a connection has been " + + "created.\n\n"); + builder.append("| Name | Description | Default | Enum values | Context |\n"); + builder.append("|------|-------------|---------|-------------|---------|\n"); + for (ConnectionProperty connectionProperty : + ConnectionPropertiesHelper.VALID_CONNECTION_PROPERTIES) { + builder + .append("| ") + .append(connectionProperty.getName()) + .append(" | ") + .append(connectionProperty.getDescription().replace("\n", " ")) + .append(" | ") + .append( + connectionProperty.getDefaultValue() == null + ? "" + : connectionProperty.getDefaultValue().toString()) + .append(" | ") + .append(getValidValues(connectionProperty)) + .append(" | ") + .append(connectionProperty.getContext()) + .append(" |\n"); + } + try (BufferedWriter writer = new BufferedWriter(new FileWriter(filename))) { + writer.write(builder.toString()); + } + } + + static String getValidValues(ConnectionProperty connectionProperty) { + return connectionProperty.getValidValues() == null + ? 
"" + : Arrays.stream(connectionProperty.getValidValues()) + .map(Objects::toString) + .collect(Collectors.joining(", ")); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/DdlMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/DdlMockServerTest.java new file mode 100644 index 000000000000..0646a73bc497 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/DdlMockServerTest.java @@ -0,0 +1,169 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+
+import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult;
+import com.google.cloud.spanner.Statement;
+import com.google.cloud.spanner.connection.AbstractMockServerTest;
+import com.google.common.collect.ImmutableList;
+import com.google.longrunning.Operation;
+import com.google.protobuf.Any;
+import com.google.protobuf.Empty;
+import com.google.rpc.Code;
+import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse;
+import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.List;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+@RunWith(JUnit4.class)
+public class DdlMockServerTest extends AbstractMockServerTest { // DDL behavior against an in-process mock Spanner
+
+  private String createUrl(boolean autoCommit) { // plain-text URL to the mock server on a dynamic port
+    return String.format(
+        "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true;autoCommit=%s",
+        getPort(), "proj", "inst", "db", autoCommit);
+  }
+
+  private Connection createConnection(boolean autoCommit) throws SQLException {
+    return DriverManager.getConnection(createUrl(autoCommit));
+  }
+
+  @Test
+  public void testGetDatabaseDdl() throws SQLException {
+    List<String> expectedDdl = // typed List<String> instead of raw List
+        ImmutableList.of(
+            "create table foo (id int64) primary key (id)",
+            "create table bar (id int64) primary key (id)");
+    mockDatabaseAdmin.addResponse(
+        GetDatabaseDdlResponse.newBuilder().addAllStatements(expectedDdl).build());
+
+    try (Connection connection = createConnection(/* autoCommit= */ true)) {
+      CloudSpannerJdbcConnection spannerJdbcConnection =
+          connection.unwrap(CloudSpannerJdbcConnection.class);
+      List<String> ddl = // typed List<String> instead of raw List
+          spannerJdbcConnection
+              .getSpanner()
+              .getDatabaseAdminClient()
+              .getDatabaseDdl(
+                  spannerJdbcConnection.getDatabaseId().getInstanceId().getInstance(),
+                  spannerJdbcConnection.getDatabaseId().getDatabase());
+      assertEquals(expectedDdl, ddl);
+    }
+  }
+
+  @Test
+  public void testDdlInAutoCommitIsTrue_succeeds() throws SQLException {
+    mockDatabaseAdmin.addResponse(
+        Operation.newBuilder()
+            .setDone(true)
+            .setResponse(Any.pack(Empty.getDefaultInstance()))
+            .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance()))
+            .build());
+
+    try (Connection connection = createConnection(/* autoCommit= */ true)) {
+      assertFalse( // execute returns false: DDL produces no result set
+          connection.createStatement().execute("create table foo (id int64) primary key (id)"));
+    }
+  }
+
+  @Test
+  public void testDdlInAutoCommitIsFalse_succeedsWithNoActiveTransaction() throws SQLException {
+    mockDatabaseAdmin.addResponse(
+        Operation.newBuilder()
+            .setDone(true)
+            .setResponse(Any.pack(Empty.getDefaultInstance()))
+            .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance()))
+            .build());
+
+    try (Connection connection = createConnection(/* autoCommit= */ false)) {
+      assertFalse(
+          connection.createStatement().execute("create table foo (id int64) primary key (id)"));
+    }
+  }
+
+  @Test
+  public void testDdlInAutoCommitIsFalse_failsWithActiveTransaction() throws SQLException {
+    mockSpanner.putStatementResult(
+        StatementResult.update(Statement.of("update foo set bar=1 where true"), 1L));
+
+    try (Connection connection = createConnection(/* autoCommit= */ false)) {
+      assertFalse(connection.createStatement().execute("update foo set bar=1 where true")); // starts a transaction
+      SQLException exception =
+          assertThrows(
+              SQLException.class,
+              () ->
+                  connection
+                      .createStatement()
+                      .execute("create table foo (id int64) primary key (id)"));
+      assertTrue(exception instanceof JdbcSqlException);
+      JdbcSqlException jdbcSqlException = (JdbcSqlException) exception;
+      assertEquals(Code.FAILED_PRECONDITION, jdbcSqlException.getCode()); // DDL not allowed mid-transaction
+    }
+  }
+
+  @Test
+  public void testDdlUsingStatementAndExecuteUpdate() throws SQLException {
+    for (boolean autoCommit : new boolean[] {true, false}) {
+      mockDatabaseAdmin.addResponse(
+          Operation.newBuilder()
+              .setDone(true)
+              .setResponse(Any.pack(Empty.getDefaultInstance()))
+              .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance()))
+              .build());
+
+      try (Connection connection = createConnection(autoCommit)) {
+        try (java.sql.Statement statement = connection.createStatement()) {
+          assertEquals(0, statement.executeUpdate("create table foo (id int64) primary key (id)")); // DDL update count is 0
+        }
+      }
+      assertEquals(1, mockDatabaseAdmin.getRequests().size()); // exactly one UpdateDatabaseDdl request sent
+      mockDatabaseAdmin.getRequests().clear();
+    }
+  }
+
+  @Test
+  public void testDdlUsingPreparedStatementAndExecuteUpdate() throws SQLException {
+    for (boolean autoCommit : new boolean[] {true, false}) {
+      mockDatabaseAdmin.addResponse(
+          Operation.newBuilder()
+              .setDone(true)
+              .setResponse(Any.pack(Empty.getDefaultInstance()))
+              .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance()))
+              .build());
+
+      try (Connection connection = createConnection(autoCommit)) {
+        try (PreparedStatement preparedStatement =
+            connection.prepareStatement("create table foo (id int64) primary key (id)")) {
+          assertEquals(0, preparedStatement.executeUpdate());
+        }
+      }
+      assertEquals(1, mockDatabaseAdmin.getRequests().size());
+      mockDatabaseAdmin.getRequests().clear();
+    }
+  }
+}
diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ExecuteMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ExecuteMockServerTest.java
new file mode 100644
index 000000000000..c226bd6731eb
--- /dev/null
+++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/ExecuteMockServerTest.java
@@ -0,0 +1,1038 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.longrunning.Operation; +import com.google.protobuf.Any; +import com.google.protobuf.Empty; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.rpc.Code; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import 
java.util.Objects; +import org.junit.Before; +import org.junit.Test; +import org.junit.function.ThrowingRunnable; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * Test class for verifying that the methods execute, executeQuery, and executeUpdate work as + * intended. It also verifies that they always also include any comments in the statement for the + * PostgreSQL dialect, as these may contain hints. + */ +@RunWith(Parameterized.class) +public class ExecuteMockServerTest extends AbstractMockServerTest { + private static final IllegalStateException REQUEST_NOT_FOUND = + new IllegalStateException("request not found"); + private static Dialect currentDialect; + + @Parameters(name = "dialect = {0}") + public static Object[] parameters() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private static final String DDL = "create table my_table"; + private static final long LARGE_UPDATE_COUNT = 2L * Integer.MAX_VALUE; + + private String query; + private String dml; + private String largeDml; + private String dmlReturning; + private String clientSideQuery; + private String clientSideUpdate; + + @Before + public void setupResults() { + // Only register new results if the dialect has changed. + if (!Objects.equals(currentDialect, dialect)) { + query = + dialect == Dialect.POSTGRESQL + ? "/*@ lock_scanned_ranges = exclusive */ select * from my_table" + : "select * from my_table"; + dml = + dialect == Dialect.POSTGRESQL + ? "/*@ lock_scanned_ranges = exclusive */ insert into my_table (id, value) values (1, 'One')" + : "insert into my_table (id, value) values (1, 'One')"; + String DML_THEN_RETURN_ID = + dml + (dialect == Dialect.POSTGRESQL ? "\nRETURNING \"id\"" : "\nTHEN RETURN `id`"); + largeDml = + dialect == Dialect.POSTGRESQL + ? 
"/*@ lock_scanned_ranges = exclusive */ update my_table set value='new value' where true" + : "update my_table set value='new value' where true"; + String LARGE_DML_THEN_RETURN_ID = + largeDml + (dialect == Dialect.POSTGRESQL ? "\nRETURNING \"id\"" : "\nTHEN RETURN `id`"); + dmlReturning = + dialect == Dialect.POSTGRESQL + ? "/*@ lock_scanned_ranges = exclusive */ insert into my_table (id, value) values (1, 'One') RETURNING *" + : "insert into my_table (id, value) values (1, 'One') THEN RETURN *"; + clientSideQuery = + dialect == Dialect.POSTGRESQL ? "show spanner.readonly" : "show variable readonly"; + clientSideUpdate = + dialect == Dialect.POSTGRESQL ? "set spanner.readonly=false" : "set readonly=false"; + + // This forces a refresh of the Spanner instance that is used for a connection, which again is + // needed in order to refresh the dialect of the database. + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult(StatementResult.detectDialectResult(dialect)); + + super.setupResults(); + + com.google.spanner.v1.ResultSet resultSet = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .setName("id") + .build()) + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .setName("value") + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .addValues(Value.newBuilder().setStringValue("One").build()) + .build()) + .build(); + com.google.spanner.v1.ResultSet returnIdResultSet = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .setName("id") + .build()) + .build()) + 
.build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build(); + mockSpanner.putStatementResult( + StatementResult.query(com.google.cloud.spanner.Statement.of(query), resultSet)); + mockSpanner.putStatementResult( + StatementResult.update(com.google.cloud.spanner.Statement.of(dml), 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + com.google.cloud.spanner.Statement.of(largeDml), LARGE_UPDATE_COUNT)); + mockSpanner.putStatementResult( + StatementResult.query( + com.google.cloud.spanner.Statement.of(dmlReturning), + resultSet.toBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.query( + com.google.cloud.spanner.Statement.of(DML_THEN_RETURN_ID), + returnIdResultSet.toBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.query( + com.google.cloud.spanner.Statement.of(LARGE_DML_THEN_RETURN_ID), + returnIdResultSet.toBuilder() + .setStats( + ResultSetStats.newBuilder().setRowCountExact(LARGE_UPDATE_COUNT).build()) + .build())); + mockDatabaseAdmin.addResponse( + Operation.newBuilder() + .setDone(true) + .setResponse(Any.pack(Empty.getDefaultInstance())) + .setMetadata(Any.pack(UpdateDatabaseDdlMetadata.getDefaultInstance())) + .build()); + } + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true", + getPort(), "proj", "inst", "db"); + } + + @Override + protected Connection createJdbcConnection() throws SQLException { + return DriverManager.getConnection(createUrl()); + } + + @Test + public void testStatementExecuteQuery() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = 
statement.executeQuery(query)) { + verifyResultSet(resultSet); + } + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(r -> r.getSql().equals(query)) + .findAny() + .orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasSingleUse()); + assertTrue(request.getTransaction().getSingleUse().hasReadOnly()); + assertFalse(request.getLastStatement()); + + try (ResultSet resultSet = statement.executeQuery(dmlReturning)) { + verifyResultSet(resultSet); + } + request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(r -> r.getSql().equals(dmlReturning)) + .findAny() + .orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getLastStatement()); + + try (ResultSet resultSet = statement.executeQuery(clientSideQuery)) { + verifyClientSideResultSet(resultSet); + } + verifyException(() -> statement.executeQuery(dml)); + verifyException(() -> statement.executeQuery(largeDml)); + verifyException(() -> statement.executeQuery(DDL)); + verifyException(() -> statement.executeQuery(clientSideUpdate)); + } + } + + @Test + public void testStatementExecuteUpdate() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + assertEquals(1, statement.executeUpdate(dml)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(r -> r.getSql().equals(dml)) + .findAny() + .orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getLastStatement()); + + assertEquals(0, statement.executeUpdate(DDL)); + + connection.setAutoCommit(false); + assertEquals(0, statement.executeUpdate(clientSideUpdate)); + connection.setAutoCommit(true); + + 
verifyOverflow(() -> statement.executeUpdate(largeDml)); + verifyException(() -> statement.executeUpdate(query)); + verifyException(() -> statement.executeUpdate(dmlReturning)); + verifyException(() -> statement.executeUpdate(clientSideQuery)); + } + } + + @Test + public void testStatementExecuteUpdateReturnGeneratedKeys() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + // TODO: Add tests for RETURN_GENERATED_KEYS when that is supported. + assertEquals(1, statement.executeUpdate(dml, Statement.NO_GENERATED_KEYS)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .findAny() + .orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getLastStatement()); + + assertEquals(0, statement.executeUpdate(DDL, Statement.NO_GENERATED_KEYS)); + assertEquals(0, statement.executeUpdate(clientSideUpdate, Statement.NO_GENERATED_KEYS)); + + verifyOverflow(() -> statement.executeUpdate(largeDml, Statement.NO_GENERATED_KEYS)); + verifyException(() -> statement.executeUpdate(query, Statement.NO_GENERATED_KEYS)); + verifyException(() -> statement.executeUpdate(dmlReturning, Statement.NO_GENERATED_KEYS)); + verifyException(() -> statement.executeUpdate(clientSideQuery, Statement.NO_GENERATED_KEYS)); + } + } + + @Test + public void testStatementExecuteUpdateReturnColumnNames() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + assertEquals(1, statement.executeUpdate(dml, new String[] {"id"})); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .findAny() + .orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); 
+ assertTrue(request.getLastStatement()); + + assertEquals(0, statement.executeUpdate(DDL, new String[] {"id"})); + assertEquals(0, statement.executeUpdate(clientSideUpdate, new String[] {"id"})); + + verifyOverflow(() -> statement.executeUpdate(largeDml, new String[] {"id"})); + verifyException( + () -> statement.executeUpdate(query, new String[] {"id"}), Code.FAILED_PRECONDITION); + assertEquals(1, statement.executeUpdate(dmlReturning, new String[] {"id"})); + verifyException( + () -> statement.executeUpdate(clientSideQuery, new String[] {"id"}), + Code.FAILED_PRECONDITION); + } + } + + @Test + public void testStatementExecuteUpdateReturnColumnIndexes() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + assertEquals(1, statement.executeUpdate(dml, new int[] {1})); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .findAny() + .orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getLastStatement()); + + assertEquals(0, statement.executeUpdate(DDL, new int[] {1})); + assertEquals(0, statement.executeUpdate(clientSideUpdate, new int[] {1})); + verifyOverflow(() -> statement.executeUpdate(largeDml, new int[] {1})); + verifyException(() -> statement.executeUpdate(query, new int[] {1})); + verifyException(() -> statement.executeUpdate(dmlReturning, new int[] {1})); + verifyException(() -> statement.executeUpdate(clientSideQuery, new int[] {1})); + } + } + + @Test + public void testStatementLargeExecuteUpdate() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + assertEquals(1L, statement.executeLargeUpdate(dml)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .findAny() + 
.orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getLastStatement()); + + assertEquals(0L, statement.executeLargeUpdate(DDL)); + assertEquals(0L, statement.executeLargeUpdate(clientSideUpdate)); + assertEquals(LARGE_UPDATE_COUNT, statement.executeLargeUpdate(largeDml)); + verifyException(() -> statement.executeLargeUpdate(query)); + verifyException(() -> statement.executeLargeUpdate(dmlReturning)); + verifyException(() -> statement.executeLargeUpdate(clientSideQuery)); + } + } + + @Test + public void testStatementExecuteLargeUpdateReturnGeneratedKeys() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + // TODO: Add tests for RETURN_GENERATED_KEYS when that is supported. + assertEquals(1, statement.executeLargeUpdate(dml, Statement.NO_GENERATED_KEYS)); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .findAny() + .orElseThrow(() -> REQUEST_NOT_FOUND); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertTrue(request.getLastStatement()); + + assertEquals(0, statement.executeLargeUpdate(DDL, Statement.NO_GENERATED_KEYS)); + assertEquals(0, statement.executeLargeUpdate(clientSideUpdate, Statement.NO_GENERATED_KEYS)); + assertEquals( + LARGE_UPDATE_COUNT, statement.executeLargeUpdate(largeDml, Statement.NO_GENERATED_KEYS)); + verifyException(() -> statement.executeLargeUpdate(query, Statement.NO_GENERATED_KEYS)); + verifyException( + () -> statement.executeLargeUpdate(dmlReturning, Statement.NO_GENERATED_KEYS)); + verifyException( + () -> statement.executeLargeUpdate(clientSideQuery, Statement.NO_GENERATED_KEYS)); + } + } + + @Test + public void testStatementExecuteLargeUpdateReturnColumnNames() throws SQLException { + try (Connection 
connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + assertEquals(1, statement.executeLargeUpdate(dml, new String[] {"id"})); + assertEquals(0, statement.executeLargeUpdate(DDL, new String[] {"id"})); + assertEquals(0, statement.executeLargeUpdate(clientSideUpdate, new String[] {"id"})); + assertEquals(LARGE_UPDATE_COUNT, statement.executeLargeUpdate(largeDml, new String[] {"id"})); + verifyException( + () -> statement.executeLargeUpdate(query, new String[] {"id"}), Code.FAILED_PRECONDITION); + assertEquals(1L, statement.executeLargeUpdate(dmlReturning, new String[] {"id"})); + verifyException( + () -> statement.executeLargeUpdate(clientSideQuery, new String[] {"id"}), + Code.FAILED_PRECONDITION); + } + } + + @Test + public void testStatementExecuteLargeUpdateReturnColumnIndexes() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + assertEquals(1, statement.executeLargeUpdate(dml, new int[] {1})); + assertEquals(0, statement.executeLargeUpdate(DDL, new int[] {1})); + assertEquals(0, statement.executeLargeUpdate(clientSideUpdate, new int[] {1})); + assertEquals(LARGE_UPDATE_COUNT, statement.executeLargeUpdate(largeDml, new int[] {1})); + verifyException(() -> statement.executeLargeUpdate(query, new int[] {1})); + verifyException(() -> statement.executeLargeUpdate(dmlReturning, new int[] {1})); + verifyException(() -> statement.executeLargeUpdate(clientSideQuery, new int[] {1})); + } + } + + @Test + public void testStatementExecute() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + verifyUpdateCount(statement, () -> statement.execute(dml), 1L); + verifyUpdateCount(statement, () -> statement.execute(largeDml), LARGE_UPDATE_COUNT); + verifyUpdateCount(statement, () -> statement.execute(DDL), Statement.SUCCESS_NO_INFO); + verifyUpdateCount( + statement, () -> 
statement.execute(clientSideUpdate), Statement.SUCCESS_NO_INFO); + verifyResultSet(statement, () -> statement.execute(query)); + verifyResultSet(statement, () -> statement.execute(dmlReturning)); + verifyResultSet(statement, () -> statement.execute(clientSideQuery)); + } + } + + @Test + public void testStatementExecuteReturnGeneratedKeys() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + // TODO: Add tests for RETURN_GENERATED_KEYS when that is supported. + verifyUpdateCount(statement, () -> statement.execute(dml, Statement.NO_GENERATED_KEYS), 1L); + verifyUpdateCount( + statement, + () -> statement.execute(largeDml, Statement.NO_GENERATED_KEYS), + LARGE_UPDATE_COUNT); + verifyUpdateCount( + statement, + () -> statement.execute(DDL, Statement.NO_GENERATED_KEYS), + Statement.SUCCESS_NO_INFO); + verifyUpdateCount( + statement, + () -> statement.execute(clientSideUpdate, Statement.NO_GENERATED_KEYS), + Statement.SUCCESS_NO_INFO); + verifyResultSet(statement, () -> statement.execute(query, Statement.NO_GENERATED_KEYS)); + verifyResultSet( + statement, () -> statement.execute(dmlReturning, Statement.NO_GENERATED_KEYS)); + verifyResultSet( + statement, () -> statement.execute(clientSideQuery, Statement.NO_GENERATED_KEYS)); + } + } + + @Test + public void testStatementExecuteReturnColumnNames() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + verifyUpdateCount(statement, () -> statement.execute(dml, new String[] {"id"}), 1L); + verifyUpdateCount( + statement, () -> statement.execute(largeDml, new String[] {"id"}), LARGE_UPDATE_COUNT); + verifyUpdateCount( + statement, () -> statement.execute(DDL, new String[] {"id"}), Statement.SUCCESS_NO_INFO); + verifyUpdateCount( + statement, + () -> statement.execute(clientSideUpdate, new String[] {"id"}), + Statement.SUCCESS_NO_INFO); + verifyResultSet(statement, 
() -> statement.execute(query, new String[] {"id"})); + verifyResultSet(statement, () -> statement.execute(dmlReturning, new String[] {"id"})); + verifyResultSet(statement, () -> statement.execute(clientSideQuery, new String[] {"id"})); + } + } + + @Test + public void testStatementExecuteReturnColumnIndexes() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + verifyUpdateCount(statement, () -> statement.execute(dml, new int[] {1}), 1L); + verifyUpdateCount( + statement, () -> statement.execute(largeDml, new int[] {1}), LARGE_UPDATE_COUNT); + verifyUpdateCount( + statement, () -> statement.execute(DDL, new int[] {1}), Statement.SUCCESS_NO_INFO); + verifyUpdateCount( + statement, + () -> statement.execute(clientSideUpdate, new int[] {1}), + Statement.SUCCESS_NO_INFO); + verifyResultSet(statement, () -> statement.execute(query, new int[] {1})); + verifyResultSet(statement, () -> statement.execute(dmlReturning, new int[] {1})); + verifyResultSet(statement, () -> statement.execute(clientSideQuery, new int[] {1})); + } + } + + @Test + public void testPreparedStatementExecuteQuery() throws SQLException { + try (Connection connection = createJdbcConnection()) { + try (ResultSet resultSet = connection.prepareStatement(query).executeQuery()) { + verifyResultSet(resultSet); + } + try (ResultSet resultSet = connection.prepareStatement(dmlReturning).executeQuery()) { + verifyResultSet(resultSet); + } + try (ResultSet resultSet = connection.prepareStatement(clientSideQuery).executeQuery()) { + verifyClientSideResultSet(resultSet); + } + verifyException(() -> connection.prepareStatement(dml).executeQuery()); + verifyException(() -> connection.prepareStatement(largeDml).executeQuery()); + verifyException(() -> connection.prepareStatement(DDL).executeQuery()); + verifyException(() -> connection.prepareStatement(clientSideUpdate).executeQuery()); + } + } + + @Test + public void 
testPreparedStatementExecuteUpdate() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertEquals(1, connection.prepareStatement(dml).executeUpdate()); + assertEquals(0, connection.prepareStatement(DDL).executeUpdate()); + assertEquals(0, connection.prepareStatement(clientSideUpdate).executeUpdate()); + verifyOverflow(() -> connection.prepareStatement(largeDml).executeUpdate()); + verifyException(() -> connection.prepareStatement(query).executeUpdate()); + verifyException( + () -> connection.prepareStatement(dmlReturning).executeUpdate(), Code.INVALID_ARGUMENT); + verifyException(() -> connection.prepareStatement(clientSideQuery).executeUpdate()); + } + } + + @Test + public void testPreparedStatementExecuteUpdateReturnGeneratedKeys() throws SQLException { + try (Connection connection = createJdbcConnection()) { + // TODO: Add tests for RETURN_GENERATED_KEYS when that is supported. + assertEquals( + 1, connection.prepareStatement(dml, Statement.NO_GENERATED_KEYS).executeUpdate()); + assertEquals( + 0, connection.prepareStatement(DDL, Statement.NO_GENERATED_KEYS).executeUpdate()); + assertEquals( + 0, + connection + .prepareStatement(clientSideUpdate, Statement.NO_GENERATED_KEYS) + .executeUpdate()); + verifyOverflow( + () -> connection.prepareStatement(largeDml, Statement.NO_GENERATED_KEYS).executeUpdate()); + verifyException( + () -> connection.prepareStatement(query, Statement.NO_GENERATED_KEYS).executeUpdate()); + verifyException( + () -> + connection + .prepareStatement(dmlReturning, Statement.NO_GENERATED_KEYS) + .executeUpdate(), + Code.INVALID_ARGUMENT); + verifyException( + () -> + connection + .prepareStatement(clientSideQuery, Statement.NO_GENERATED_KEYS) + .executeUpdate()); + } + } + + @Test + public void testPreparedStatementExecuteUpdateReturnColumnNames() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertEquals(1, connection.prepareStatement(dml, new String[] 
{"id"}).executeUpdate()); + assertEquals(0, connection.prepareStatement(DDL, new String[] {"id"}).executeUpdate()); + assertEquals( + 0, connection.prepareStatement(clientSideUpdate, new String[] {"id"}).executeUpdate()); + verifyOverflow( + () -> connection.prepareStatement(largeDml, new String[] {"id"}).executeUpdate()); + verifyException( + () -> connection.prepareStatement(query, new String[] {"id"}).executeUpdate(), + Code.FAILED_PRECONDITION); + assertEquals( + 1, connection.prepareStatement(dmlReturning, new String[] {"id"}).executeUpdate()); + verifyException( + () -> connection.prepareStatement(clientSideQuery, new String[] {"id"}).executeUpdate(), + Code.FAILED_PRECONDITION); + } + } + + @Test + public void testPreparedStatementExecuteUpdateReturnColumnIndexes() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertEquals(1, connection.prepareStatement(dml, new int[] {1}).executeUpdate()); + assertEquals(0, connection.prepareStatement(DDL, new int[] {1}).executeUpdate()); + assertEquals(0, connection.prepareStatement(clientSideUpdate, new int[] {1}).executeUpdate()); + verifyOverflow(() -> connection.prepareStatement(largeDml, new int[] {1}).executeUpdate()); + verifyException(() -> connection.prepareStatement(query, new int[] {1}).executeUpdate()); + verifyException( + () -> connection.prepareStatement(dmlReturning, new int[] {1}).executeUpdate(), + Code.INVALID_ARGUMENT); + verifyException( + () -> connection.prepareStatement(clientSideQuery, new int[] {1}).executeUpdate()); + } + } + + @Test + public void testPreparedStatementLargeExecuteUpdate() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertEquals(1L, connection.prepareStatement(dml).executeLargeUpdate()); + assertEquals(0L, connection.prepareStatement(DDL).executeLargeUpdate()); + assertEquals(0L, connection.prepareStatement(clientSideUpdate).executeLargeUpdate()); + assertEquals(LARGE_UPDATE_COUNT, 
connection.prepareStatement(largeDml).executeLargeUpdate()); + verifyException(() -> connection.prepareStatement(query).executeLargeUpdate()); + verifyException( + () -> connection.prepareStatement(dmlReturning).executeLargeUpdate(), + Code.INVALID_ARGUMENT); + verifyException(() -> connection.prepareStatement(clientSideQuery).executeLargeUpdate()); + } + } + + @Test + public void testPreparedStatementExecuteLargeUpdateReturnGeneratedKeys() throws SQLException { + try (Connection connection = createJdbcConnection()) { + // TODO: Add tests for RETURN_GENERATED_KEYS when that is supported. + assertEquals( + 1, connection.prepareStatement(dml, Statement.NO_GENERATED_KEYS).executeLargeUpdate()); + assertEquals( + 0, connection.prepareStatement(DDL, Statement.NO_GENERATED_KEYS).executeLargeUpdate()); + assertEquals( + 0, + connection + .prepareStatement(clientSideUpdate, Statement.NO_GENERATED_KEYS) + .executeLargeUpdate()); + assertEquals( + LARGE_UPDATE_COUNT, + connection.prepareStatement(largeDml, Statement.NO_GENERATED_KEYS).executeLargeUpdate()); + verifyException( + () -> + connection.prepareStatement(query, Statement.NO_GENERATED_KEYS).executeLargeUpdate()); + verifyException( + () -> + connection + .prepareStatement(dmlReturning, Statement.NO_GENERATED_KEYS) + .executeLargeUpdate(), + Code.INVALID_ARGUMENT); + verifyException( + () -> + connection + .prepareStatement(clientSideQuery, Statement.NO_GENERATED_KEYS) + .executeLargeUpdate()); + } + } + + @Test + public void testPreparedStatementExecuteLargeUpdateReturnColumnNames() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertEquals(1, connection.prepareStatement(dml, new String[] {"id"}).executeLargeUpdate()); + assertEquals(0, connection.prepareStatement(DDL, new String[] {"id"}).executeLargeUpdate()); + assertEquals( + 0, + connection.prepareStatement(clientSideUpdate, new String[] {"id"}).executeLargeUpdate()); + assertEquals( + LARGE_UPDATE_COUNT, + 
connection.prepareStatement(largeDml, new String[] {"id"}).executeLargeUpdate()); + verifyException( + () -> connection.prepareStatement(query, new String[] {"id"}).executeLargeUpdate(), + Code.FAILED_PRECONDITION); + assertEquals( + 1L, connection.prepareStatement(dmlReturning, new String[] {"id"}).executeLargeUpdate()); + verifyException( + () -> + connection + .prepareStatement(clientSideQuery, new String[] {"id"}) + .executeLargeUpdate(), + Code.FAILED_PRECONDITION); + } + } + + @Test + public void testPreparedStatementExecuteLargeUpdateReturnColumnIndexes() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertEquals(1, connection.prepareStatement(dml, new int[] {1}).executeLargeUpdate()); + assertEquals(0, connection.prepareStatement(DDL, new int[] {1}).executeLargeUpdate()); + assertEquals( + 0, connection.prepareStatement(clientSideUpdate, new int[] {1}).executeLargeUpdate()); + assertEquals( + LARGE_UPDATE_COUNT, + connection.prepareStatement(largeDml, new int[] {1}).executeLargeUpdate()); + verifyException(() -> connection.prepareStatement(query, new int[] {1}).executeLargeUpdate()); + verifyException( + () -> connection.prepareStatement(dmlReturning, new int[] {1}).executeLargeUpdate(), + Code.INVALID_ARGUMENT); + verifyException( + () -> connection.prepareStatement(clientSideQuery, new int[] {1}).executeLargeUpdate()); + } + } + + @Test + public void testPreparedStatementExecute() throws SQLException { + try (Connection connection = createJdbcConnection()) { + verifyPreparedUpdateCount(connection.prepareStatement(dml), PreparedStatement::execute, 1L); + verifyPreparedUpdateCount( + connection.prepareStatement(largeDml), PreparedStatement::execute, LARGE_UPDATE_COUNT); + verifyPreparedUpdateCount( + connection.prepareStatement(DDL), PreparedStatement::execute, Statement.SUCCESS_NO_INFO); + verifyPreparedUpdateCount( + connection.prepareStatement(clientSideUpdate), + PreparedStatement::execute, + 
Statement.SUCCESS_NO_INFO); + verifyPreparedResultSet(connection.prepareStatement(query), PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(dmlReturning), PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(clientSideQuery), PreparedStatement::execute); + } + } + + @Test + public void testPreparedStatementExecuteReturnGeneratedKeys() throws SQLException { + try (Connection connection = createJdbcConnection()) { + // TODO: Add tests for RETURN_GENERATED_KEYS when that is supported. + verifyPreparedUpdateCount( + connection.prepareStatement(dml, Statement.NO_GENERATED_KEYS), + PreparedStatement::execute, + 1L); + verifyPreparedUpdateCount( + connection.prepareStatement(largeDml, Statement.NO_GENERATED_KEYS), + PreparedStatement::execute, + LARGE_UPDATE_COUNT); + verifyPreparedUpdateCount( + connection.prepareStatement(DDL, Statement.NO_GENERATED_KEYS), + PreparedStatement::execute, + Statement.SUCCESS_NO_INFO); + verifyPreparedUpdateCount( + connection.prepareStatement(clientSideUpdate, Statement.NO_GENERATED_KEYS), + PreparedStatement::execute, + Statement.SUCCESS_NO_INFO); + verifyPreparedResultSet( + connection.prepareStatement(query, Statement.NO_GENERATED_KEYS), + PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(dmlReturning, Statement.NO_GENERATED_KEYS), + PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(clientSideQuery, Statement.NO_GENERATED_KEYS), + PreparedStatement::execute); + } + } + + @Test + public void testPreparedStatementExecuteReturnColumnNames() throws SQLException { + try (Connection connection = createJdbcConnection()) { + verifyPreparedUpdateCount( + connection.prepareStatement(dml, new String[] {"id"}), PreparedStatement::execute, 1L); + verifyPreparedUpdateCount( + connection.prepareStatement(largeDml, new String[] {"id"}), + PreparedStatement::execute, + LARGE_UPDATE_COUNT); + 
verifyPreparedUpdateCount( + connection.prepareStatement(DDL, new String[] {"id"}), + PreparedStatement::execute, + Statement.SUCCESS_NO_INFO); + verifyPreparedUpdateCount( + connection.prepareStatement(clientSideUpdate, new String[] {"id"}), + PreparedStatement::execute, + Statement.SUCCESS_NO_INFO); + verifyPreparedResultSet( + connection.prepareStatement(query, new String[] {"id"}), PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(dmlReturning, new String[] {"id"}), + PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(clientSideQuery, new String[] {"id"}), + PreparedStatement::execute); + } + } + + @Test + public void testPreparedStatementExecuteReturnColumnIndexes() throws SQLException { + try (Connection connection = createJdbcConnection()) { + verifyPreparedUpdateCount( + connection.prepareStatement(dml, new int[] {1}), PreparedStatement::execute, 1L); + verifyPreparedUpdateCount( + connection.prepareStatement(largeDml, new int[] {1}), + PreparedStatement::execute, + LARGE_UPDATE_COUNT); + verifyPreparedUpdateCount( + connection.prepareStatement(DDL, new int[] {1}), + PreparedStatement::execute, + Statement.SUCCESS_NO_INFO); + verifyPreparedUpdateCount( + connection.prepareStatement(clientSideUpdate, new int[] {1}), + PreparedStatement::execute, + Statement.SUCCESS_NO_INFO); + verifyPreparedResultSet( + connection.prepareStatement(query, new int[] {1}), PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(dmlReturning, new int[] {1}), PreparedStatement::execute); + verifyPreparedResultSet( + connection.prepareStatement(clientSideQuery, new int[] {1}), PreparedStatement::execute); + } + } + + private void verifyClientSideResultSet(ResultSet resultSet) throws SQLException { + assertNotNull(resultSet.getMetaData()); + assertEquals(1, resultSet.getMetaData().getColumnCount()); + assertTrue(resultSet.next()); + assertFalse(resultSet.getBoolean(1)); + 
assertFalse(resultSet.next()); + } + + private void verifyResultSet(ResultSet resultSet) throws SQLException { + assertNotNull(resultSet.getMetaData()); + assertEquals(2, resultSet.getMetaData().getColumnCount()); + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + private void verifyException(ThrowingRunnable runnable) { + verifyException(runnable, Code.INVALID_ARGUMENT); + } + + private void verifyOverflow(ThrowingRunnable runnable) { + verifyException(runnable, Code.OUT_OF_RANGE); + } + + private void verifyException(ThrowingRunnable runnable, Code code) { + SQLException exception = assertThrows(SQLException.class, runnable); + assertTrue(exception instanceof JdbcSqlException); + JdbcSqlException sqlException = (JdbcSqlException) exception; + assertEquals(code, sqlException.getCode()); + } + + interface ThrowingFunction<T> { + T apply() throws SQLException; + } + + interface ThrowingPreparedFunction<T> { + T apply(PreparedStatement statement) throws SQLException; + } + + private void verifyPreparedUpdateCount( + PreparedStatement statement, ThrowingPreparedFunction<Boolean> function, long updateCount) + throws SQLException { + verifyUpdateCount(statement, () -> function.apply(statement), updateCount); + } + + private void verifyUpdateCount( + Statement statement, ThrowingFunction<Boolean> function, long updateCount) + throws SQLException { + assertFalse(function.apply()); + if (updateCount > Integer.MAX_VALUE) { + verifyOverflow(statement::getUpdateCount); + } else { + assertEquals((int) updateCount, statement.getUpdateCount()); + } + assertEquals(updateCount, statement.getLargeUpdateCount()); + assertNull(statement.getResultSet()); + assertFalse(statement.getMoreResults()); + assertEquals(-1, statement.getUpdateCount()); + assertEquals(-1L, statement.getLargeUpdateCount()); + } + + private void verifyPreparedResultSet( + PreparedStatement statement, ThrowingPreparedFunction<Boolean> function) throws SQLException { + verifyResultSet(statement, () -> 
function.apply(statement)); + } + + private void verifyResultSet(Statement statement, ThrowingFunction function) + throws SQLException { + assertTrue(function.apply()); + assertEquals(-1, statement.getUpdateCount()); + assertEquals(-1L, statement.getLargeUpdateCount()); + assertNotNull(statement.getResultSet()); + try (ResultSet resultSet = statement.getResultSet()) { + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + assertFalse(statement.getMoreResults()); + assertEquals(-1, statement.getUpdateCount()); + assertEquals(-1L, statement.getLargeUpdateCount()); + } + + @Test + public void testInvalidExecuteUpdate_shouldNotLeakSession() throws SQLException { + int maxSessions = 1; + try (Connection connection = + new JdbcConnection( + createUrl(), + ConnectionOptions.newBuilder() + .setUri(createUrl().substring("jdbc:".length())) + .setSessionPoolOptions( + SessionPoolOptions.newBuilder() + .setMinSessions(1) + .setMaxSessions(maxSessions) + .setFailIfPoolExhausted() + .build()) + .build())) { + + for (int i = 0; i < (maxSessions + 1); i++) { + SQLException exception = + assertThrows( + SQLException.class, () -> connection.createStatement().executeUpdate(query)); + assertTrue(exception instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) exception; + // This would be RESOURCE_EXHAUSTED if the query leaked a session. + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + } + + private String getExtension() { + return dialect == Dialect.POSTGRESQL ? "spanner." 
: ""; + } + + @Test + public void testExecuteAutoBatchDml() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + connection.setAutoCommit(false); + + assertFalse(statement.execute(String.format("set %sauto_batch_dml = true", getExtension()))); + for (int i = 0; i < 3; i++) { + assertFalse(statement.execute(dml)); + assertEquals(1, statement.getUpdateCount()); + } + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + ExecuteBatchDmlRequest request = + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0); + assertEquals(3, request.getStatementsCount()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testLastStatement_AutoCommit_Query() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + //noinspection EmptyTryBlock + try (ResultSet ignore = statement.executeQuery(query)) {} + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getLastStatement()); + } + + @Test + public void testLastStatement_AutoCommit_Dml() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + statement.executeUpdate(dml); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertTrue(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getLastStatement()); + } + + @Test + public void testLastStatement_AutoCommit_DmlReturning() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + //noinspection EmptyTryBlock + try (ResultSet ignore = statement.executeQuery(dmlReturning)) {} + } + assertEquals(1, 
mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertTrue(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getLastStatement()); + } + + @Test + public void testLastStatement_AutoCommit_BatchDml() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + statement.addBatch(dml); + statement.addBatch(dml); + statement.executeBatch(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertTrue( + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0).getLastStatements()); + } + + @Test + public void testLastStatement_Transaction_Query() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + connection.setAutoCommit(false); + //noinspection EmptyTryBlock + try (ResultSet ignore = statement.executeQuery(query)) {} + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getLastStatement()); + } + + @Test + public void testLastStatement_Transaction_Dml() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + connection.setAutoCommit(false); + statement.executeUpdate(dml); + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getLastStatement()); + } + + @Test + public void testLastStatement_Transaction_DmlReturning() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + connection.setAutoCommit(false); + //noinspection EmptyTryBlock + try (ResultSet ignore = statement.executeQuery(dmlReturning)) {} + connection.commit(); + } + 
assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertFalse(mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getLastStatement()); + } + + @Test + public void testLastStatement_Transaction_BatchDml() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + connection.setAutoCommit(false); + statement.addBatch(dml); + statement.addBatch(dml); + statement.executeBatch(); + connection.commit(); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteBatchDmlRequest.class)); + assertFalse( + mockSpanner.getRequestsOfType(ExecuteBatchDmlRequest.class).get(0).getLastStatements()); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/FallbackToPartitionedDMLMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/FallbackToPartitionedDMLMockServerTest.java new file mode 100644 index 000000000000..587143a86432 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/FallbackToPartitionedDMLMockServerTest.java @@ -0,0 +1,259 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.TransactionMutationLimitExceededException; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.AutocommitDmlMode; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.protobuf.Any; +import com.google.rpc.Help; +import com.google.rpc.Help.Link; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Properties; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class FallbackToPartitionedDMLMockServerTest extends AbstractMockServerTest { + + static StatusRuntimeException createTransactionMutationLimitExceededException() { + Metadata.Key<byte[]> key = + Metadata.Key.of("grpc-status-details-bin", Metadata.BINARY_BYTE_MARSHALLER); + Help help = + Help.newBuilder() + .addLinks( + Link.newBuilder() + .setDescription("Cloud Spanner limits documentation.") + .setUrl("https://cloud.google.com/spanner/docs/limits") + .build()) + .build(); + com.google.rpc.Status status = + 
com.google.rpc.Status.newBuilder().addDetails(Any.pack(help)).build(); + + Metadata trailers = new Metadata(); + trailers.put(key, status.toByteArray()); + + return Status.INVALID_ARGUMENT + .withDescription("The transaction contains too many mutations.") + .asRuntimeException(trailers); + } + + @Test + public void testConnectionProperty() throws SQLException { + for (AutocommitDmlMode mode : AutocommitDmlMode.values()) { + Properties properties = new Properties(); + properties.put("autocommit_dml_mode", mode.name()); + try (Connection connection = + DriverManager.getConnection("jdbc:" + getBaseUrl(), properties)) { + assertEquals( + mode, connection.unwrap(CloudSpannerJdbcConnection.class).getAutocommitDmlMode()); + } + } + } + + @Test + public void testTransactionMutationLimitExceeded_isNotRetriedByDefault() throws SQLException { + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(createTransactionMutationLimitExceededException())); + + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(true); + assertEquals( + AutocommitDmlMode.TRANSACTIONAL, + connection.unwrap(CloudSpannerJdbcConnection.class).getAutocommitDmlMode()); + + SQLException exception = + assertThrows( + SQLException.class, + () -> + connection.createStatement().executeUpdate("update test set value=1 where true")); + assertNotNull(exception.getCause()); + assertEquals( + TransactionMutationLimitExceededException.class, exception.getCause().getClass()); + TransactionMutationLimitExceededException transactionMutationLimitExceededException = + (TransactionMutationLimitExceededException) exception.getCause(); + assertEquals(0, transactionMutationLimitExceededException.getSuppressed().length); + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testTransactionMutationLimitExceeded_canBeRetriedAsPDML() throws SQLException 
{ + String sql = "update test set value=1 where true"; + com.google.cloud.spanner.Statement statement = com.google.cloud.spanner.Statement.of(sql); + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(createTransactionMutationLimitExceededException())); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.update(statement, 100000L)); + + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(true); + connection + .unwrap(CloudSpannerJdbcConnection.class) + .setAutocommitDmlMode( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC); + + long updateCount = connection.createStatement().executeUpdate(sql); + assertEquals(100000L, updateCount); + } + // Verify that the request is retried as Partitioned DML. + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + // The transactional request uses inline-begin. + ExecuteSqlRequest transactionalRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(transactionalRequest.getTransaction().getBegin().hasReadWrite()); + + // Partitioned DML uses an explicit BeginTransaction RPC. + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(beginRequest.getOptions().hasPartitionedDml()); + ExecuteSqlRequest partitionedDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(partitionedDmlRequest.getTransaction().hasId()); + + // Partitioned DML transactions are not committed. 
+ assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testTransactionMutationLimitExceeded_retryAsPDMLFails() throws SQLException { + String sql = "insert into test (id, value) select -id, value from test"; + com.google.cloud.spanner.Statement statement = com.google.cloud.spanner.Statement.of(sql); + // The transactional update statement uses ExecuteSql(..). + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofException(createTransactionMutationLimitExceededException())); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.exception( + statement, + Status.INVALID_ARGUMENT + .withDescription("This statement is not supported with Partitioned DML") + .asRuntimeException())); + + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(true); + connection + .unwrap(CloudSpannerJdbcConnection.class) + .setAutocommitDmlMode( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC); + + // The connection throws TransactionMutationLimitExceededException if the retry using + // partitioned DML fails. The exception from the failed retry is returned as a suppressed + // exception of the TransactionMutationLimitExceededException. 
+ SQLException exception = + assertThrows(SQLException.class, () -> connection.createStatement().executeUpdate(sql)); + assertNotNull(exception.getCause()); + assertEquals( + TransactionMutationLimitExceededException.class, exception.getCause().getClass()); + TransactionMutationLimitExceededException transactionMutationLimitExceededException = + (TransactionMutationLimitExceededException) exception.getCause(); + assertEquals(1, transactionMutationLimitExceededException.getSuppressed().length); + assertEquals( + SpannerException.class, + transactionMutationLimitExceededException.getSuppressed()[0].getClass()); + SpannerException spannerException = + (SpannerException) transactionMutationLimitExceededException.getSuppressed()[0]; + assertEquals(ErrorCode.INVALID_ARGUMENT, spannerException.getErrorCode()); + assertTrue( + spannerException.getMessage(), + spannerException + .getMessage() + .contains("This statement is not supported with Partitioned DML")); + } + // Verify that the request was retried as Partitioned DML. + assertEquals(2, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + // The transactional request uses inline-begin. + ExecuteSqlRequest transactionalRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(transactionalRequest.getTransaction().getBegin().hasReadWrite()); + + // Partitioned DML uses an explicit BeginTransaction RPC. + assertEquals(1, mockSpanner.countRequestsOfType(BeginTransactionRequest.class)); + BeginTransactionRequest beginRequest = + mockSpanner.getRequestsOfType(BeginTransactionRequest.class).get(0); + assertTrue(beginRequest.getOptions().hasPartitionedDml()); + ExecuteSqlRequest partitionedDmlRequest = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(1); + assertTrue(partitionedDmlRequest.getTransaction().hasId()); + + // Partitioned DML transactions are not committed. 
+ assertEquals(0, mockSpanner.countRequestsOfType(CommitRequest.class)); + } + + @Test + public void testSqlStatements() throws SQLException { + for (Dialect dialect : Dialect.values()) { + SpannerPool.closeSpannerPool(); + mockSpanner.putStatementResult( + MockSpannerServiceImpl.StatementResult.detectDialectResult(dialect)); + String prefix = dialect == Dialect.POSTGRESQL ? "SPANNER." : ""; + + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(true); + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery(String.format("show variable %sautocommit_dml_mode", prefix))) { + assertTrue(resultSet.next()); + assertEquals( + AutocommitDmlMode.TRANSACTIONAL.name(), + resultSet.getString(String.format("%sAUTOCOMMIT_DML_MODE", prefix))); + assertFalse(resultSet.next()); + } + connection + .createStatement() + .execute( + String.format( + "set %sautocommit_dml_mode = 'transactional_with_fallback_to_partitioned_non_atomic'", + prefix)); + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery(String.format("show variable %sautocommit_dml_mode", prefix))) { + assertTrue(resultSet.next()); + assertEquals( + AutocommitDmlMode.TRANSACTIONAL_WITH_FALLBACK_TO_PARTITIONED_NON_ATOMIC.name(), + resultSet.getString(String.format("%sAUTOCOMMIT_DML_MODE", prefix))); + assertFalse(resultSet.next()); + } + } + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/IsolationLevelConverterTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/IsolationLevelConverterTest.java new file mode 100644 index 000000000000..2be9e5ea472c --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/IsolationLevelConverterTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.IsolationLevelConverter.convertToJdbc; +import static com.google.cloud.spanner.jdbc.IsolationLevelConverter.convertToSpanner; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; + +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class IsolationLevelConverterTest { + + @Test + public void testConvertToSpanner() throws SQLException { + assertEquals( + IsolationLevel.SERIALIZABLE, convertToSpanner(Connection.TRANSACTION_SERIALIZABLE)); + assertEquals( + IsolationLevel.REPEATABLE_READ, convertToSpanner(Connection.TRANSACTION_REPEATABLE_READ)); + + assertThrows( + SQLFeatureNotSupportedException.class, + () -> convertToSpanner(Connection.TRANSACTION_READ_COMMITTED)); + assertThrows( + SQLFeatureNotSupportedException.class, + () -> convertToSpanner(Connection.TRANSACTION_READ_UNCOMMITTED)); + assertThrows( + SQLFeatureNotSupportedException.class, () -> convertToSpanner(Connection.TRANSACTION_NONE)); + + assertThrows(IllegalArgumentException.class, () -> convertToSpanner(-1)); + } + + @Test + public void testConvertToJdbc() { + // There is no 'unspecified' isolation level in JDBC, so we convert this to the default + // SERIALIZABLE isolation level in Spanner. 
+ assertEquals( + Connection.TRANSACTION_SERIALIZABLE, + convertToJdbc(IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED)); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, convertToJdbc(IsolationLevel.SERIALIZABLE)); + assertEquals( + Connection.TRANSACTION_REPEATABLE_READ, convertToJdbc(IsolationLevel.REPEATABLE_READ)); + + assertThrows(IllegalArgumentException.class, () -> convertToJdbc(IsolationLevel.UNRECOGNIZED)); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcAbortedTransactionTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcAbortedTransactionTest.java new file mode 100644 index 000000000000..b7253df82741 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcAbortedTransactionTest.java @@ -0,0 +1,373 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.fail; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.admin.database.v1.MockDatabaseAdminImpl; +import com.google.cloud.spanner.admin.instance.v1.MockInstanceAdminImpl; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.cloud.spanner.connection.TransactionRetryListener; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcAbortedDueToConcurrentModificationException; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcAbortedException; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class JdbcAbortedTransactionTest { + private static final class TransactionRetryCounter 
implements TransactionRetryListener { + private int retriesFinished; + + @Override + public void retryStarting(Timestamp transactionStarted, long transactionId, int retryAttempt) {} + + @Override + public void retryFinished( + Timestamp transactionStarted, long transactionId, int retryAttempt, RetryResult result) { + retriesFinished++; + } + } + + private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1"); + private static final ResultSetMetadata SELECT1_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("COL1") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .setMetadata(SELECT1_METADATA) + .build(); + private static final Statement SELECT_RANDOM = Statement.of("SELECT * FROM RANDOM"); + private static final Statement UPDATE_STATEMENT = + Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2"); + private static final int UPDATE_COUNT = 1; + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + + @Parameter public boolean retryAbortsInternally; + + @Parameters(name = "retryAbortsInternally = {0}") + public static Collection<Object[]> data() { + List<Object[]> params = new ArrayList<>(); + params.add(new Object[] {Boolean.TRUE}); + params.add(new Object[] {Boolean.FALSE}); + return params; + } + + @BeforeClass + public static void startStaticServer() throws IOException { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. 
+ mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET)); + mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT)); + MockInstanceAdminImpl mockInstanceAdmin = new MockInstanceAdminImpl(); + MockDatabaseAdminImpl mockDatabaseAdmin = new MockDatabaseAdminImpl(); + InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = + NettyServerBuilder.forAddress(address) + .addService(mockSpanner) + .addService(mockInstanceAdmin) + .addService(mockDatabaseAdmin) + .build() + .start(); + } + + @AfterClass + public static void stopServer() throws Exception { + SpannerPool.closeSpannerPool(); + server.shutdown(); + server.awaitTermination(); + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true;retryAbortsInternally=%s", + server.getPort(), "proj", "inst", "db", Boolean.toString(retryAbortsInternally)); + } + + private Connection createConnection() throws SQLException { + Connection connection = DriverManager.getConnection(createUrl()); + CloudSpannerJdbcConnection cs = connection.unwrap(CloudSpannerJdbcConnection.class); + cs.addTransactionRetryListener(new TransactionRetryCounter()); + return connection; + } + + private int getRetryCount(Connection connection) throws SQLException { + return ((TransactionRetryCounter) + connection + .unwrap(CloudSpannerJdbcConnection.class) + .getTransactionRetryListenersFromConnection() + .next()) + .retriesFinished; + } + + @Test + public void testAutocommitUpdateAborted() throws SQLException { + // Updates in autocommit are always automatically retried. + // These retries are not picked up by the transaction retry listener, as that is only done for + // actual JDBC transactions that are retried. 
+ try (java.sql.Connection connection = createConnection()) { + mockSpanner.abortNextStatement(); + int updateCount = connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()); + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + } + } + + @Test + public void testTransactionalUpdateAborted() throws SQLException { + // Updates in transactional mode are automatically retried by default, but this can be switched + // off. + try (java.sql.Connection connection = createConnection()) { + connection.setAutoCommit(false); + mockSpanner.abortNextStatement(); + int updateCount = connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()); + if (!retryAbortsInternally) { + fail("missing expected exception"); + } + assertThat(updateCount).isEqualTo(UPDATE_COUNT); + assertThat(getRetryCount(connection)).isEqualTo(1); + } catch (JdbcAbortedException e) { + assertThat(retryAbortsInternally).isFalse(); + } + } + + @Test + public void testAutocommitBatchUpdateAborted() throws SQLException { + try (java.sql.Connection connection = createConnection()) { + mockSpanner.abortNextStatement(); + try (java.sql.Statement statement = connection.createStatement()) { + statement.addBatch(UPDATE_STATEMENT.getSql()); + statement.addBatch(UPDATE_STATEMENT.getSql()); + int[] updateCounts = statement.executeBatch(); + assertThat(updateCounts).asList().containsExactly(UPDATE_COUNT, UPDATE_COUNT); + } + } + } + + @Test + public void testTransactionalBatchUpdateAborted() throws SQLException { + try (java.sql.Connection connection = createConnection()) { + connection.setAutoCommit(false); + mockSpanner.abortNextStatement(); + try (java.sql.Statement statement = connection.createStatement()) { + statement.addBatch(UPDATE_STATEMENT.getSql()); + statement.addBatch(UPDATE_STATEMENT.getSql()); + int[] updateCounts = statement.executeBatch(); + if (!retryAbortsInternally) { + fail("missing expected exception"); + } + assertThat(updateCounts).asList().containsExactly(UPDATE_COUNT, 
UPDATE_COUNT); + assertThat(getRetryCount(connection)).isEqualTo(1); + } catch (JdbcAbortedException e) { + assertThat(retryAbortsInternally).isFalse(); + } + } + } + + @Test + public void testAutocommitSelectAborted() throws SQLException { + // Selects in autocommit are executed using a singleUse read-only transaction and cannot abort. + try (java.sql.Connection connection = createConnection()) { + mockSpanner.abortNextStatement(); + try (ResultSet rs = connection.createStatement().executeQuery(SELECT1.getSql())) { + while (rs.next()) { + assertThat(rs.getLong(1)).isEqualTo(1L); + } + } + } + } + + @Test + public void testTransactionalSelectAborted() throws SQLException { + try (java.sql.Connection connection = createConnection()) { + connection.setAutoCommit(false); + mockSpanner.abortNextStatement(); + try (ResultSet rs = connection.createStatement().executeQuery(SELECT1.getSql())) { + while (rs.next()) { + if (!retryAbortsInternally) { + fail("missing expected exception"); + } + assertThat(rs.getLong(1)).isEqualTo(1L); + } + } + assertThat(getRetryCount(connection)).isEqualTo(1); + } catch (JdbcAbortedException e) { + assertThat(retryAbortsInternally).isFalse(); + } + } + + @Test + public void testTransactionalUpdateWithConcurrentModificationsAborted() throws SQLException { + // As the transaction does a random select, the retry will always see different data than the + // original attempt. + try (java.sql.Connection connection = createConnection()) { + connection.setAutoCommit(false); + // Set a random answer. + mockSpanner.putStatementResult( + StatementResult.query(SELECT_RANDOM, new RandomResultSetGenerator(25).generate())); + try (ResultSet rs = connection.createStatement().executeQuery(SELECT_RANDOM.getSql())) { + //noinspection StatementWithEmptyBody + while (rs.next()) {} + } + // Set a new random answer that will be returned during the retry. 
+ mockSpanner.putStatementResult( + StatementResult.query(SELECT_RANDOM, new RandomResultSetGenerator(25).generate())); + // Abort all transactions (including the current one). + mockSpanner.abortNextStatement(); + // This will abort and start an internal retry. + connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()); + fail("missing expected aborted exception"); + } catch (JdbcAbortedDueToConcurrentModificationException e) { + assertThat(retryAbortsInternally).isTrue(); + } catch (JdbcAbortedException e) { + assertThat(retryAbortsInternally).isFalse(); + } + } + + @Test + public void testTransactionalUpdateWithErrorOnOriginalAndRetry() throws SQLException { + final String sql = "UPDATE SOMETHING SET OTHER=1"; + mockSpanner.putStatementResult( + StatementResult.exception( + Statement.of(sql), + Status.INVALID_ARGUMENT.withDescription("test").asRuntimeException())); + try (java.sql.Connection connection = createConnection()) { + connection.setAutoCommit(false); + try (ResultSet rs = connection.createStatement().executeQuery(SELECT1.getSql())) { + while (rs.next()) { + assertThat(rs.getLong(1)).isEqualTo(1L); + } + } + try { + connection.createStatement().executeUpdate(sql); + fail("missing 'test' exception"); + } catch (SQLException e) { + // ignore + } + mockSpanner.abortNextStatement(); + connection.commit(); + if (!retryAbortsInternally) { + fail("missing expected exception"); + } + } catch (JdbcAbortedException e) { + assertThat(retryAbortsInternally).isFalse(); + } + } + + @Test + public void testTransactionalUpdateWithErrorOnRetryAndNotOnOriginal() throws SQLException { + final String sql = "UPDATE SOMETHING SET OTHER=1"; + try (java.sql.Connection connection = createConnection()) { + connection.setAutoCommit(false); + // Set a normal response to the update statement. 
+ mockSpanner.putStatementResult(StatementResult.update(Statement.of(sql), 1L)); + connection.createStatement().executeUpdate(sql); + // Set an error as response for the same update statement that will be used during the retry. + // This will cause the retry to fail. + mockSpanner.putStatementResult( + StatementResult.exception( + Statement.of(sql), + Status.INVALID_ARGUMENT.withDescription("test").asRuntimeException())); + mockSpanner.abortNextStatement(); + connection.commit(); + fail("missing expected aborted exception"); + } catch (JdbcAbortedDueToConcurrentModificationException e) { + assertThat(retryAbortsInternally).isTrue(); + assertThat(e.getDatabaseErrorDuringRetry().getErrorCode()) + .isEqualTo(ErrorCode.INVALID_ARGUMENT); + assertThat(e.getDatabaseErrorDuringRetry().getMessage()).endsWith("test"); + } catch (JdbcAbortedException e) { + assertThat(retryAbortsInternally).isFalse(); + } + } + + @Test + public void testTransactionalUpdateWithErrorOnOriginalAndNotOnRetry() throws SQLException { + final String sql = "UPDATE SOMETHING SET OTHER=1"; + mockSpanner.putStatementResult( + StatementResult.exception( + Statement.of(sql), + Status.INVALID_ARGUMENT.withDescription("test").asRuntimeException())); + try (java.sql.Connection connection = createConnection()) { + connection.setAutoCommit(false); + try (ResultSet rs = connection.createStatement().executeQuery(SELECT1.getSql())) { + while (rs.next()) { + assertThat(rs.getLong(1)).isEqualTo(1L); + } + } + try { + connection.createStatement().executeUpdate(sql); + fail("missing 'test' exception"); + } catch (SQLException e) { + // ignore + } + // Set the update statement to return a result next time (i.e. during retry). 
+ mockSpanner.putStatementResult(StatementResult.update(Statement.of(sql), 1L)); + mockSpanner.abortNextStatement(); + connection.commit(); + fail("missing expected aborted exception"); + } catch (JdbcAbortedDueToConcurrentModificationException e) { + assertThat(retryAbortsInternally).isTrue(); + assertThat(e.getDatabaseErrorDuringRetry()).isNull(); + } catch (JdbcAbortedException e) { + assertThat(retryAbortsInternally).isFalse(); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcArrayTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcArrayTest.java new file mode 100644 index 000000000000..44dd1a78f863 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcArrayTest.java @@ -0,0 +1,462 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toSqlDate; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type.Code; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.cloud.spanner.jdbc.it.SingerProto.Genre; +import com.google.cloud.spanner.jdbc.it.SingerProto.SingerInfo; +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Arrays; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcArrayTest { + + @Test + public void testCreateArrayTypeName() throws SQLException { + // Note that JDBC array indices start at 1. 
+ JdbcArray array; + array = JdbcArray.createArray("BOOL", new Boolean[] {true, false, true}); + assertThat(array.getBaseType()).isEqualTo(Types.BOOLEAN); + assertThat(((Boolean[]) array.getArray(1, 1))[0]).isEqualTo(Boolean.TRUE); + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getBoolean(2)).isEqualTo(true); + assertThat(rs.next()).isTrue(); + assertThat(rs.getBoolean(2)).isEqualTo(false); + assertThat(rs.next()).isTrue(); + assertThat(rs.getBoolean(2)).isEqualTo(true); + assertThat(rs.next()).isFalse(); + } + + array = JdbcArray.createArray("BYTES", new byte[][] {new byte[] {1, 2}, new byte[] {3, 4}}); + assertThat(array.getBaseType()).isEqualTo(Types.BINARY); + assertThat(((byte[][]) array.getArray(1, 1))[0][1]).isEqualTo((byte) 2); + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getBytes(2)).isEqualTo(new byte[] {1, 2}); + assertThat(rs.next()).isTrue(); + assertThat(rs.getBytes(2)).isEqualTo(new byte[] {3, 4}); + assertThat(rs.next()).isFalse(); + } + + array = + JdbcArray.createArray( + "DATE", + new Date[] { + toSqlDate(com.google.cloud.Date.fromYearMonthDay(2021, 1, 18)), + toSqlDate(com.google.cloud.Date.fromYearMonthDay(2000, 2, 29)), + toSqlDate(com.google.cloud.Date.fromYearMonthDay(2019, 8, 31)) + }); + assertThat(array.getBaseType()).isEqualTo(Types.DATE); + assertThat(((Date[]) array.getArray(1, 1))[0]) + .isEqualTo(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2021, 1, 18))); + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getDate(2)) + .isEqualTo(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2021, 1, 18))); + assertThat(rs.next()).isTrue(); + assertThat(rs.getDate(2)) + .isEqualTo(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2000, 2, 29))); + assertThat(rs.next()).isTrue(); + assertThat(rs.getDate(2)) + .isEqualTo(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2019, 8, 31))); + 
assertThat(rs.next()).isFalse(); + } + + array = + JdbcArray.createArray( + "FLOAT32", new Float[] {1.1f, 2.2f, Double.valueOf(Math.PI).floatValue()}); + assertEquals(Types.REAL, array.getBaseType()); + assertThat(((Float[]) array.getArray(1, 3))[2]).isEqualTo(Double.valueOf(Math.PI).floatValue()); + assertEquals(Double.valueOf(Math.PI).floatValue(), ((Float[]) array.getArray(1, 3))[2], 0.0f); + try (ResultSet resultSet = array.getResultSet()) { + assertTrue(resultSet.next()); + // Column index 2 of a JDBC array is the value. + // Column index 1 of a JDBC array is the index. + assertEquals(1.1f, resultSet.getFloat(2), 0.0f); + assertTrue(resultSet.next()); + assertEquals(2.2f, resultSet.getFloat(2), 0.0f); + assertTrue(resultSet.next()); + assertEquals(Double.valueOf(Math.PI).floatValue(), resultSet.getFloat(2), 0.0f); + assertFalse(resultSet.next()); + } + + array = JdbcArray.createArray("FLOAT64", new Double[] {1.1D, 2.2D, Math.PI}); + assertThat(array.getBaseType()).isEqualTo(Types.DOUBLE); + assertThat(((Double[]) array.getArray(1, 3))[2]).isEqualTo(Math.PI); + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getDouble(2)).isEqualTo(1.1D); + assertThat(rs.next()).isTrue(); + assertThat(rs.getDouble(2)).isEqualTo(2.2D); + assertThat(rs.next()).isTrue(); + assertThat(rs.getDouble(2)).isEqualTo(Math.PI); + assertThat(rs.next()).isFalse(); + } + + array = JdbcArray.createArray("INT64", new Long[] {1L, 2L, 3L}); + assertThat(array.getBaseType()).isEqualTo(Types.BIGINT); + assertThat(((Long[]) array.getArray(1, 1))[0]).isEqualTo(1L); + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(2)).isEqualTo(1L); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(2)).isEqualTo(2L); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(2)).isEqualTo(3L); + assertThat(rs.next()).isFalse(); + } + + // Test that Byte[] arrays are automatically widened to Long[] for INT64 
type + Long[] data; + array = JdbcArray.createArray("INT64", new Byte[] {1, 2, 3, null, Byte.MAX_VALUE}); + assertThat(array.getBaseType()).isEqualTo(Types.BIGINT); + // Data should be stored as Long[] + assertThat(array.getArray()).isInstanceOf(Long[].class); + data = (Long[]) array.getArray(); + assertThat(data[0]).isEqualTo(1L); + assertThat(data[1]).isEqualTo(2L); + assertThat(data[2]).isEqualTo(3L); + assertThat(data[3]).isNull(); + assertThat(data[4]).isEqualTo((long) Byte.MAX_VALUE); + + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getByte(2)).isEqualTo((byte) 1); + assertThat(rs.next()).isTrue(); + assertThat(rs.getByte(2)).isEqualTo((byte) 2); + assertThat(rs.next()).isTrue(); + assertThat(rs.getByte(2)).isEqualTo((byte) 3); + assertThat(rs.next()).isTrue(); + assertThat(rs.getByte(2)).isEqualTo((byte) 0); + assertTrue(rs.wasNull()); + assertThat(rs.next()).isTrue(); + assertThat(rs.getByte(2)).isEqualTo(Byte.MAX_VALUE); + assertThat(rs.next()).isFalse(); + } + + // Test that Short[] arrays are automatically widened to Long[] for INT64 type + array = JdbcArray.createArray("INT64", new Short[] {100, 200, null, Short.MAX_VALUE}); + assertThat(array.getBaseType()).isEqualTo(Types.BIGINT); + // Data should be stored as Long[] + assertThat(array.getArray()).isInstanceOf(Long[].class); + data = (Long[]) array.getArray(); + assertThat(data[0]).isEqualTo(100L); + assertThat(data[1]).isEqualTo(200L); + assertThat(data[2]).isNull(); + assertThat(data[3]).isEqualTo((long) Short.MAX_VALUE); + + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getShort(2)).isEqualTo((short) 100); + assertThat(rs.next()).isTrue(); + assertThat(rs.getShort(2)).isEqualTo((short) 200); + assertThat(rs.next()).isTrue(); + assertThat(rs.getShort(2)).isEqualTo((short) 0); + assertTrue(rs.wasNull()); + assertThat(rs.next()).isTrue(); + assertThat(rs.getShort(2)).isEqualTo(Short.MAX_VALUE); + 
assertThat(rs.next()).isFalse(); + } + + // Test that Integer[] arrays are automatically widened to Long[] for INT64 type + array = JdbcArray.createArray("INT64", new Integer[] {1000, 2000, null, Integer.MAX_VALUE}); + assertThat(array.getBaseType()).isEqualTo(Types.BIGINT); + // Data should be stored as Long[] + assertThat(array.getArray()).isInstanceOf(Long[].class); + data = (Long[]) array.getArray(); + assertThat(data[0]).isEqualTo(1000L); + assertThat(data[1]).isEqualTo(2000L); + assertThat(data[2]).isNull(); + assertThat(data[3]).isEqualTo((long) Integer.MAX_VALUE); + + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getInt(2)).isEqualTo(1000); + assertThat(rs.next()).isTrue(); + assertThat(rs.getInt(2)).isEqualTo(2000); + assertThat(rs.next()).isTrue(); + assertThat(rs.getInt(2)).isEqualTo(0); + assertTrue(rs.wasNull()); + assertThat(rs.next()).isTrue(); + assertThat(rs.getInt(2)).isEqualTo(Integer.MAX_VALUE); + assertThat(rs.next()).isFalse(); + } + + array = + JdbcArray.createArray("NUMERIC", new BigDecimal[] {BigDecimal.ONE, null, BigDecimal.TEN}); + assertThat(array.getBaseType()).isEqualTo(Types.NUMERIC); + assertThat(((BigDecimal[]) array.getArray(1, 1))[0]).isEqualTo(BigDecimal.ONE); + assertThat(((BigDecimal[]) array.getArray(2, 1))[0]).isNull(); + assertThat(((BigDecimal[]) array.getArray(3, 1))[0]).isEqualTo(BigDecimal.TEN); + try (ResultSet rs = array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getBigDecimal(2)).isEqualTo(BigDecimal.ONE); + assertThat(rs.next()).isTrue(); + assertThat(rs.getBigDecimal(2)).isNull(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getBigDecimal(2)).isEqualTo(BigDecimal.TEN); + assertThat(rs.next()).isFalse(); + } + + array = JdbcArray.createArray("STRING", new String[] {"foo", "bar", "baz"}); + assertThat(array.getBaseType()).isEqualTo(Types.NVARCHAR); + assertThat(((String[]) array.getArray(1, 1))[0]).isEqualTo("foo"); + try (ResultSet rs = 
array.getResultSet()) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getString(2)).isEqualTo("foo"); + assertThat(rs.next()).isTrue(); + assertThat(rs.getString(2)).isEqualTo("bar"); + assertThat(rs.next()).isTrue(); + assertThat(rs.getString(2)).isEqualTo("baz"); + assertThat(rs.next()).isFalse(); + } + + array = + JdbcArray.createArray( + "JSON", + new String[] {"{}", "[]", null, "{\"name\":\"John\", \"age\":30, \"car\":null}"}); + assertThat(array.getBaseType()).isEqualTo(JsonType.VENDOR_TYPE_NUMBER); + assertThat(((String[]) array.getArray(1, 1))[0]).isEqualTo("{}"); + try (ResultSet rs = array.getResultSet()) { + assertTrue(rs.next()); + assertEquals("{}", rs.getString(2)); + assertTrue(rs.next()); + assertEquals("[]", rs.getString(2)); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + assertTrue(rs.wasNull()); + assertTrue(rs.next()); + assertEquals("{\"name\":\"John\", \"age\":30, \"car\":null}", rs.getString(2)); + assertFalse(rs.next()); + } + + array = + JdbcArray.createArray( + "JSONB", + new String[] {"{}", "[]", null, "{\"name\":\"John\", \"age\":30, \"car\":null}"}); + assertThat(array.getBaseType()).isEqualTo(PgJsonbType.VENDOR_TYPE_NUMBER); + assertThat(((String[]) array.getArray(1, 1))[0]).isEqualTo("{}"); + try (ResultSet rs = array.getResultSet()) { + assertTrue(rs.next()); + assertEquals("{}", rs.getString(2)); + assertTrue(rs.next()); + assertEquals("[]", rs.getString(2)); + assertTrue(rs.next()); + assertNull(rs.getString(2)); + assertTrue(rs.wasNull()); + assertTrue(rs.next()); + assertEquals("{\"name\":\"John\", \"age\":30, \"car\":null}", rs.getString(2)); + assertFalse(rs.next()); + } + + array = + JdbcArray.createArray( + "TIMESTAMP", + new Timestamp[] {new Timestamp(1L), new Timestamp(100L), new Timestamp(1000L)}); + assertThat(array.getBaseType()).isEqualTo(Types.TIMESTAMP); + assertThat(((Timestamp[]) array.getArray(1, 1))[0]).isEqualTo(new Timestamp(1L)); + try (ResultSet rs = array.getResultSet()) { + 
assertThat(rs.next()).isTrue(); + assertThat(rs.getTimestamp(2)).isEqualTo(new Timestamp(1L)); + assertThat(rs.next()).isTrue(); + assertThat(rs.getTimestamp(2)).isEqualTo(new Timestamp(100L)); + assertThat(rs.next()).isTrue(); + assertThat(rs.getTimestamp(2)).isEqualTo(new Timestamp(1000L)); + assertThat(rs.next()).isFalse(); + } + + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + array = + JdbcArray.createArray( + "PROTO", new SingerInfo[] {singerInfo, SingerInfo.getDefaultInstance(), null}); + assertEquals(ProtoMessageType.VENDOR_TYPE_NUMBER, array.getBaseType()); + assertEquals(singerInfo, ((SingerInfo[]) array.getArray(1, 1))[0]); + try (ResultSet rs = array.getResultSet()) { + assertTrue(rs.next()); + assertEquals(singerInfo, rs.getObject(2, SingerInfo.class)); + assertArrayEquals(singerInfo.toByteArray(), rs.getBytes(2)); + assertTrue(rs.next()); + assertEquals(SingerInfo.getDefaultInstance(), rs.getObject(2, SingerInfo.class)); + assertArrayEquals(SingerInfo.getDefaultInstance().toByteArray(), rs.getBytes(2)); + assertTrue(rs.next()); + assertNull(rs.getObject(2, SingerInfo.class)); + assertNull(rs.getBytes(2)); + assertFalse(rs.next()); + } + + array = JdbcArray.createArray("ENUM", new Genre[] {Genre.ROCK, Genre.FOLK, null}); + assertEquals(ProtoEnumType.VENDOR_TYPE_NUMBER, array.getBaseType()); + assertEquals(Genre.ROCK, ((Genre[]) array.getArray(1, 1))[0]); + try (ResultSet rs = array.getResultSet()) { + assertTrue(rs.next()); + assertEquals(Genre.ROCK, rs.getObject(2, Genre.class)); + assertEquals(Genre.ROCK.getNumber(), rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(Genre.FOLK, rs.getObject(2, Genre.class)); + assertEquals(Genre.FOLK.getNumber(), rs.getInt(2)); + assertTrue(rs.next()); + assertNull(rs.getObject(2, Genre.class)); + assertEquals(0, rs.getInt(2)); + assertFalse(rs.next()); + } + + array = + JdbcArray.createArray( + JdbcDataType.getType(Code.PROTO), + Arrays.asList( + 
singerInfo.toByteArray(), SingerInfo.getDefaultInstance().toByteArray(), null)); + assertEquals(ProtoMessageType.VENDOR_TYPE_NUMBER, array.getBaseType()); + assertArrayEquals(singerInfo.toByteArray(), ((byte[][]) array.getArray(1, 1))[0]); + try (ResultSet rs = array.getResultSet()) { + assertTrue(rs.next()); + assertArrayEquals(singerInfo.toByteArray(), rs.getBytes(2)); + assertTrue(rs.next()); + assertArrayEquals(SingerInfo.getDefaultInstance().toByteArray(), rs.getBytes(2)); + assertTrue(rs.next()); + assertNull(rs.getBytes(2)); + assertFalse(rs.next()); + } + + array = + JdbcArray.createArray( + JdbcDataType.getType(Code.ENUM), + Arrays.asList((long) Genre.ROCK.getNumber(), (long) Genre.FOLK.getNumber(), null)); + assertEquals(ProtoEnumType.VENDOR_TYPE_NUMBER, array.getBaseType()); + assertEquals(Genre.ROCK.getNumber(), (long) ((Long[]) array.getArray(1, 1))[0]); + try (ResultSet rs = array.getResultSet()) { + assertTrue(rs.next()); + assertEquals(Genre.ROCK.getNumber(), rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(Genre.FOLK.getNumber(), rs.getInt(2)); + assertTrue(rs.next()); + assertEquals(0, rs.getInt(2)); + assertFalse(rs.next()); + } + } + + @Test + public void testCreateArrayOfArray() { + try { + JdbcArray.createArray("ARRAY", new String[][] {{}}); + fail("missing expected exception"); + } catch (SQLException e) { + assertThat((Exception) e).isInstanceOf(JdbcSqlException.class); + JdbcSqlException jse = (JdbcSqlException) e; + assertThat(jse.getErrorCode()) + .isEqualTo(ErrorCode.INVALID_ARGUMENT.getGrpcStatusCode().value()); + } + } + + @Test + public void testCreateArrayOfStruct() throws SQLException { + JdbcArray array = + JdbcArray.createArray( + "STRUCT", + new Struct[] {Struct.newBuilder().set("f1").to("v1").set("f2").to(1L).build(), null}); + assertEquals(Types.STRUCT, array.getBaseType()); + assertThat((Struct[]) array.getArray()) + .asList() + .containsExactly(Struct.newBuilder().set("f1").to("v1").set("f2").to(1L).build(), null) + 
.inOrder(); + assertThrows(SQLFeatureNotSupportedException.class, array::getResultSet); + } + + @Test + public void testGetResultSetMetadata() throws SQLException { + JdbcArray array = JdbcArray.createArray("STRING", new String[] {"foo", "bar", "baz"}); + try (ResultSet rs = array.getResultSet()) { + ResultSetMetaData metadata = rs.getMetaData(); + assertThat(metadata.getColumnCount()).isEqualTo(2); + assertThat(metadata.getColumnType(1)).isEqualTo(Types.BIGINT); + assertThat(metadata.getColumnType(2)).isEqualTo(Types.NVARCHAR); + assertThat(metadata.getColumnName(1)).isEqualTo("INDEX"); + assertThat(metadata.getColumnName(2)).isEqualTo("VALUE"); + } + } + + @Test + public void testGetResultSetWithIndex() throws SQLException { + JdbcArray array = JdbcArray.createArray("STRING", new String[] {"foo", "bar", "baz"}); + try (ResultSet rs = array.getResultSet(2L, 1)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong("INDEX")).isEqualTo(2L); + assertThat(rs.getString("VALUE")).isEqualTo("bar"); + assertThat(rs.next()).isFalse(); + } + + try (ResultSet rs = array.getResultSet(1L, 5)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getString(2)).isEqualTo("foo"); + assertThat(rs.next()).isTrue(); + assertThat(rs.getString(2)).isEqualTo("bar"); + assertThat(rs.next()).isTrue(); + assertThat(rs.getString(2)).isEqualTo("baz"); + assertThat(rs.next()).isFalse(); + } + + try (ResultSet rs = array.getResultSet(1L, 0)) { + assertThat(rs.next()).isFalse(); + } + } + + @Test + public void testGetResultSetWithInvalidIndex() throws SQLException { + JdbcArray array = JdbcArray.createArray("STRING", new String[] {"foo", "bar", "baz"}); + try (ResultSet rs = array.getResultSet(0L, 1)) { + fail("missing expected exception"); + } catch (JdbcSqlExceptionImpl e) { + assertThat(e.getErrorCode()) + .isEqualTo(ErrorCode.INVALID_ARGUMENT.getGrpcStatusCode().value()); + } + } + + @Test + public void testGetResultSetWithInvalidCount() throws SQLException { + JdbcArray array = 
JdbcArray.createArray("STRING", new String[] {"foo", "bar", "baz"}); + try (ResultSet rs = array.getResultSet(1L, -1)) { + fail("missing expected exception"); + } catch (JdbcSqlExceptionImpl e) { + assertThat(e.getErrorCode()) + .isEqualTo(ErrorCode.INVALID_ARGUMENT.getGrpcStatusCode().value()); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcBlobTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcBlobTest.java new file mode 100644 index 000000000000..e7be0ecee2e0 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcBlobTest.java @@ -0,0 +1,336 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.rpc.Code; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.sql.SQLException; +import java.util.Arrays; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcBlobTest { + + private static final class PosLength { + private final long pos; + private final int len; + + private static PosLength of(long pos, int len) { + return new PosLength(pos, len); + } + + private PosLength(long pos, int len) { + this.pos = pos; + this.len = len; + } + + @Override + public String toString() { + return "pos: " + pos + ", len: " + len; + } + } + + private static final class PosBytes { + private final long pos; + private final byte[] bytes; + + private static PosBytes of(long pos, byte[] bytes) { + return new PosBytes(pos, bytes); + } + + private PosBytes(long pos, byte[] bytes) { + this.pos = pos; + this.bytes = bytes; + } + + @Override + public String toString() { + return "pos: " + pos + ", bytes: " + Arrays.toString(bytes); + } + } + + @Test + public void testLength() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + assertEquals(0L, blob.length()); + blob.setBytes(1L, new byte[] {1, 2, 3}); + assertEquals(3L, blob.length()); + blob.free(); + assertEquals(0L, blob.length()); + } + + @Test + public void testGetBytes() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + assertArrayEquals(new byte[] {1, 2, 3, 4, 5}, blob.getBytes(1L, 5)); + assertArrayEquals(new byte[] {2, 3, 4, 5}, blob.getBytes(2L, 5)); + assertArrayEquals(new byte[] {2, 3, 4}, blob.getBytes(2L, 3)); + assertArrayEquals(new byte[] {}, blob.getBytes(1L, 0)); + + // test 
invalid parameters + PosLength[] params = + new PosLength[] {PosLength.of(0L, 4), PosLength.of(-1L, 4), PosLength.of(1L, -1)}; + for (PosLength param : params) { + SQLException sqlException = + assertThrows(SQLException.class, () -> blob.getBytes(param.pos, param.len)); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) sqlException; + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + + @Test + public void testGetBinaryStream() throws SQLException, IOException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + byte[] buf = new byte[5]; + try (InputStream is = blob.getBinaryStream()) { + int b; + int index = 0; + while ((b = is.read()) > -1) { + buf[index] = (byte) b; + index++; + } + } + assertArrayEquals(new byte[] {1, 2, 3, 4, 5}, buf); + + buf = new byte[10]; + try (InputStream is = blob.getBinaryStream()) { + assertEquals(5, is.read(buf)); + assertEquals(-1, is.read()); + } + assertArrayEquals(new byte[] {1, 2, 3, 4, 5, 0, 0, 0, 0, 0}, buf); + } + + @Test + public void testPosition() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + assertEquals(1L, blob.position(new byte[] {1}, 1L)); + assertEquals(1L, blob.position(new byte[] {1, 2}, 1L)); + assertEquals(2L, blob.position(new byte[] {2}, 1L)); + // note that the spec says that the method should return the position within the BLOB where the + // pattern can be found, so it's not relative to the starting position. 
+ assertEquals(2L, blob.position(new byte[] {2}, 2L)); + assertEquals(1L, blob.position(new byte[] {1, 2, 3, 4, 5}, 1L)); + assertEquals(-1L, blob.position(new byte[] {1, 2, 3, 4, 5, 6}, 1L)); + assertEquals(-1L, blob.position(new byte[] {1, 2, 3, 4, 5}, 2L)); + assertEquals(-1L, blob.position(new byte[] {2}, 3L)); + assertEquals(-1L, blob.position(new byte[] {1}, 6L)); + + // test invalid parameters + PosBytes[] params = + new PosBytes[] { + PosBytes.of(0L, new byte[] {}), PosBytes.of(-1L, new byte[] {}), PosBytes.of(1L, null) + }; + for (PosBytes param : params) { + SQLException sqlException = + assertThrows(SQLException.class, () -> blob.position(param.bytes, param.pos)); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) sqlException; + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + + @Test + public void testPositionBlob() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + + assertEquals(1L, blob.position(createBlob((byte) 1), 1L)); + assertEquals(1L, blob.position(createBlob((byte) 1, (byte) 2), 1L)); + assertEquals(2L, blob.position(createBlob((byte) 2), 1L)); + // note that the spec says that the method should return the position within the BLOB where the + // pattern can be found, so it's not relative to the starting position. 
+ assertEquals(2L, blob.position(createBlob((byte) 2), 2L)); + assertEquals(1L, blob.position(createBlob(new byte[] {1, 2, 3, 4, 5}), 1L)); + assertEquals(-1L, blob.position(createBlob(new byte[] {1, 2, 3, 4, 5, 6}), 1L)); + assertEquals(-1L, blob.position(createBlob(new byte[] {1, 2, 3, 4, 5}), 2L)); + assertEquals(-1L, blob.position(createBlob(new byte[] {2}), 3L)); + assertEquals(-1L, blob.position(createBlob(new byte[] {1}), 6L)); + + // test invalid parameters + PosBytes[] params = + new PosBytes[] { + PosBytes.of(0L, new byte[] {}), PosBytes.of(-1L, new byte[] {}), PosBytes.of(1L, null) + }; + for (PosBytes param : params) { + SQLException sqlException = + assertThrows(SQLException.class, () -> blob.position(createBlob(param.bytes), param.pos)); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) sqlException; + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + + private JdbcBlob createBlob(byte... 
bytes) throws SQLException { + if (bytes == null) { + return null; + } + JdbcBlob res = new JdbcBlob(); + res.setBytes(1L, bytes); + return res; + } + + @Test + public void testSetBytes() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3}); + assertArrayEquals(new byte[] {1, 2, 3}, blob.getBytes(1L, 10)); + blob.setBytes(2L, new byte[] {1}); + assertArrayEquals(new byte[] {1, 1, 3}, blob.getBytes(1L, 10)); + blob.setBytes(4L, new byte[] {4}); + assertArrayEquals(new byte[] {1, 1, 3, 4}, blob.getBytes(1L, 10)); + blob.setBytes(8L, new byte[] {8}); + assertArrayEquals(new byte[] {1, 1, 3, 4, 0, 0, 0, 8}, blob.getBytes(1L, 10)); + } + + @Test + public void testSetBytesOffsetLength() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(4L, new byte[] {1, 2, 3}, 0, 3); + assertArrayEquals(new byte[] {0, 0, 0, 1, 2, 3}, blob.getBytes(1L, 10)); + blob.free(); + blob.setBytes(4L, new byte[] {1, 2, 3}, 1, 3); + assertArrayEquals(new byte[] {0, 0, 0, 2, 3}, blob.getBytes(1L, 10)); + blob.free(); + blob.setBytes(4L, new byte[] {1, 2, 3}, 3, 3); + assertArrayEquals(new byte[] {0, 0, 0}, blob.getBytes(1L, 10)); + blob.free(); + blob.setBytes(4L, new byte[] {1, 2, 3}, 4, 3); + assertArrayEquals(new byte[] {0, 0, 0}, blob.getBytes(1L, 10)); + blob.setBytes(2L, new byte[] {1, 2, 3}, 0, 10); + assertArrayEquals(new byte[] {0, 1, 2, 3}, blob.getBytes(1L, 10)); + blob.setBytes(3L, new byte[] {1, 2, 3}, 2, 10); + assertArrayEquals(new byte[] {0, 1, 3, 3}, blob.getBytes(1L, 10)); + blob.setBytes(10L, new byte[] {1, 2, 3}, 2, 10); + assertArrayEquals(new byte[] {0, 1, 3, 3, 0, 0, 0, 0, 0, 3}, blob.getBytes(1L, 20)); + } + + @Test + public void testSetBinaryStream() throws SQLException, IOException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + try (OutputStream os = blob.setBinaryStream(1L)) { + os.write(6); + // no flush yet, so it should be unchanged + assertArrayEquals(new byte[] 
{1, 2, 3, 4, 5}, blob.getBytes(1L, 20)); + os.flush(); + assertArrayEquals(new byte[] {6, 2, 3, 4, 5}, blob.getBytes(1L, 20)); + os.write(7); + } + // closing the stream should also flush the changes + assertArrayEquals(new byte[] {6, 7, 3, 4, 5}, blob.getBytes(1L, 20)); + + // test writing beyond the end of the stream + try (OutputStream os = blob.setBinaryStream(1L)) { + os.write(new byte[] {1, 2, 3, 4, 5, 6, 7}); + // no flush yet, so it should be unchanged + assertArrayEquals(new byte[] {6, 7, 3, 4, 5}, blob.getBytes(1L, 20)); + os.flush(); + assertArrayEquals(new byte[] {1, 2, 3, 4, 5, 6, 7}, blob.getBytes(1L, 20)); + } + assertArrayEquals(new byte[] {1, 2, 3, 4, 5, 6, 7}, blob.getBytes(1L, 20)); + + // test writing from a position that is larger than 1 + try (OutputStream os = blob.setBinaryStream(5L)) { + os.write(new byte[] {1, 2, 3}); + // no flush yet, so it should be unchanged + assertArrayEquals(new byte[] {1, 2, 3, 4, 5, 6, 7}, blob.getBytes(1L, 20)); + os.flush(); + assertArrayEquals(new byte[] {1, 2, 3, 4, 1, 2, 3}, blob.getBytes(1L, 20)); + } + + // test writing from a position that is larger than the current length + try (OutputStream os = blob.setBinaryStream(10L)) { + os.write(new byte[] {1, 2, 3}); + // no flush yet, so it should be unchanged + assertArrayEquals(new byte[] {1, 2, 3, 4, 1, 2, 3}, blob.getBytes(1L, 20)); + os.flush(); + assertArrayEquals(new byte[] {1, 2, 3, 4, 1, 2, 3, 0, 0, 1, 2, 3}, blob.getBytes(1L, 20)); + } + + // test writing a large number of bytes + try (OutputStream os = blob.setBinaryStream(1L)) { + os.write(new byte[2000]); + // no flush yet, so it should be unchanged + assertArrayEquals(new byte[] {1, 2, 3, 4, 1, 2, 3, 0, 0, 1, 2, 3}, blob.getBytes(1L, 3000)); + os.flush(); + assertArrayEquals(new byte[2000], blob.getBytes(1L, 3000)); + } + } + + @Test + public void testTruncate() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + assertArrayEquals(new byte[] {1, 
2, 3, 4, 5}, blob.getBytes(1L, 20)); + blob.truncate(3); + assertArrayEquals(new byte[] {1, 2, 3}, blob.getBytes(1L, 20)); + blob.truncate(0); + assertArrayEquals(new byte[] {}, blob.getBytes(1L, 20)); + } + + @Test + public void testFree() throws SQLException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + assertArrayEquals(new byte[] {1, 2, 3, 4, 5}, blob.getBytes(1L, 20)); + blob.free(); + assertArrayEquals(new byte[] {}, blob.getBytes(1L, 20)); + } + + @Test + public void testGetBinaryStreamPosLength() throws SQLException, IOException { + JdbcBlob blob = new JdbcBlob(); + blob.setBytes(1L, new byte[] {1, 2, 3, 4, 5}); + + byte[] buf = new byte[5]; + try (InputStream is = blob.getBinaryStream(1L, 3)) { + int b; + int index = 0; + while ((b = is.read()) > -1) { + buf[index] = (byte) b; + index++; + } + } + assertArrayEquals(new byte[] {1, 2, 3, 0, 0}, buf); + + buf = new byte[10]; + try (InputStream is = blob.getBinaryStream(4L, 10)) { + assertEquals(2, is.read(buf)); + assertEquals(-1, is.read()); + } + assertArrayEquals(new byte[] {4, 5, 0, 0, 0, 0, 0, 0, 0, 0}, buf); + + buf = new byte[10]; + try (InputStream is = blob.getBinaryStream(6L, 10)) { + assertEquals(-1L, is.read(buf)); + } + assertArrayEquals(new byte[10], buf); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcClobTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcClobTest.java new file mode 100644 index 000000000000..573200ddf660 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcClobTest.java @@ -0,0 +1,318 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import com.google.rpc.Code; +import java.io.IOException; +import java.io.Reader; +import java.io.Writer; +import java.sql.SQLException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcClobTest { + + private static final class PosLength { + private final long pos; + private final int len; + + private static PosLength of(long pos, int len) { + return new PosLength(pos, len); + } + + private PosLength(long pos, int len) { + this.pos = pos; + this.len = len; + } + + @Override + public String toString() { + return "pos: " + pos + ", len: " + len; + } + } + + private static final class PosString { + private final long pos; + private final String str; + + private static PosString of(long pos, String str) { + return new PosString(pos, str); + } + + private PosString(long pos, String str) { + this.pos = pos; + this.str = str; + } + + @Override + public String toString() { + return "pos: " + pos + ", str: " + str; + } + } + + @Test + public void testLength() throws SQLException { + JdbcClob clob = new JdbcClob(); + clob.setString(1L, "test"); + assertEquals(4L, clob.length()); + clob.setString(1L, "test-test"); + assertEquals(9L, clob.length()); + } + + @Test + public void testGetSubstring() throws SQLException { + JdbcClob clob = new 
JdbcClob(); + clob.setString(1L, "test"); + assertEquals("test", clob.getSubString(1, 4)); + assertEquals("te", clob.getSubString(1, 2)); + assertEquals("st", clob.getSubString(3, 2)); + assertEquals("test", clob.getSubString(1, 5)); + assertEquals("t", clob.getSubString(4, 5)); + assertEquals("", clob.getSubString(5, 5)); + assertEquals("", clob.getSubString(6, 5)); + + // test invalid parameters + PosLength[] params = + new PosLength[] {PosLength.of(0L, 4), PosLength.of(-1L, 4), PosLength.of(1L, -1)}; + for (PosLength param : params) { + SQLException sqlException = + assertThrows(SQLException.class, () -> clob.getSubString(param.pos, param.len)); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) sqlException; + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + + @Test + public void testGetCharacterStream() throws SQLException, IOException { + JdbcClob clob = new JdbcClob(); + clob.setString(1L, "test"); + char[] cbuf = new char[4]; + try (Reader reader = clob.getCharacterStream()) { + assertEquals(4, reader.read(cbuf, 0, 4)); + } + assertArrayEquals(new char[] {'t', 'e', 's', 't'}, cbuf); + try (Reader reader = clob.getCharacterStream()) { + assertEquals(2, reader.read(cbuf, 0, 2)); + assertEquals(2, reader.read(cbuf, 2, 2)); + } + assertArrayEquals(new char[] {'t', 'e', 's', 't'}, cbuf); + try (Reader reader = clob.getCharacterStream()) { + assertEquals(2, reader.read(cbuf, 0, 2)); + // changing the value of the clob will not change a character stream that has already been + // opened + clob.setString(1L, "foobar"); + assertEquals(2, reader.read(cbuf, 2, 2)); + } + assertArrayEquals(new char[] {'t', 'e', 's', 't'}, cbuf); + } + + @Test + public void testPositionString() throws SQLException { + JdbcClob clob = new JdbcClob(); + clob.setString(1L, "test"); + assertEquals(3L, clob.position("st", 1L)); + clob.setString(1L, "foobarfoobar"); + assertEquals(4L, clob.position("bar", 
1L)); + assertEquals(4L, clob.position("bar", 2L)); + assertEquals(10L, clob.position("bar", 5L)); + assertEquals(10L, clob.position("bar", 8L)); + assertEquals(10L, clob.position("bar", 10L)); + assertEquals(-1L, clob.position("bar", 11L)); + assertEquals(-1L, clob.position("bar", 100L)); + assertEquals(-1L, clob.position("not_there", 1L)); + // test invalid parameters + PosString[] params = + new PosString[] {PosString.of(0L, "bar"), PosString.of(-1L, "bar"), PosString.of(1L, null)}; + for (PosString param : params) { + SQLException sqlException = + assertThrows(SQLException.class, () -> clob.position(param.str, param.pos)); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) sqlException; + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + + @Test + public void testPositionClob() throws SQLException { + JdbcClob clob = new JdbcClob(); + clob.setString(1L, "test"); + JdbcClob search = new JdbcClob(); + search.setString(1L, "st"); + assertEquals(3L, clob.position(search, 1L)); + clob.setString(1L, "foobarfoobar"); + search.setString(1L, "bar"); + assertEquals(4L, clob.position(search, 1L)); + assertEquals(4L, clob.position(search, 2L)); + assertEquals(10L, clob.position(search, 5L)); + assertEquals(10L, clob.position(search, 8L)); + assertEquals(10L, clob.position(search, 10L)); + assertEquals(-1L, clob.position(search, 11L)); + assertEquals(-1L, clob.position(search, 100L)); + search.setString(1L, "not_there"); + assertEquals(-1L, clob.position(search, 1L)); + // test invalid parameters + PosString[] params = + new PosString[] {PosString.of(0L, "bar"), PosString.of(-1L, "bar"), PosString.of(1L, null)}; + for (PosString param : params) { + SQLException sqlException = + assertThrows( + SQLException.class, + () -> { + search.setString(1L, param.str); + clob.position(search, param.pos); + }); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException 
= (JdbcSqlException) sqlException; + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + + @Test + public void testSetString() throws SQLException { + JdbcClob clob = new JdbcClob(); + clob.setString(1L, "test"); + assertEquals("test", clob.getSubString(1L, 4)); + clob.setString(1L, "bar"); + assertEquals("bart", clob.getSubString(1L, 4)); + clob.setString(1L, "foobar"); + assertEquals("foobar", clob.getSubString(1L, 6)); + clob.setString(2L, "foobar"); + assertEquals("ffoobar", clob.getSubString(1L, 7)); + clob.setString(8L, "test"); + assertEquals("ffoobartest", clob.getSubString(1L, 11)); + clob.setString(15, "end"); + assertEquals("ffoobartest end", clob.getSubString(1L, 17)); + // test invalid parameters + PosString[] params = + new PosString[] {PosString.of(0L, "bar"), PosString.of(-1L, "bar"), PosString.of(1L, null)}; + for (PosString param : params) { + SQLException sqlException = + assertThrows(SQLException.class, () -> clob.setString(param.pos, param.str)); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) sqlException; + assertEquals(Code.INVALID_ARGUMENT, jdbcSqlException.getCode()); + } + } + + @Test + public void testSetStringOffsetLen() throws SQLException { + JdbcClob clob = new JdbcClob(); + clob.setString(1L, "test", 2, 3); + assertEquals("est", clob.getSubString(1L, 4)); + clob.setString(1L, "bar", 1, 1); + assertEquals("bst", clob.getSubString(1L, 4)); + clob.setString(1L, "foobar", 1, 6); + assertEquals("foobar", clob.getSubString(1L, 6)); + clob.setString(2L, "foobar", 2, 5); + assertEquals("foobar", clob.getSubString(1L, 7)); + clob.setString(8L, "test", 4, 1); + assertEquals("foobar t", clob.getSubString(1L, 8)); + clob.setString(15, "end", 1, 3); + assertEquals("foobar t end", clob.getSubString(1L, 17)); + } + + @Test + public void testSetCharacterStream() throws SQLException, IOException { + JdbcClob clob = new JdbcClob(); + clob.setString(1, "foobar"); + 
assertEquals("foobar", clob.getSubString(1L, 6)); + try (Writer writer = clob.setCharacterStream(1L)) { + writer.write("t"); + // not yet flushed, there should be no change + assertEquals("foobar", clob.getSubString(1L, 6)); + writer.flush(); + // after a flush the change should be visible + assertEquals("toobar", clob.getSubString(1L, 6)); + writer.write("est"); + } + // close should also auto-flush + assertEquals("testar", clob.getSubString(1L, 6)); + + // start all over + clob.free(); + clob.setString(1, "foobar"); + assertEquals("foobar", clob.getSubString(1L, 6)); + try (Writer writer = clob.setCharacterStream(5L)) { + writer.write("t"); + // not yet flushed, there should be no change + assertEquals("foobar", clob.getSubString(1L, 6)); + writer.flush(); + // after a flush the change should be visible + assertEquals("foobtr", clob.getSubString(1L, 6)); + writer.write("est"); + } + // close should also auto-flush + assertEquals("foobtest", clob.getSubString(1L, 8)); + + // do a test with multiple flushes + clob.free(); + clob.setString(1, "foobar"); + assertEquals("foobar", clob.getSubString(1L, 6)); + try (Writer writer = clob.setCharacterStream(1L)) { + writer.write("t"); + assertEquals("foobar", clob.getSubString(1L, 6)); + writer.flush(); + assertEquals("toobar", clob.getSubString(1L, 6)); + writer.write("est"); + assertEquals("toobar", clob.getSubString(1L, 6)); + writer.flush(); + assertEquals("testar", clob.getSubString(1L, 6)); + } + assertEquals("testar", clob.getSubString(1L, 8)); + + // writer after end + clob.free(); + clob.setString(1, "foobar"); + assertEquals("foobar", clob.getSubString(1L, 10)); + try (Writer writer = clob.setCharacterStream(10L)) { + writer.write("t"); + assertEquals("foobar", clob.getSubString(1L, 20)); + writer.flush(); + assertEquals("foobar t", clob.getSubString(1L, 20)); + writer.write("est"); + } + assertEquals("foobar test", clob.getSubString(1L, 20)); + } + + @Test + public void testTruncate() throws SQLException { + 
JdbcClob clob = new JdbcClob(); + clob.setString(1L, "foobar"); + assertEquals("foobar", clob.getSubString(1L, 6)); + clob.truncate(3L); + assertEquals("foo", clob.getSubString(1L, 6)); + clob.truncate(0L); + assertEquals("", clob.getSubString(1L, 6)); + } + + @Test + public void testFree() throws SQLException { + JdbcClob clob = new JdbcClob(); + clob.setString(1L, "foobar"); + assertEquals("foobar", clob.getSubString(1L, 6)); + clob.free(); + assertEquals("", clob.getSubString(1L, 6)); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcConnectionGeneratedSqlScriptTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcConnectionGeneratedSqlScriptTest.java new file mode 100644 index 000000000000..26722bd7b351 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcConnectionGeneratedSqlScriptTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.connection.AbstractConnectionImplTest; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier.GenericConnection; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier.GenericConnectionProvider; +import com.google.cloud.spanner.connection.ConnectionImplTest; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.jdbc.JdbcSqlScriptVerifier.JdbcGenericConnection; +import io.opentelemetry.api.OpenTelemetry; +import java.sql.SQLException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * This test executes a SQL script that has been generated from the log of all the subclasses of + * {@link AbstractConnectionImplTest} and covers the same test cases. Its aim is to verify that the + * connection reacts correctly in all possible states (i.e. DML statements should not be allowed + * when the connection is in read-only mode, or when a read-only transaction has started etc.) 
+ */ +@RunWith(Parameterized.class) +public class JdbcConnectionGeneratedSqlScriptTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + static class TestConnectionProvider implements GenericConnectionProvider { + private final Dialect dialect; + + TestConnectionProvider(Dialect dialect) { + this.dialect = dialect; + } + + @Override + public GenericConnection getConnection() { + ConnectionOptions options = mock(ConnectionOptions.class); + when(options.getUri()).thenReturn(ConnectionImplTest.URI); + com.google.cloud.spanner.connection.Connection spannerConnection = + ConnectionImplTest.createConnection(options, dialect); + Spanner spanner = spannerConnection.getSpanner(); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(spannerConnection.getDialect()).thenReturn(dialect); + when(options.getConnection()).thenReturn(spannerConnection); + when(options.getDatabaseId()).thenReturn(DatabaseId.of("project", "instance", "database")); + try { + JdbcConnection connection = + new JdbcConnection( + "jdbc:cloudspanner://localhost/projects/project/instances/instance/databases/database;credentialsUrl=url", + options); + JdbcGenericConnection res = JdbcGenericConnection.of(connection); + res.setStripCommentsBeforeExecute(true); + return res; + } catch (SQLException e) { + throw SpannerExceptionFactory.asSpannerException(e); + } + } + } + + @Test + public void testGeneratedScript() throws Exception { + JdbcSqlScriptVerifier verifier = new JdbcSqlScriptVerifier(new TestConnectionProvider(dialect)); + String prefix = dialect == Dialect.POSTGRESQL ? 
"PostgreSQL/" : ""; + verifier.verifyStatementsInFile( + prefix + "ConnectionImplGeneratedSqlScriptTest.sql", getClass(), false); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcConnectionTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcConnectionTest.java new file mode 100644 index 000000000000..69bb4fba8d54 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcConnectionTest.java @@ -0,0 +1,856 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.connection.AbstractConnectionImplTest; +import com.google.cloud.spanner.connection.ConnectionImplTest; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.rpc.Code; +import io.opentelemetry.api.OpenTelemetry; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; +import java.sql.SQLWarning; +import java.sql.Savepoint; +import java.util.Collections; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import 
org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.mockito.Mockito; + +@RunWith(Parameterized.class) +public class JdbcConnectionTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private com.google.cloud.spanner.ResultSet createSelect1ResultSet() { + return ResultSets.forRows( + Type.struct(StructField.of("", Type.int64())), + Collections.singletonList(Struct.newBuilder().set("").to(1L).build())); + } + + private JdbcConnection createConnection(ConnectionOptions options) throws SQLException { + com.google.cloud.spanner.connection.Connection spannerConnection = + ConnectionImplTest.createConnection(options, dialect); + Spanner spanner = spannerConnection.getSpanner(); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + when(spannerConnection.getDialect()).thenReturn(dialect); + when(options.getConnection()).thenReturn(spannerConnection); + when(options.getDatabaseId()).thenReturn(DatabaseId.of("project", "instance", "database")); + return new JdbcConnection( + "jdbc:cloudspanner://localhost/projects/project/instances/instance/databases/database;credentialsUrl=url", + options); + } + + private ConnectionOptions mockOptions() { + ConnectionOptions options = mock(ConnectionOptions.class); + when(options.getDatabaseId()).thenReturn(DatabaseId.of("project", "instance", "database")); + return options; + } + + @Test + public void testGetDatabaseClient() throws SQLException { + ConnectionOptions options = mockOptions(); + try (Connection connection = createConnection(options)) { + CloudSpannerJdbcConnection spannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + assertNotNull(spannerJdbcConnection.getDatabaseClient()); + } + } + + @Test + public void 
testAutoCommit() throws SQLException { + ConnectionOptions options = mockOptions(); + when(options.isAutocommit()).thenReturn(true); + try (Connection connection = createConnection(options)) { + assertThat(connection.getAutoCommit()).isTrue(); + connection.setAutoCommit(false); + assertThat(connection.getAutoCommit()).isFalse(); + // execute a query that will start a transaction + connection.createStatement().executeQuery(AbstractConnectionImplTest.SELECT); + // setting autocommit will automatically commit the transaction + connection.setAutoCommit(true); + assertThat(connection.getAutoCommit()).isTrue(); + } + } + + @Test + public void testReadOnly() throws SQLException { + ConnectionOptions options = mockOptions(); + when(options.isAutocommit()).thenReturn(true); + when(options.isReadOnly()).thenReturn(true); + try (Connection connection = createConnection(options)) { + assertThat(connection.isReadOnly()).isTrue(); + connection.setReadOnly(false); + assertThat(connection.isReadOnly()).isFalse(); + // start a transaction + connection.createStatement().execute("begin transaction"); + // setting readonly should no longer be allowed + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> connection.setReadOnly(true)); + assertEquals(Code.FAILED_PRECONDITION, sqlException.getCode()); + } + } + + @Test + public void testCommit() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + // verify that there is no transaction started + assertThat(connection.getSpannerConnection().isTransactionStarted()).isFalse(); + // start a transaction + connection.createStatement().execute(AbstractConnectionImplTest.SELECT); + // verify that we did start a transaction + assertThat(connection.getSpannerConnection().isTransactionStarted()).isTrue(); + // do a commit + connection.commit(); + // verify that there is no transaction started anymore + 
assertThat(connection.getSpannerConnection().isTransactionStarted()).isFalse(); + // verify that there is a commit timestamp + assertThat(connection.getSpannerConnection().getCommitTimestamp()).isNotNull(); + } + } + + @Test + public void testRollback() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + // verify that there is no transaction started + assertThat(connection.getSpannerConnection().isTransactionStarted()).isFalse(); + // start a transaction + connection.createStatement().execute(AbstractConnectionImplTest.SELECT); + // verify that we did start a transaction + assertThat(connection.getSpannerConnection().isTransactionStarted()).isTrue(); + // do a rollback + connection.rollback(); + // verify that there is no transaction started anymore + assertThat(connection.getSpannerConnection().isTransactionStarted()).isFalse(); + // verify that there is no commit timestamp + String showCommitTimestamp = + dialect == Dialect.POSTGRESQL + ? 
"show spanner.commit_timestamp" + : "show variable commit_timestamp"; + try (ResultSet rs = connection.createStatement().executeQuery(showCommitTimestamp)) { + assertThat(rs.next()).isTrue(); + assertThat(rs.getTimestamp(1)).isNull(); + } + } + } + + @Test + public void testClosedJdbcConnection() + throws SQLException, + NoSuchMethodException, + SecurityException, + IllegalAccessException, + IllegalArgumentException { + testClosed(Connection.class, "getCatalog"); + testClosed(Connection.class, "getWarnings"); + testClosed(Connection.class, "clearWarnings"); + testClosed(Connection.class, "getHoldability"); + testClosed(Connection.class, "createClob"); + testClosed(Connection.class, "createBlob"); + testClosed(Connection.class, "createNClob"); + testClosed(Connection.class, "createSQLXML"); + testClosed(Connection.class, "getCatalog"); + testClosed(Connection.class, "getClientInfo"); + testClosed(Connection.class, "getSchema"); + testClosed(Connection.class, "getNetworkTimeout"); + + testClosed( + Connection.class, "setCatalog", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed( + Connection.class, + "prepareCall", + new Class[] {String.class, int.class, int.class}, + new Object[] {"TEST", 0, 0}); + testClosed( + Connection.class, + "prepareCall", + new Class[] {String.class, int.class, int.class, int.class}, + new Object[] {"TEST", 0, 0, 0}); + testClosed( + Connection.class, + "setClientInfo", + new Class[] {String.class, String.class}, + new Object[] {"TEST", "TEST"}); + testClosed( + Connection.class, "setClientInfo", new Class[] {Properties.class}, new Object[] {null}); + testClosed( + Connection.class, "getClientInfo", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed( + Connection.class, + "createStruct", + new Class[] {String.class, Object[].class}, + new Object[] {"TEST", new Object[] {}}); + testClosed(Connection.class, "setSchema", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed( + Connection.class, + 
"setNetworkTimeout", + new Class[] {Executor.class, int.class}, + new Object[] {null, 0}); + + testClosed(Connection.class, "getTypeMap"); + testClosed(Connection.class, "createStatement"); + testClosed(Connection.class, "getAutoCommit"); + testClosed(Connection.class, "commit"); + testClosed(Connection.class, "rollback"); + testClosed(Connection.class, "getMetaData"); + testClosed(Connection.class, "isReadOnly"); + testClosed(Connection.class, "getTransactionIsolation"); + testClosed(Connection.class, "setSavepoint"); + + testClosed( + Connection.class, + "setTypeMap", + new Class[] {Map.class}, + new Object[] {Collections.EMPTY_MAP}); + testClosed( + Connection.class, "prepareStatement", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed( + Connection.class, "prepareCall", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed(Connection.class, "nativeSQL", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed( + Connection.class, "prepareStatement", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed( + Connection.class, "setAutoCommit", new Class[] {boolean.class}, new Object[] {true}); + testClosed( + Connection.class, "setReadOnly", new Class[] {boolean.class}, new Object[] {true}); + testClosed( + Connection.class, "setTransactionIsolation", new Class[] {int.class}, new Object[] {0}); + testClosed( + Connection.class, + "createStatement", + new Class[] {int.class, int.class}, + new Object[] {0, 0}); + testClosed( + Connection.class, + "prepareStatement", + new Class[] {String.class, int.class, int.class}, + new Object[] {"TEST", 0, 0}); + testClosed( + Connection.class, + "createStatement", + new Class[] {int.class, int.class, int.class}, + new Object[] {0, 0, 0}); + testClosed( + Connection.class, + "prepareStatement", + new Class[] {String.class, int.class, int.class, int.class}, + new Object[] {"TEST", 0, 0, 0}); + testClosed( + Connection.class, + "prepareStatement", + new Class[] {String.class, int.class}, 
+ new Object[] {"TEST", 0}); + testClosed( + Connection.class, + "prepareStatement", + new Class[] {String.class, int[].class}, + new Object[] {"TEST", new int[] {0}}); + testClosed( + Connection.class, + "prepareStatement", + new Class[] {String.class, String[].class}, + new Object[] {"TEST", new String[] {"COL1"}}); + testClosed( + Connection.class, + "createArrayOf", + new Class[] {String.class, Object[].class}, + new Object[] {"TEST", new Object[] {"COL1"}}); + + testClosed( + Connection.class, "setSavepoint", new Class[] {String.class}, new Object[] {"TEST"}); + testClosed(Connection.class, "rollback", new Class[] {Savepoint.class}, new Object[] {null}); + testClosed( + Connection.class, + "releaseSavepoint", + new Class[] {Savepoint.class}, + new Object[] {null}); + + testClosed(CloudSpannerJdbcConnection.class, "isReturnCommitStats"); + testClosed( + CloudSpannerJdbcConnection.class, + "setReturnCommitStats", + new Class[] {boolean.class}, + new Object[] {true}); + testClosed(CloudSpannerJdbcConnection.class, "getCommitResponse"); + } + + private void testClosed(Class clazz, String name) + throws NoSuchMethodException, + SecurityException, + SQLException, + IllegalAccessException, + IllegalArgumentException { + testClosed(clazz, name, null, null); + } + + private void testClosed( + Class clazz, String name, Class[] paramTypes, Object[] args) + throws NoSuchMethodException, + SecurityException, + SQLException, + IllegalAccessException, + IllegalArgumentException { + Method method = clazz.getDeclaredMethod(name, paramTypes); + testInvokeMethodOnClosedConnection(method, args); + } + + private void testInvokeMethodOnClosedConnection(Method method, Object... 
args) + throws SQLException, IllegalAccessException, IllegalArgumentException { + ConnectionOptions options = mockOptions(); + JdbcConnection connection = createConnection(options); + connection.close(); + boolean valid = false; + try { + method.invoke(connection, args); + } catch (InvocationTargetException e) { + if (e.getTargetException() instanceof JdbcSqlException + && ((JdbcSqlException) e.getTargetException()).getCode() == Code.FAILED_PRECONDITION + && ((JdbcSqlException) e.getTargetException()).getMessage().endsWith("has been closed")) { + // this is the expected exception + valid = true; + } + } + assertWithMessage("Method did not throw exception on closed connection: " + method.getName()) + .that(valid) + .isTrue(); + } + + @Test + public void testTransactionIsolation() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + assertEquals(Connection.TRANSACTION_SERIALIZABLE, connection.getTransactionIsolation()); + // assert that setting it to these values is ok. + connection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, connection.getTransactionIsolation()); + connection.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + assertEquals(Connection.TRANSACTION_REPEATABLE_READ, connection.getTransactionIsolation()); + // assert that setting it to something else is not ok. 
+ int[] invalidValues = + new int[] { + Connection.TRANSACTION_READ_COMMITTED, Connection.TRANSACTION_READ_UNCOMMITTED, -100 + }; + for (int invalidValue : invalidValues) { + SQLException exception = + assertThrows( + SQLException.class, () -> connection.setTransactionIsolation(invalidValue)); + assertTrue(exception instanceof JdbcSqlException); + JdbcSqlException spannerException = (JdbcSqlException) exception; + if (invalidValue == -100) { + assertEquals(Code.INVALID_ARGUMENT, spannerException.getCode()); + } else { + assertEquals(Code.UNIMPLEMENTED, spannerException.getCode()); + } + } + } + } + + @Test + public void testHoldability() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + assertThat(connection.getHoldability()).isEqualTo(ResultSet.CLOSE_CURSORS_AT_COMMIT); + // assert that setting it to this value is ok. + connection.setHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT); + // assert that setting it to something else is not ok. + int[] settings = new int[] {ResultSet.HOLD_CURSORS_OVER_COMMIT, -100}; + for (int setting : settings) { + boolean exception = false; + try { + connection.setHoldability(setting); + } catch (SQLException e) { + if (setting == -100) { + exception = + (e instanceof JdbcSqlException + && ((JdbcSqlException) e).getCode() == Code.INVALID_ARGUMENT); + } else { + exception = + (e instanceof JdbcSqlException + && ((JdbcSqlException) e).getCode() == Code.UNIMPLEMENTED); + } + } + assertThat(exception).isTrue(); + } + } + } + + @Test + public void testWarnings() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + assertThat((Object) connection.getWarnings()).isNull(); + + // Push one warning and get it twice. 
+ connection.pushWarning(new SQLWarning("test")); + assertThat(connection.getWarnings().getMessage()).isEqualTo("test"); + assertThat(connection.getWarnings().getMessage()).isEqualTo("test"); + + // Clear warnings and push two warnings and get them both. + connection.clearWarnings(); + connection.pushWarning(new SQLWarning("test 1")); + connection.pushWarning(new SQLWarning("test 2")); + assertThat(connection.getWarnings().getMessage()).isEqualTo("test 1"); + assertThat(connection.getWarnings().getMessage()).isEqualTo("test 1"); + assertThat(connection.getWarnings().getNextWarning().getMessage()).isEqualTo("test 2"); + + // Clear warnings. + connection.clearWarnings(); + assertThat((Object) connection.getWarnings()).isNull(); + } + } + + @Test + public void getDefaultClientInfo() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + Properties defaultProperties = connection.getClientInfo(); + assertThat(defaultProperties.stringPropertyNames()) + .containsExactly("APPLICATIONNAME", "CLIENTHOSTNAME", "CLIENTUSER"); + } + } + + @Test + public void testSetInvalidClientInfo() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + assertThat((Object) connection.getWarnings()).isNull(); + connection.setClientInfo("test", "foo"); + assertThat((Object) connection.getWarnings()).isNotNull(); + assertThat(connection.getWarnings().getMessage()) + .isEqualTo(String.format(AbstractJdbcConnection.CLIENT_INFO_NOT_SUPPORTED, "TEST")); + + connection.clearWarnings(); + assertThat((Object) connection.getWarnings()).isNull(); + + Properties props = new Properties(); + props.setProperty("test", "foo"); + connection.setClientInfo(props); + assertThat((Object) connection.getWarnings()).isNotNull(); + assertThat(connection.getWarnings().getMessage()) + .isEqualTo(String.format(AbstractJdbcConnection.CLIENT_INFO_NOT_SUPPORTED, 
"TEST")); + } + } + + @Test + public void testSetClientInfo() throws SQLException { + ConnectionOptions options = mockOptions(); + try (JdbcConnection connection = createConnection(options)) { + try (ResultSet validProperties = connection.getMetaData().getClientInfoProperties()) { + while (validProperties.next()) { + assertThat((Object) connection.getWarnings()).isNull(); + String name = validProperties.getString("NAME"); + + connection.setClientInfo(name, "new-client-info-value"); + assertThat((Object) connection.getWarnings()).isNull(); + assertThat(connection.getClientInfo(name)).isEqualTo("new-client-info-value"); + + Properties props = new Properties(); + props.setProperty(name.toLowerCase(), "some-other-value"); + connection.setClientInfo(props); + assertThat((Object) connection.getWarnings()).isNull(); + assertThat(connection.getClientInfo(name)).isEqualTo("some-other-value"); + assertThat(connection.getClientInfo().keySet()).hasSize(1); + for (String key : connection.getClientInfo().stringPropertyNames()) { + if (key.equals(name)) { + assertThat(connection.getClientInfo().getProperty(key)).isEqualTo("some-other-value"); + } else { + assertThat(connection.getClientInfo().getProperty(key)).isEqualTo(""); + } + } + } + } + } + } + + @Test + public void testIsValid() throws SQLException { + // Setup. 
+ ConnectionOptions options = mockOptions(); + com.google.cloud.spanner.connection.Connection spannerConnection = + mock(com.google.cloud.spanner.connection.Connection.class); + when(spannerConnection.getDialect()).thenReturn(dialect); + when(options.getConnection()).thenReturn(spannerConnection); + Spanner spanner = mock(Spanner.class); + when(spannerConnection.getSpanner()).thenReturn(spanner); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + when(spanner.getOptions()).thenReturn(spannerOptions); + Statement statement = Statement.of(JdbcConnection.LEGACY_IS_VALID_QUERY); + + // Verify that an opened connection that returns a result set is valid. + try (JdbcConnection connection = new JdbcConnection("url", options)) { + when(spannerConnection.executeQuery(statement)).thenReturn(createSelect1ResultSet()); + assertThat(connection.isValid(1)).isTrue(); + try { + // Invalid timeout value. + connection.isValid(-1); + fail("missing expected exception"); + } catch (JdbcSqlExceptionImpl e) { + assertThat(e.getCode()).isEqualTo(Code.INVALID_ARGUMENT); + } + + // Now let the query return an error. isValid should now return false. 
+ when(spannerConnection.getDialect()) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.ABORTED, "the current transaction has been aborted")); + assertThat(connection.isValid(1)).isFalse(); + } + } + + @Test + public void testIsValidOnClosedConnection() throws SQLException { + Connection connection = createConnection(mockOptions()); + connection.close(); + assertThat(connection.isValid(1)).isFalse(); + } + + @Test + public void testCreateStatement() throws SQLException { + try (JdbcConnection connection = createConnection(mockOptions())) { + for (int resultSetType : + new int[] { + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.TYPE_SCROLL_SENSITIVE + }) { + for (int resultSetConcurrency : + new int[] {ResultSet.CONCUR_READ_ONLY, ResultSet.CONCUR_UPDATABLE}) { + if (resultSetType == ResultSet.TYPE_FORWARD_ONLY // Only FORWARD_ONLY is supported + && resultSetConcurrency == ResultSet.CONCUR_READ_ONLY) // Only READ_ONLY is supported + { + java.sql.Statement statement = + connection.createStatement(resultSetType, resultSetConcurrency); + assertThat(statement.getResultSetType()).isEqualTo(resultSetType); + assertThat(statement.getResultSetConcurrency()).isEqualTo(resultSetConcurrency); + } else { + assertCreateStatementFails(connection, resultSetType, resultSetConcurrency); + } + for (int resultSetHoldability : + new int[] {ResultSet.CLOSE_CURSORS_AT_COMMIT, ResultSet.HOLD_CURSORS_OVER_COMMIT}) { + if (resultSetType == ResultSet.TYPE_FORWARD_ONLY // Only FORWARD_ONLY is supported + && resultSetConcurrency == ResultSet.CONCUR_READ_ONLY // Only READ_ONLY is supported + && resultSetHoldability + == ResultSet + .CLOSE_CURSORS_AT_COMMIT) // Only CLOSE_CURSORS_AT_COMMIT is supported + { + java.sql.Statement statement = + connection.createStatement( + resultSetType, resultSetConcurrency, resultSetHoldability); + assertThat(statement.getResultSetType()).isEqualTo(resultSetType); + 
assertThat(statement.getResultSetConcurrency()).isEqualTo(resultSetConcurrency); + assertThat(statement.getResultSetHoldability()).isEqualTo(resultSetHoldability); + } else { + assertCreateStatementFails( + connection, resultSetType, resultSetConcurrency, resultSetHoldability); + } + } + } + } + } + } + + private void assertCreateStatementFails( + JdbcConnection connection, + int resultSetType, + int resultSetConcurrency, + int resultSetHoldability) + throws SQLException { + try { + connection.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability); + fail( + String.format( + "missing expected exception for %d %d %d", + resultSetType, resultSetConcurrency, resultSetHoldability)); + } catch (SQLFeatureNotSupportedException e) { + // ignore, this is the expected exception. + } + } + + private void assertCreateStatementFails( + JdbcConnection connection, int resultSetType, int resultSetConcurrency) throws SQLException { + try { + connection.createStatement(resultSetType, resultSetConcurrency); + fail( + String.format( + "missing expected exception for %d %d", resultSetType, resultSetConcurrency)); + } catch (SQLFeatureNotSupportedException e) { + // ignore, this is the expected exception. 
+ } + } + + @Test + public void testPrepareStatement() throws SQLException { + try (JdbcConnection connection = createConnection(mockOptions())) { + for (int resultSetType : + new int[] { + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.TYPE_SCROLL_SENSITIVE + }) { + for (int resultSetConcurrency : + new int[] {ResultSet.CONCUR_READ_ONLY, ResultSet.CONCUR_UPDATABLE}) { + if (resultSetType == ResultSet.TYPE_FORWARD_ONLY // Only FORWARD_ONLY is supported + && resultSetConcurrency == ResultSet.CONCUR_READ_ONLY) // Only READ_ONLY is supported + { + PreparedStatement ps = + connection.prepareStatement("SELECT 1", resultSetType, resultSetConcurrency); + assertThat(ps.getResultSetType()).isEqualTo(resultSetType); + assertThat(ps.getResultSetConcurrency()).isEqualTo(resultSetConcurrency); + } else { + assertPrepareStatementFails(connection, resultSetType, resultSetConcurrency); + } + for (int resultSetHoldability : + new int[] {ResultSet.CLOSE_CURSORS_AT_COMMIT, ResultSet.HOLD_CURSORS_OVER_COMMIT}) { + if (resultSetType == ResultSet.TYPE_FORWARD_ONLY // Only FORWARD_ONLY is supported + && resultSetConcurrency == ResultSet.CONCUR_READ_ONLY // Only READ_ONLY is supported + && resultSetHoldability + == ResultSet + .CLOSE_CURSORS_AT_COMMIT) // Only CLOSE_CURSORS_AT_COMMIT is supported + { + PreparedStatement ps = + connection.prepareStatement( + "SELECT 1", resultSetType, resultSetConcurrency, resultSetHoldability); + assertThat(ps.getResultSetType()).isEqualTo(resultSetType); + assertThat(ps.getResultSetConcurrency()).isEqualTo(resultSetConcurrency); + assertThat(ps.getResultSetHoldability()).isEqualTo(resultSetHoldability); + } else { + assertPrepareStatementFails( + connection, resultSetType, resultSetConcurrency, resultSetHoldability); + } + } + } + } + } + } + + private void assertPrepareStatementFails( + JdbcConnection connection, + int resultSetType, + int resultSetConcurrency, + int resultSetHoldability) + throws SQLException { + try { + 
connection.prepareStatement( + "SELECT 1", resultSetType, resultSetConcurrency, resultSetHoldability); + fail( + String.format( + "missing expected exception for %d %d %d", + resultSetType, resultSetConcurrency, resultSetHoldability)); + } catch (SQLFeatureNotSupportedException e) { + // ignore, this is the expected exception. + } + } + + private void assertPrepareStatementFails( + JdbcConnection connection, int resultSetType, int resultSetConcurrency) throws SQLException { + try { + connection.prepareStatement("SELECT 1", resultSetType, resultSetConcurrency); + fail( + String.format( + "missing expected exception for %d %d", resultSetType, resultSetConcurrency)); + } catch (SQLFeatureNotSupportedException e) { + // ignore, this is the expected exception. + } + } + + @Test + public void testPrepareStatementWithAutoGeneratedKeys() throws SQLException { + String sql = "INSERT INTO FOO (COL1) VALUES (?)"; + try (JdbcConnection connection = createConnection(mockOptions())) { + PreparedStatement statement = + connection.prepareStatement(sql, java.sql.Statement.NO_GENERATED_KEYS); + ResultSet generatedKeys = statement.getGeneratedKeys(); + assertFalse(generatedKeys.next()); + + statement = connection.prepareStatement(sql, java.sql.Statement.RETURN_GENERATED_KEYS); + generatedKeys = statement.getGeneratedKeys(); + assertFalse(generatedKeys.next()); + } + } + + @Test + public void testCatalog() throws SQLException { + ConnectionOptions options = mockOptions(); + when(options.getDatabaseName()).thenReturn("test"); + try (JdbcConnection connection = createConnection(options)) { + // The connection should always return the default catalog as the current catalog, as no other + // catalogs exist in the INFORMATION_SCHEMA. + // The default catalog is the empty string for GoogleSQL databases. + // The default catalog is the database name for PostgreSQL databases. + assertEquals(connection.getDefaultCatalog(), connection.getCatalog()); + // This should be allowed. 
+ connection.setCatalog(connection.getDefaultCatalog()); + // This should cause an exception. + JdbcSqlExceptionImpl exception = + assertThrows(JdbcSqlExceptionImpl.class, () -> connection.setCatalog("other")); + assertEquals(Code.INVALID_ARGUMENT, exception.getCode()); + } + } + + @Test + public void testSchema() throws SQLException { + try (JdbcConnection connection = createConnection(mockOptions())) { + // The connection should always return the default schema as the current schema, as we + // currently do not support setting the connection to a different schema. + // The default schema is the empty string for GoogleSQL databases. + // The default schema is 'public' for PostgreSQL databases. + assertEquals(connection.getDefaultSchema(), connection.getSchema()); + String expectedDefaultSchema = dialect == Dialect.POSTGRESQL ? "public" : ""; + assertEquals(expectedDefaultSchema, connection.getSchema()); + // This should be allowed. + connection.setSchema(connection.getDefaultSchema()); + JdbcSqlExceptionImpl exception = + assertThrows(JdbcSqlExceptionImpl.class, () -> connection.setSchema("other")); + assertEquals(Code.INVALID_ARGUMENT, exception.getCode()); + } + } + + @Test + public void testIsReturnCommitStats() throws SQLException { + try (JdbcConnection connection = createConnection(mockOptions())) { + assertFalse(connection.isReturnCommitStats()); + connection.setReturnCommitStats(true); + assertTrue(connection.isReturnCommitStats()); + } + } + + @Test + public void testIsReturnCommitStats_throwsSqlException() { + ConnectionOptions options = mockOptions(); + com.google.cloud.spanner.connection.Connection spannerConnection = + mock(com.google.cloud.spanner.connection.Connection.class); + when(options.getConnection()).thenReturn(spannerConnection); + Spanner spanner = mock(Spanner.class); + when(spannerConnection.getSpanner()).thenReturn(spanner); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + 
when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + when(spanner.getOptions()).thenReturn(spannerOptions); + when(spannerConnection.isReturnCommitStats()) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "test exception")); + try (JdbcConnection connection = + new JdbcConnection( + "jdbc:cloudspanner://localhost/projects/project/instances/instance/databases/database;credentialsUrl=url", + options)) { + connection.isReturnCommitStats(); + fail("missing expected exception"); + } catch (SQLException e) { + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.FAILED_PRECONDITION, ((JdbcSqlException) e).getCode()); + } + } + + @Test + public void testSetReturnCommitStats_throwsSqlException() { + ConnectionOptions options = mockOptions(); + com.google.cloud.spanner.connection.Connection spannerConnection = + mock(com.google.cloud.spanner.connection.Connection.class); + when(options.getConnection()).thenReturn(spannerConnection); + Spanner spanner = mock(Spanner.class); + when(spannerConnection.getSpanner()).thenReturn(spanner); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + when(spanner.getOptions()).thenReturn(spannerOptions); + Mockito.doThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "test exception")) + .when(spannerConnection) + .setReturnCommitStats(any(boolean.class)); + try (JdbcConnection connection = + new JdbcConnection( + "jdbc:cloudspanner://localhost/projects/project/instances/instance/databases/database;credentialsUrl=url", + options)) { + connection.setReturnCommitStats(true); + fail("missing expected exception"); + } catch (SQLException e) { + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.FAILED_PRECONDITION, ((JdbcSqlException) e).getCode()); + } + } + + @Test + public void testGetCommitResponse_throwsSqlException() { + 
ConnectionOptions options = mockOptions(); + com.google.cloud.spanner.connection.Connection spannerConnection = + mock(com.google.cloud.spanner.connection.Connection.class); + when(options.getConnection()).thenReturn(spannerConnection); + Spanner spanner = mock(Spanner.class); + when(spannerConnection.getSpanner()).thenReturn(spanner); + SpannerOptions spannerOptions = mock(SpannerOptions.class); + when(spannerOptions.getOpenTelemetry()).thenReturn(OpenTelemetry.noop()); + when(spanner.getOptions()).thenReturn(spannerOptions); + Mockito.doThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "test exception")) + .when(spannerConnection) + .setReturnCommitStats(any(boolean.class)); + try (JdbcConnection connection = + new JdbcConnection( + "jdbc:cloudspanner://localhost/projects/project/instances/instance/databases/database;credentialsUrl=url", + options)) { + connection.setReturnCommitStats(true); + fail("missing expected exception"); + } catch (SQLException e) { + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.FAILED_PRECONDITION, ((JdbcSqlException) e).getCode()); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDataSourceTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDataSourceTest.java new file mode 100644 index 000000000000..196cb9169df5 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDataSourceTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; + +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import java.sql.Connection; +import java.sql.SQLException; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcDataSourceTest extends AbstractMockServerTest { + + @Override + protected String getBaseUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/p/instances/i/databases/d?usePlainText=true", + getPort()); + } + + @Test + public void testGetConnectionFromNewDataSource() throws SQLException { + for (boolean autoCommit : new boolean[] {true, false}) { + JdbcDataSource dataSource = new JdbcDataSource(); + dataSource.setUrl(getBaseUrl()); + dataSource.setAutocommit(autoCommit); + try (Connection connection = dataSource.getConnection()) { + assertEquals(autoCommit, connection.getAutoCommit()); + } + } + } + + @Test + public void testGetConnectionFromCachedDataSource() throws SQLException { + JdbcDataSource dataSource = new JdbcDataSource(); + dataSource.setUrl(getBaseUrl()); + for (boolean autoCommit : new boolean[] {true, false}) { + // Changing a property on the DataSource should invalidate the internally cached + // ConnectionOptions. 
+ dataSource.setAutocommit(autoCommit); + try (Connection connection = dataSource.getConnection()) { + assertEquals(autoCommit, connection.getAutoCommit()); + } + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaDataTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaDataTest.java new file mode 100644 index 000000000000..fc3168ab4f0a --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaDataTest.java @@ -0,0 +1,577 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.ConnectionOptions; +import com.google.cloud.spanner.connection.ConnectionOptionsTest; +import java.io.IOException; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowIdLifetime; +import java.sql.SQLException; +import java.sql.Types; +import java.util.Objects; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class JdbcDatabaseMetaDataTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private static final String DEFAULT_CATALOG = ""; + private static final String DEFAULT_SCHEMA = ""; + private static final String TEST_TABLE = "FOO"; + private static final int DATABASE_MAJOR_VERSION = 1; + private static final int DATABASE_MINOR_VERSION = 0; + private static final String DATABASE_PRODUCT_NAME = "Google Cloud Spanner"; + private static final String POSTGRESQL_DATABASE_PRODUCT_NAME = + DATABASE_PRODUCT_NAME + " PostgreSQL"; + + @Test + public void testTrivialMethods() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + assertTrue(meta.allProceduresAreCallable()); + 
assertTrue(meta.allTablesAreSelectable()); + assertFalse(meta.autoCommitFailureClosesAllResultSets()); + assertFalse(meta.dataDefinitionCausesTransactionCommit()); + assertFalse(meta.dataDefinitionIgnoredInTransactions()); + for (int type : + new int[] { + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.TYPE_SCROLL_SENSITIVE + }) { + assertFalse(meta.deletesAreDetected(type)); + assertFalse(meta.insertsAreDetected(type)); + assertFalse(meta.updatesAreDetected(type)); + assertFalse(meta.ownDeletesAreVisible(type)); + assertFalse(meta.ownInsertsAreVisible(type)); + assertFalse(meta.ownUpdatesAreVisible(type)); + assertFalse(meta.othersDeletesAreVisible(type)); + assertFalse(meta.othersInsertsAreVisible(type)); + assertFalse(meta.othersUpdatesAreVisible(type)); + } + assertTrue(meta.doesMaxRowSizeIncludeBlobs()); + assertFalse(meta.generatedKeyAlwaysReturned()); + assertEquals(".", meta.getCatalogSeparator()); + assertEquals("CATALOG", meta.getCatalogTerm()); + assertEquals(DATABASE_MAJOR_VERSION, meta.getDatabaseMajorVersion()); + assertEquals(DATABASE_MINOR_VERSION, meta.getDatabaseMinorVersion()); + assertEquals( + DATABASE_MAJOR_VERSION + "." 
+ DATABASE_MINOR_VERSION, meta.getDatabaseProductVersion()); + assertEquals(Connection.TRANSACTION_SERIALIZABLE, meta.getDefaultTransactionIsolation()); + assertEquals("com.google.cloud.spanner.jdbc.JdbcDriver", meta.getDriverName()); + assertEquals("", meta.getExtraNameCharacters()); + assertEquals("`", meta.getIdentifierQuoteString()); + assertEquals(4, meta.getJDBCMajorVersion()); + assertEquals(1, meta.getJDBCMinorVersion()); // Java 7 is JDBC 4.1 + assertEquals(0, meta.getMaxBinaryLiteralLength()); + assertEquals(0, meta.getMaxCatalogNameLength()); + assertEquals(0, meta.getMaxCharLiteralLength()); + assertEquals(128, meta.getMaxColumnNameLength()); + assertEquals(1000, meta.getMaxColumnsInGroupBy()); + assertEquals(16, meta.getMaxColumnsInIndex()); + assertEquals(0, meta.getMaxColumnsInOrderBy()); + assertEquals(0, meta.getMaxColumnsInSelect()); + assertEquals(1024, meta.getMaxColumnsInTable()); + assertEquals(0, meta.getMaxConnections()); + assertEquals(0, meta.getMaxCursorNameLength()); + assertEquals(8000, meta.getMaxIndexLength()); + assertEquals(0, meta.getMaxProcedureNameLength()); + assertEquals(0, meta.getMaxRowSize()); + assertEquals(128, meta.getMaxSchemaNameLength()); + assertEquals(1000000, meta.getMaxStatementLength()); + assertEquals(0, meta.getMaxStatements()); + assertEquals(128, meta.getMaxTableNameLength()); + assertEquals(0, meta.getMaxTablesInSelect()); + assertEquals(0, meta.getMaxUserNameLength()); + assertEquals("PROCEDURE", meta.getProcedureTerm()); + assertEquals(ResultSet.CLOSE_CURSORS_AT_COMMIT, meta.getResultSetHoldability()); + assertEquals(RowIdLifetime.ROWID_UNSUPPORTED, meta.getRowIdLifetime()); + assertEquals("SCHEMA", meta.getSchemaTerm()); + assertEquals("\\", meta.getSearchStringEscape()); + assertEquals(DatabaseMetaData.sqlStateSQL, meta.getSQLStateType()); + assertTrue(meta.locatorsUpdateCopy()); + assertFalse(meta.nullsAreSortedHigh()); + assertTrue(meta.nullsAreSortedLow()); + assertFalse(meta.nullsAreSortedAtStart()); 
+ assertFalse(meta.nullsAreSortedAtEnd()); + assertTrue(meta.nullPlusNonNullIsNull()); + assertFalse(meta.isCatalogAtStart()); + assertEquals(connection.isReadOnly(), meta.isReadOnly()); + if (dialect == Dialect.POSTGRESQL) { + assertEquals(POSTGRESQL_DATABASE_PRODUCT_NAME, meta.getDatabaseProductName()); + assertTrue(meta.storesLowerCaseIdentifiers()); + } else { + assertEquals(DATABASE_PRODUCT_NAME, meta.getDatabaseProductName()); + assertFalse(meta.storesLowerCaseIdentifiers()); + } + assertFalse(meta.storesLowerCaseQuotedIdentifiers()); + assertTrue(meta.storesMixedCaseIdentifiers()); + assertTrue(meta.storesMixedCaseQuotedIdentifiers()); + assertFalse(meta.storesUpperCaseIdentifiers()); + assertFalse(meta.storesUpperCaseQuotedIdentifiers()); + assertTrue(meta.supportsAlterTableWithAddColumn()); + assertTrue(meta.supportsAlterTableWithDropColumn()); + assertFalse(meta.supportsANSI92EntryLevelSQL()); + assertFalse(meta.supportsANSI92FullSQL()); + assertFalse(meta.supportsANSI92IntermediateSQL()); + assertTrue(meta.supportsBatchUpdates()); + assertFalse(meta.supportsCatalogsInDataManipulation()); + assertFalse(meta.supportsCatalogsInIndexDefinitions()); + assertFalse(meta.supportsCatalogsInPrivilegeDefinitions()); + assertFalse(meta.supportsCatalogsInProcedureCalls()); + assertFalse(meta.supportsCatalogsInTableDefinitions()); + assertTrue(meta.supportsColumnAliasing()); + // Note that the supportsConvert() method indicates whether the server side function CONVERT is + // supported, not what the JDBC driver might be able to convert on the client side. 
+ assertFalse(meta.supportsConvert()); + int[] types = + new int[] { + Types.ARRAY, + Types.BIGINT, + Types.BINARY, + Types.BIT, + Types.BLOB, + Types.BOOLEAN, + Types.CHAR, + Types.CLOB, + Types.DATALINK, + Types.DATE, + Types.DECIMAL, + Types.DISTINCT, + Types.DOUBLE, + Types.FLOAT, + Types.INTEGER, + Types.JAVA_OBJECT, + Types.LONGNVARCHAR, + Types.LONGVARCHAR, + Types.LONGVARBINARY, + Types.LONGVARCHAR, + Types.NCHAR, + Types.NCLOB, + Types.NULL, + Types.NUMERIC, + Types.NVARCHAR, + Types.OTHER, + Types.REAL, + Types.REF, + Types.ROWID, + Types.SMALLINT, + Types.SQLXML, + Types.STRUCT, + Types.TIME, + Types.TIMESTAMP, + Types.TINYINT, + Types.VARBINARY, + Types.VARCHAR + }; + for (int from : types) { + for (int to : types) { + assertFalse(meta.supportsConvert(from, to)); + } + } + assertFalse(meta.supportsCoreSQLGrammar()); + assertTrue(meta.supportsCorrelatedSubqueries()); + assertFalse(meta.supportsDataDefinitionAndDataManipulationTransactions()); + assertTrue(meta.supportsDataManipulationTransactionsOnly()); + assertFalse(meta.supportsDifferentTableCorrelationNames()); + assertTrue(meta.supportsExpressionsInOrderBy()); + assertFalse(meta.supportsExtendedSQLGrammar()); + assertTrue(meta.supportsFullOuterJoins()); + assertFalse(meta.supportsGetGeneratedKeys()); + assertTrue(meta.supportsGroupBy()); + assertTrue(meta.supportsGroupByBeyondSelect()); + assertTrue(meta.supportsGroupByUnrelated()); + assertFalse(meta.supportsIntegrityEnhancementFacility()); + assertTrue(meta.supportsLikeEscapeClause()); + assertTrue(meta.supportsLimitedOuterJoins()); + assertFalse(meta.supportsMinimumSQLGrammar()); + assertFalse(meta.supportsMixedCaseIdentifiers()); + assertFalse(meta.supportsMixedCaseQuotedIdentifiers()); + assertTrue(meta.supportsMultipleOpenResults()); + assertTrue(meta.supportsMultipleResultSets()); + assertTrue(meta.supportsMultipleTransactions()); + assertFalse(meta.supportsNamedParameters()); + assertTrue(meta.supportsNonNullableColumns()); + 
assertFalse(meta.supportsOpenCursorsAcrossCommit()); + assertFalse(meta.supportsOpenCursorsAcrossRollback()); + assertTrue(meta.supportsOpenStatementsAcrossCommit()); + assertTrue(meta.supportsOpenStatementsAcrossRollback()); + assertTrue(meta.supportsOrderByUnrelated()); + assertTrue(meta.supportsOuterJoins()); + assertFalse(meta.supportsPositionedDelete()); + assertFalse(meta.supportsPositionedUpdate()); + for (int type : + new int[] { + ResultSet.TYPE_FORWARD_ONLY, + ResultSet.TYPE_SCROLL_INSENSITIVE, + ResultSet.TYPE_SCROLL_SENSITIVE + }) { + assertEquals(type == ResultSet.TYPE_FORWARD_ONLY, meta.supportsResultSetType(type)); + for (int concur : new int[] {ResultSet.CONCUR_READ_ONLY, ResultSet.CONCUR_UPDATABLE}) { + assertEquals( + type == ResultSet.TYPE_FORWARD_ONLY && concur == ResultSet.CONCUR_READ_ONLY, + meta.supportsResultSetConcurrency(type, concur)); + } + } + assertTrue(meta.supportsResultSetHoldability(ResultSet.CLOSE_CURSORS_AT_COMMIT)); + assertFalse(meta.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT)); + assertFalse(meta.supportsSavepoints()); + assertTrue(meta.supportsSchemasInDataManipulation()); + assertTrue(meta.supportsSchemasInIndexDefinitions()); + assertTrue(meta.supportsSchemasInPrivilegeDefinitions()); + assertTrue(meta.supportsSchemasInProcedureCalls()); + assertTrue(meta.supportsSchemasInTableDefinitions()); + assertTrue(meta.supportsSelectForUpdate()); + assertFalse(meta.supportsStatementPooling()); + assertFalse(meta.supportsStoredFunctionsUsingCallSyntax()); + assertFalse(meta.supportsStoredProcedures()); + assertTrue(meta.supportsSubqueriesInComparisons()); + assertTrue(meta.supportsSubqueriesInExists()); + assertTrue(meta.supportsSubqueriesInIns()); + assertTrue(meta.supportsSubqueriesInQuantifieds()); + assertTrue(meta.supportsTableCorrelationNames()); + assertTrue(meta.supportsTransactions()); + assertTrue(meta.supportsUnion()); + assertTrue(meta.supportsUnionAll()); + assertFalse(meta.usesLocalFiles()); + 
assertFalse(meta.usesLocalFilePerTable()); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE)); + assertTrue(meta.supportsTransactionIsolationLevel(Connection.TRANSACTION_REPEATABLE_READ)); + for (int level : + new int[] { + Connection.TRANSACTION_NONE, + Connection.TRANSACTION_READ_COMMITTED, + Connection.TRANSACTION_READ_UNCOMMITTED, + }) { + assertFalse(meta.supportsTransactionIsolationLevel(level)); + } + assertEquals(10485760L, meta.getMaxLogicalLobSize()); + assertFalse(meta.supportsRefCursors()); + + // trivial tests that guarantee that the function works, but the return value doesn't matter + assertNotNull(meta.getNumericFunctions()); + assertNotNull(meta.getSQLKeywords()); + assertNotNull(meta.getStringFunctions()); + assertNotNull(meta.getSystemFunctions()); + assertNotNull(meta.getTimeDateFunctions()); + } + + @Test + public void testGetAttributes() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getAttributes(DEFAULT_CATALOG, DEFAULT_SCHEMA, TEST_TABLE, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(21, rsmd.getColumnCount()); + } + } + + @Test + public void testGetBestRowIdentifier() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = + meta.getBestRowIdentifier( + DEFAULT_CATALOG, + DEFAULT_SCHEMA, + TEST_TABLE, + DatabaseMetaData.bestRowTransaction, + false)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(8, rsmd.getColumnCount()); + } + } + + @Test + public void testGetCatalogs() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + when(connection.getCatalog()).thenCallRealMethod(); + DatabaseMetaData meta = new 
JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getCatalogs()) { + assertTrue(rs.next()); + assertEquals(connection.getDefaultCatalog(), rs.getString("TABLE_CAT")); + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(1, rsmd.getColumnCount()); + } + } + + @Test + public void testGetClientInfoProperties() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getClientInfoProperties()) { + assertTrue(rs.next()); + assertEquals("APPLICATIONNAME", rs.getString("NAME")); + assertEquals("", rs.getString("DEFAULT_VALUE")); + + assertTrue(rs.next()); + assertEquals("CLIENTHOSTNAME", rs.getString("NAME")); + assertEquals("", rs.getString("DEFAULT_VALUE")); + + assertTrue(rs.next()); + assertEquals("CLIENTUSER", rs.getString("NAME")); + assertEquals("", rs.getString("DEFAULT_VALUE")); + + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(4, rsmd.getColumnCount()); + } + } + + @Test + public void testGetColumnPrivileges() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = + meta.getColumnPrivileges(DEFAULT_CATALOG, DEFAULT_SCHEMA, TEST_TABLE, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(8, rsmd.getColumnCount()); + } + } + + @Test + public void testGetFunctionColumns() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getFunctionColumns(DEFAULT_CATALOG, DEFAULT_SCHEMA, null, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(17, rsmd.getColumnCount()); + } + } + + @Test + public void testGetFunctions() throws SQLException { + JdbcConnection connection = 
mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getFunctions(DEFAULT_CATALOG, DEFAULT_SCHEMA, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(6, rsmd.getColumnCount()); + } + } + + @Test + public void testGetProcedureColumns() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getProcedureColumns(DEFAULT_CATALOG, DEFAULT_SCHEMA, null, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(20, rsmd.getColumnCount()); + } + } + + @Test + public void testGetProcedures() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getProcedures(DEFAULT_CATALOG, DEFAULT_SCHEMA, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(9, rsmd.getColumnCount()); + } + } + + @Test + public void testGetPseudoColumns() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getPseudoColumns(DEFAULT_CATALOG, DEFAULT_SCHEMA, TEST_TABLE, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(12, rsmd.getColumnCount()); + } + } + + @Test + public void testGetSuperTables() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getSuperTables(DEFAULT_CATALOG, DEFAULT_SCHEMA, TEST_TABLE)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(4, rsmd.getColumnCount()); + } + } + + @Test + public void testGetSuperTypes() throws SQLException { + JdbcConnection 
connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getSuperTypes(DEFAULT_CATALOG, DEFAULT_SCHEMA, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(6, rsmd.getColumnCount()); + } + } + + @Test + public void testGetTablePrivileges() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getTablePrivileges(DEFAULT_CATALOG, DEFAULT_SCHEMA, TEST_TABLE)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(7, rsmd.getColumnCount()); + } + } + + @Test + public void testGetTableTypes() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getTableTypes()) { + assertTrue(rs.next()); + assertEquals("TABLE", rs.getString("TABLE_TYPE")); + assertTrue(rs.next()); + assertEquals("VIEW", rs.getString("TABLE_TYPE")); + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(1, rsmd.getColumnCount()); + } + } + + @Test + public void testGetTypeInfo() throws SQLException { + for (Dialect dialect : Dialect.values()) { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getTypeInfo()) { + assertTrue(rs.next()); + assertEquals("STRING", rs.getString("TYPE_NAME")); + assertEquals(Types.NVARCHAR, rs.getInt("DATA_TYPE")); + assertEquals(Types.NVARCHAR, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("INT64", rs.getString("TYPE_NAME")); + assertEquals(Types.BIGINT, rs.getInt("DATA_TYPE")); + assertEquals(Types.BIGINT, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("BYTES", 
rs.getString("TYPE_NAME")); + assertEquals(Types.BINARY, rs.getInt("DATA_TYPE")); + assertEquals(Types.BINARY, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("FLOAT32", rs.getString("TYPE_NAME")); + assertEquals(Types.REAL, rs.getInt("DATA_TYPE")); + assertEquals(Types.REAL, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("FLOAT64", rs.getString("TYPE_NAME")); + assertEquals(Types.DOUBLE, rs.getInt("DATA_TYPE")); + assertEquals(Types.DOUBLE, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("BOOL", rs.getString("TYPE_NAME")); + assertEquals(Types.BOOLEAN, rs.getInt("DATA_TYPE")); + assertEquals(Types.BOOLEAN, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("DATE", rs.getString("TYPE_NAME")); + assertEquals(Types.DATE, rs.getInt("DATA_TYPE")); + assertEquals(Types.DATE, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("TIMESTAMP", rs.getString("TYPE_NAME")); + assertEquals(Types.TIMESTAMP, rs.getInt("DATA_TYPE")); + assertEquals(Types.TIMESTAMP, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("NUMERIC", rs.getString("TYPE_NAME")); + assertEquals(Types.NUMERIC, rs.getInt("DATA_TYPE")); + assertEquals(Types.NUMERIC, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + assertEquals("UUID", rs.getString("TYPE_NAME")); + assertEquals(Types.OTHER, rs.getInt("DATA_TYPE")); + assertEquals(Types.OTHER, rs.getShort("DATA_TYPE")); + assertTrue(rs.next()); + if (dialect == Dialect.POSTGRESQL) { + assertEquals("JSONB", rs.getString("TYPE_NAME")); + assertEquals(PgJsonbType.VENDOR_TYPE_NUMBER, rs.getInt("DATA_TYPE")); + assertEquals(PgJsonbType.SHORT_VENDOR_TYPE_NUMBER, rs.getShort("DATA_TYPE")); + } else { + assertEquals("JSON", rs.getString("TYPE_NAME")); + assertEquals(JsonType.VENDOR_TYPE_NUMBER, rs.getInt("DATA_TYPE")); + assertEquals(JsonType.SHORT_VENDOR_TYPE_NUMBER, rs.getShort("DATA_TYPE")); + } + + assertFalse(rs.next()); + ResultSetMetaData rsmd = 
rs.getMetaData(); + assertEquals(18, rsmd.getColumnCount()); + } + } + } + + @Test + public void testGetUDTs() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getUDTs(DEFAULT_CATALOG, DEFAULT_SCHEMA, null, null)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(7, rsmd.getColumnCount()); + } + } + + @Test + public void testGetVersionColumns() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + try (ResultSet rs = meta.getVersionColumns(DEFAULT_CATALOG, DEFAULT_SCHEMA, TEST_TABLE)) { + assertFalse(rs.next()); + ResultSetMetaData rsmd = rs.getMetaData(); + assertEquals(8, rsmd.getColumnCount()); + } + } + + @Test + public void testGetUserName() throws SQLException, IOException { + ServiceAccountCredentials credentials = + ServiceAccountCredentials.fromStream( + Objects.requireNonNull(ConnectionOptionsTest.class.getResource("test-key.json")) + .openStream()); + JdbcConnection connection = mock(JdbcConnection.class); + ConnectionOptions options = mock(ConnectionOptions.class); + when(options.getCredentials()).thenReturn(credentials); + when(connection.getConnectionOptions()).thenReturn(options); + DatabaseMetaData meta = new JdbcDatabaseMetaData(connection); + assertEquals("test@test-project.iam.gserviceaccount.com", meta.getUserName()); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaDataWithMockedServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaDataWithMockedServerTest.java new file mode 100644 index 000000000000..0300e127d113 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaDataWithMockedServerTest.java @@ -0,0 +1,327 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractStatementParser; +import com.google.cloud.spanner.connection.AbstractStatementParser.ParametersInfo; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class JdbcDatabaseMetaDataWithMockedServerTest { + private 
static final ResultSetMetadata RESULTSET_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("TAB1") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .build()) + .build(); + private static final com.google.spanner.v1.ResultSet RESULTSET = + com.google.spanner.v1.ResultSet.newBuilder() + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("TAB").build()) + .build()) + .setMetadata(RESULTSET_METADATA) + .build(); + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + private AbstractStatementParser parser; + + @BeforeClass + public static void startStaticServer() throws IOException { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions. + InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build().start(); + } + + @AfterClass + public static void stopServer() throws Exception { + SpannerPool.closeSpannerPool(); + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setup() { + parser = AbstractStatementParser.getInstance(dialect); + } + + @After + public void reset() { + // Close Spanner pool to prevent reusage of the same Spanner instance (and thereby the same + // session pool). 
+ SpannerPool.closeSpannerPool(); + mockSpanner.removeAllExecutionTimes(); + mockSpanner.reset(); + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true", + server.getPort(), "proj", "inst", "db"); + } + + private Connection createConnection() throws SQLException { + return DriverManager.getConnection(createUrl()); + } + + @Test + public void getTablesInDdlBatch() throws SQLException { + String sql = + parser.removeCommentsAndTrim( + JdbcDatabaseMetaData.readSqlFromFile("DatabaseMetaData_GetTables.sql", dialect)); + ParametersInfo params = parser.convertPositionalParametersToNamedParameters('?', sql); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder(params.sqlWithNamedParameters) + .bind("p1") + .to("CAT") + .bind("p2") + .to("SCH") + .bind("p3") + .to("TAB") + .bind("p4") + .to("TABLE") + .bind("p5") + .to("VIEW") + .build(), + RESULTSET)); + try (java.sql.Connection connection = createConnection()) { + connection.createStatement().execute("START BATCH DDL"); + // Executing an internal metadata query should be allowed during a DDL batch. + // This query will be executed on a single use read-only transaction. 
+ try (ResultSet tables = connection.getMetaData().getTables("CAT", "SCH", "TAB", null)) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + connection.createStatement().execute("CREATE TABLE FOO"); + connection.createStatement().execute("ABORT BATCH"); + } + } + + @Test + public void getColumnsInDdlBatch() throws SQLException { + String sql = + parser.removeCommentsAndTrim( + JdbcDatabaseMetaData.readSqlFromFile("DatabaseMetaData_GetColumns.sql", dialect)); + ParametersInfo params = parser.convertPositionalParametersToNamedParameters('?', sql); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder(params.sqlWithNamedParameters) + .bind("p1") + .to("CAT") + .bind("p2") + .to("SCH") + .bind("p3") + .to("TAB") + .bind("p4") + .to("%") + .build(), + RESULTSET)); + try (java.sql.Connection connection = createConnection()) { + connection.createStatement().execute("START BATCH DDL"); + try (ResultSet tables = connection.getMetaData().getColumns("CAT", "SCH", "TAB", null)) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + connection.createStatement().execute("CREATE TABLE FOO"); + connection.createStatement().execute("ABORT BATCH"); + } + } + + @Test + public void getKeysInDdlBatch() throws SQLException { + for (String fileName : + new String[] { + "DatabaseMetaData_GetPrimaryKeys.sql", + "DatabaseMetaData_GetImportedKeys.sql", + "DatabaseMetaData_GetExportedKeys.sql" + }) { + String sql = + parser.removeCommentsAndTrim(JdbcDatabaseMetaData.readSqlFromFile(fileName, dialect)); + ParametersInfo params = parser.convertPositionalParametersToNamedParameters('?', sql); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder(params.sqlWithNamedParameters) + .bind("p1") + .to("CAT") + .bind("p2") + .to("SCH") + .bind("p3") + .to("TAB") + .build(), + RESULTSET)); + } + try (java.sql.Connection connection = createConnection()) { + connection.createStatement().execute("START BATCH 
DDL"); + try (ResultSet tables = connection.getMetaData().getPrimaryKeys("CAT", "SCH", "TAB")) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + try (ResultSet tables = connection.getMetaData().getImportedKeys("CAT", "SCH", "TAB")) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + try (ResultSet tables = connection.getMetaData().getExportedKeys("CAT", "SCH", "TAB")) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + connection.createStatement().execute("CREATE TABLE FOO"); + connection.createStatement().execute("ABORT BATCH"); + } + } + + @Test + public void getCrossReferencesInDdlBatch() throws SQLException { + String sql = + parser.removeCommentsAndTrim( + JdbcDatabaseMetaData.readSqlFromFile( + "DatabaseMetaData_GetCrossReferences.sql", dialect)); + ParametersInfo params = parser.convertPositionalParametersToNamedParameters('?', sql); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder(params.sqlWithNamedParameters) + .bind("p1") + .to("CAT") + .bind("p2") + .to("SCH") + .bind("p3") + .to("TAB") + .bind("p4") + .to("CAT2") + .bind("p5") + .to("SCH2") + .bind("p6") + .to("TAB2") + .build(), + RESULTSET)); + try (java.sql.Connection connection = createConnection()) { + connection.createStatement().execute("START BATCH DDL"); + try (ResultSet tables = + connection.getMetaData().getCrossReference("CAT", "SCH", "TAB", "CAT2", "SCH2", "TAB2")) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + connection.createStatement().execute("CREATE TABLE FOO"); + connection.createStatement().execute("ABORT BATCH"); + } + } + + @Test + public void getIndexInfoInDdlBatch() throws SQLException { + String sql = + parser.removeCommentsAndTrim( + JdbcDatabaseMetaData.readSqlFromFile("DatabaseMetaData_GetIndexInfo.sql", dialect)); + ParametersInfo params = parser.convertPositionalParametersToNamedParameters('?', sql); + mockSpanner.putStatementResult( + 
StatementResult.query( + Statement.newBuilder(params.sqlWithNamedParameters) + .bind("p1") + .to("CAT") + .bind("p2") + .to("SCH") + .bind("p3") + .to("TAB") + .bind("p4") + .to("%") + .bind("p5") + .to("YES") + .build(), + RESULTSET)); + try (java.sql.Connection connection = createConnection()) { + connection.createStatement().execute("START BATCH DDL"); + try (ResultSet tables = + connection.getMetaData().getIndexInfo("CAT", "SCH", "TAB", true, false)) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + connection.createStatement().execute("CREATE TABLE FOO"); + connection.createStatement().execute("ABORT BATCH"); + } + } + + @Test + public void getSchemasInDdlBatch() throws SQLException { + String sql = + parser.removeCommentsAndTrim( + JdbcDatabaseMetaData.readSqlFromFile("DatabaseMetaData_GetSchemas.sql", dialect)); + ParametersInfo params = parser.convertPositionalParametersToNamedParameters('?', sql); + mockSpanner.putStatementResult( + StatementResult.query( + Statement.newBuilder(params.sqlWithNamedParameters) + .bind("p1") + .to("%") + .bind("p2") + .to("%") + .build(), + RESULTSET)); + try (java.sql.Connection connection = createConnection()) { + connection.createStatement().execute("START BATCH DDL"); + try (ResultSet tables = connection.getMetaData().getSchemas()) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + try (ResultSet tables = connection.getMetaData().getSchemas(null, null)) { + //noinspection StatementWithEmptyBody + while (tables.next()) {} + } + connection.createStatement().execute("CREATE TABLE FOO"); + connection.createStatement().execute("ABORT BATCH"); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDriverTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcDriverTest.java new file mode 100644 index 000000000000..9ae33fa6f294 --- /dev/null +++ 
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.jdbc;

import static com.google.cloud.spanner.jdbc.JdbcDriver.EXTERNAL_HOST_URL_PATTERN;
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import com.google.cloud.ServiceOptions;
import com.google.cloud.spanner.MockSpannerServiceImpl;
import com.google.cloud.spanner.connection.SpannerPool;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
import com.google.rpc.Code;
import io.grpc.Context;
import io.grpc.Contexts;
import io.grpc.Metadata;
import io.grpc.Server;
import io.grpc.ServerCall;
import io.grpc.ServerCall.Listener;
import io.grpc.ServerCallHandler;
import io.grpc.ServerInterceptor;
import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.DriverPropertyInfo;
import java.sql.SQLException;
import java.util.Collection;
import java.util.Objects;
import java.util.Properties;
import java.util.regex.Matcher;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

/**
 * Tests for {@link JdbcDriver}: driver (de)registration, JDBC URL acceptance and parsing, and
 * connection creation against an in-process mock Spanner server.
 */
@RunWith(JUnit4.class)
public class JdbcDriverTest {
  // Make sure the JDBC driver class is loaded. This is needed when running the test using Maven.
  static {
    try {
      Class.forName("com.google.cloud.spanner.jdbc.JdbcDriver");
    } catch (ClassNotFoundException e) {
      throw new IllegalStateException(
          "JdbcDataSource failed to load com.google.cloud.spanner.jdbc.JdbcDriver", e);
    }
  }

  // In-process mock Spanner server that all connections in this test class talk to.
  private static Server server;
  // Path to a service account key file on the test classpath; used as the 'credentials' property.
  private static final String TEST_KEY_PATH =
      Objects.requireNonNull(JdbcDriverTest.class.getResource("test-key.json")).getFile();

  /**
   * Starts a mock Spanner server on a random local port. The interceptor verifies that every
   * request carries the client library token that identifies the JDBC driver.
   */
  @BeforeClass
  public static void startStaticServer() throws IOException {
    MockSpannerServiceImpl mockSpanner = new MockSpannerServiceImpl();
    InetSocketAddress address = new InetSocketAddress("localhost", 0);
    server =
        NettyServerBuilder.forAddress(address)
            .addService(mockSpanner)
            .intercept(
                new ServerInterceptor() {
                  @Override
                  public <ReqT, RespT> Listener<ReqT> interceptCall(
                      ServerCall<ReqT, RespT> call,
                      Metadata headers,
                      ServerCallHandler<ReqT, RespT> next) {
                    // Every RPC must identify itself as coming from this client library.
                    String clientLibToken =
                        headers.get(
                            Metadata.Key.of("x-goog-api-client", Metadata.ASCII_STRING_MARSHALLER));
                    assertNotNull(clientLibToken);
                    assertTrue(
                        clientLibToken.contains(ServiceOptions.getGoogApiClientLibName() + "/"));
                    return Contexts.interceptCall(Context.current(), call, headers, next);
                  }
                })
            .build()
            .start();
  }

  /** Closes all pooled Spanner instances and shuts the mock server down. */
  @AfterClass
  public static void stopServer() throws Exception {
    SpannerPool.closeSpannerPool();
    server.shutdown();
    server.awaitTermination();
  }

  @Test
  public void testClientLibToken() {
    assertThat(JdbcDriver.getClientLibToken()).isEqualTo("sp-jdbc");
  }

  @Test
  public void testVersion() throws SQLException {
    assertEquals(JdbcDriver.MAJOR_VERSION, JdbcDriver.getRegisteredDriver().getMajorVersion());
    assertEquals(JdbcDriver.MINOR_VERSION, JdbcDriver.getRegisteredDriver().getMinorVersion());
  }

  /** Deregistering the driver must make {@link JdbcDriver#getRegisteredDriver()} fail. */
  @Test
  public void testRegister() throws SQLException {
    assertThat(JdbcDriver.isRegistered()).isTrue();
    JdbcDriver.deregister();
    assertThat(JdbcDriver.isRegistered()).isFalse();
    SQLException exception = assertThrows(SQLException.class, JdbcDriver::getRegisteredDriver);
    assertThat(exception.getErrorCode()).isEqualTo(Code.FAILED_PRECONDITION_VALUE);
    JdbcDriver.register();
    assertThat(JdbcDriver.isRegistered()).isTrue();
    assertThat(JdbcDriver.getRegisteredDriver()).isNotNull();
  }

  /** Both the 'cloudspanner' and the shorter 'spanner' URL prefix must be accepted. */
  @Test
  public void testConnect() throws SQLException {
    for (String prefix : new String[] {"cloudspanner", "spanner"}) {
      try (Connection connection =
          DriverManager.getConnection(
              String.format(
                  "jdbc:%s://localhost:%d/projects/some-company.com:test-project/instances/static-test-instance/databases/test-database;usePlainText=true;credentials=%s",
                  prefix, server.getPort(), TEST_KEY_PATH))) {
        assertThat(connection.isClosed()).isFalse();
      }
    }
  }

  /** 'credentialsUrl' is not a valid connection property ('credentials' is). */
  @Test
  public void testInvalidConnect() {
    assertThrows(
        SQLException.class,
        () ->
            DriverManager.getConnection(
                    String.format(
                        "jdbc:cloudspanner://localhost:%d/projects/some-company.com:test-project/instances/static-test-instance/databases/test-database;usePlainText=true;credentialsUrl=%s",
                        server.getPort(), TEST_KEY_PATH))
                .close());
  }

  /** Specifying both a credentials file and an OAuth token in the URL is not allowed. */
  @Test
  public void testConnectWithCredentialsAndOAuthToken() {
    SQLException exception =
        assertThrows(
            SQLException.class,
            () ->
                DriverManager.getConnection(
                        String.format(
                            "jdbc:cloudspanner://localhost:%d/projects/test-project/instances/static-test-instance/databases/test-database;usePlainText=true;credentials=%s;OAuthToken=%s",
                            server.getPort(), TEST_KEY_PATH, "some-token"))
                    .close());
    assertThat(exception.getMessage())
        .contains(
            "Specify only one of credentialsUrl, encodedCredentials, credentialsProvider and OAuth token");
  }

  /** The driver must report exactly the set of valid connection properties. */
  @Test
  public void testGetPropertyInfo() throws SQLException {
    DriverPropertyInfo[] props =
        JdbcDriver.getRegisteredDriver()
            .getPropertyInfo(
                "jdbc:cloudspanner:/projects/p/instances/i/databases/d", new Properties());
    assertThat(props).hasLength(ConnectionPropertiesHelper.VALID_CONNECTION_PROPERTIES.size());

    Collection<String> validConnectionPropertyNames =
        Collections2.transform(
            ConnectionPropertiesHelper.VALID_CONNECTION_PROPERTIES,
            ConnectionPropertiesHelper::getConnectionPropertyName);
    Collection<String> driverPropertyNames =
        Collections2.transform(ImmutableList.copyOf(props), input -> input.name);
    assertThat(driverPropertyNames).containsExactlyElementsIn(validConnectionPropertyNames);
  }

  @Test
  public void testLenient() throws SQLException {
    // With lenient=true the driver should accept unknown properties and only generate a warning.
    try (Connection connection =
        DriverManager.getConnection(
            String.format(
                "jdbc:cloudspanner://localhost:%d/projects/p/instances/i/databases/d?usePlainText=true;credentials=%s;lenient=true;foo=bar",
                server.getPort(), TEST_KEY_PATH))) {
      assertThat(connection.isClosed()).isFalse();
      // Cast to Throwable to disambiguate the Truth overload (SQLWarning is also Iterable).
      assertThat((Throwable) connection.getWarnings()).isNotNull();
      assertThat(connection.getWarnings().getMessage()).contains("foo");
    }

    // Without lenient the driver should throw an exception for unknown properties.
    SQLException exception =
        assertThrows(
            SQLException.class,
            () ->
                DriverManager.getConnection(
                        String.format(
                            "jdbc:cloudspanner://localhost:%d/projects/p/instances/i/databases/d?usePlainText=true;credentials=%s;foo=bar",
                            server.getPort(), TEST_KEY_PATH))
                    .close());
    assertThat((Throwable) exception).isInstanceOf(JdbcSqlException.class);
    JdbcSqlException jdbcException = (JdbcSqlException) exception;
    assertThat(jdbcException.getMessage()).contains("foo");
    assertThat(jdbcException.getCode()).isEqualTo(Code.INVALID_ARGUMENT);
  }

  @Test
  public void testAcceptsURL() throws SQLException {
    JdbcDriver driver = JdbcDriver.getRegisteredDriver();
    assertTrue(
        driver.acceptsURL(
            "jdbc:cloudspanner:/projects/my-project/instances/my-instance/databases/my-database"));
    assertTrue(
        driver.acceptsURL(
            "jdbc:spanner:/projects/my-project/instances/my-instance/databases/my-database"));
  }

  /**
   * Verifies the URL pattern for external (emulator-style) hosts: host with optional port and
   * optional instance segment, but no 'projects' segment and never without a host.
   */
  @Test
  public void testJdbcExternalHostFormat() {
    Matcher matcherWithoutInstance =
        EXTERNAL_HOST_URL_PATTERN.matcher("jdbc:cloudspanner://localhost:15000/databases/test-db");
    assertTrue(matcherWithoutInstance.matches());
    assertEquals("test-db", matcherWithoutInstance.group("DATABASEGROUP"));
    Matcher matcherWithProperty =
        EXTERNAL_HOST_URL_PATTERN.matcher(
            "jdbc:cloudspanner://localhost:15000/instances/default/databases/singers-db?usePlainText=true");
    assertTrue(matcherWithProperty.matches());
    assertEquals("default", matcherWithProperty.group("INSTANCEGROUP"));
    assertEquals("singers-db", matcherWithProperty.group("DATABASEGROUP"));
    Matcher matcherWithoutPort =
        EXTERNAL_HOST_URL_PATTERN.matcher(
            "jdbc:cloudspanner://localhost/instances/default/databases/test-db");
    assertTrue(matcherWithoutPort.matches());
    assertEquals("default", matcherWithoutPort.group("INSTANCEGROUP"));
    assertEquals("test-db", matcherWithoutPort.group("DATABASEGROUP"));
    Matcher matcherWithProject =
        EXTERNAL_HOST_URL_PATTERN.matcher(
            "jdbc:cloudspanner://localhost:15000/projects/default/instances/default/databases/singers-db");
    assertFalse(matcherWithProject.matches());
    Matcher matcherWithoutHost =
        EXTERNAL_HOST_URL_PATTERN.matcher(
            "jdbc:cloudspanner:/instances/default/databases/singers-db");
    assertFalse(matcherWithoutHost.matches());
  }
}
/*
 * Copyright 2019 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.jdbc;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import com.google.cloud.spanner.ErrorCode;
import com.google.cloud.spanner.MockSpannerServiceImpl;
import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime;
import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult;
import com.google.cloud.spanner.SpannerException;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.admin.database.v1.MockDatabaseAdminImpl;
import com.google.cloud.spanner.admin.instance.v1.MockInstanceAdminImpl;
import com.google.cloud.spanner.connection.SpannerPool;
import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl;
import com.google.protobuf.ListValue;
import com.google.protobuf.Value;
import com.google.rpc.Code;
import com.google.spanner.v1.ResultSetMetadata;
import com.google.spanner.v1.StructType;
import com.google.spanner.v1.StructType.Field;
import com.google.spanner.v1.Type;
import com.google.spanner.v1.TypeCode;
import io.grpc.Server;
import io.grpc.Status;
import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

/** Test that the JDBC driver propagates {@link SQLException}s when a gRPC error occurs. */
@RunWith(JUnit4.class)
public class JdbcGrpcErrorTest {
  private static final Statement SELECT1 = Statement.of("SELECT 1 AS COL1");
  private static final ResultSetMetadata SELECT1_METADATA =
      ResultSetMetadata.newBuilder()
          .setRowType(
              StructType.newBuilder()
                  .addFields(
                      Field.newBuilder()
                          .setName("COL1")
                          .setType(Type.newBuilder().setCode(TypeCode.INT64).build())
                          .build())
                  .build())
          .build();
  private static final com.google.spanner.v1.ResultSet SELECT1_RESULTSET =
      com.google.spanner.v1.ResultSet.newBuilder()
          .addRows(
              ListValue.newBuilder()
                  .addValues(Value.newBuilder().setStringValue("1").build())
                  .build())
          .setMetadata(SELECT1_METADATA)
          .build();
  private static final Statement UPDATE_STATEMENT =
      Statement.of("UPDATE FOO SET BAR=1 WHERE BAZ=2");
  private static final int UPDATE_COUNT = 1;
  private static final Statement INVALID_UPDATE_STATEMENT =
      Statement.of("UPDATE NON_EXISING_TABLE SET FOO=1 WHERE BAR=2");

  private static MockSpannerServiceImpl mockSpanner;
  private static Server server;

  // INVALID_ARGUMENT is chosen as the test error code as it should never be retryable, and it does
  // not overlap with the 'are multiplexed sessions supported?' check.
  private final Exception serverException =
      Status.INVALID_ARGUMENT.withDescription("test exception").asRuntimeException();

  /** Starts a mock Spanner backend with fixed results for the test statements. */
  @BeforeClass
  public static void startStaticServer() throws IOException {
    mockSpanner = new MockSpannerServiceImpl();
    mockSpanner.setAbortProbability(0.0D); // We don't want any unpredictable aborted transactions.
    mockSpanner.putStatementResult(StatementResult.query(SELECT1, SELECT1_RESULTSET));
    mockSpanner.putStatementResult(StatementResult.update(UPDATE_STATEMENT, UPDATE_COUNT));
    mockSpanner.putStatementResult(
        StatementResult.exception(
            INVALID_UPDATE_STATEMENT,
            Status.NOT_FOUND.withDescription("Unknown table name").asRuntimeException()));
    MockInstanceAdminImpl mockInstanceAdmin = new MockInstanceAdminImpl();
    MockDatabaseAdminImpl mockDatabaseAdmin = new MockDatabaseAdminImpl();
    InetSocketAddress address = new InetSocketAddress("localhost", 0);
    server =
        NettyServerBuilder.forAddress(address)
            .addService(mockSpanner)
            .addService(mockInstanceAdmin)
            .addService(mockDatabaseAdmin)
            .build()
            .start();
  }

  @AfterClass
  public static void stopServer() throws Exception {
    server.shutdown();
    server.awaitTermination();
  }

  @After
  public void reset() {
    // Close Spanner pool to prevent reuse of the same Spanner instance (and thereby the same
    // session pool).
    try {
      SpannerPool.closeSpannerPool();
    } catch (SpannerException e) {
      // Ignore leaked session errors that can be caused by the internal dialect auto-detection that
      // is executed at startup. This query can still be running when an error is caused by tests in
      // this class, and that will be registered as a session leak as that session has not yet been
      // checked in to the pool.
      if (!(e.getErrorCode() == ErrorCode.FAILED_PRECONDITION
          && e.getMessage()
              .contains(
                  "There is/are 1 connection(s) still open. Close all connections before calling closeSpanner()"))) {
        throw e;
      }
    }
    mockSpanner.removeAllExecutionTimes();
    mockSpanner.reset();
  }

  /** Builds a JDBC URL that points at the in-process mock server (plain text, no session pool). */
  private String createUrl() {
    return String.format(
        "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true;minSessions=0",
        server.getPort(), "proj", "inst", "db");
  }

  private Connection createConnection() throws SQLException {
    return DriverManager.getConnection(createUrl());
  }

  /** Asserts that the given exception is the sticky INVALID_ARGUMENT error set on the mock. */
  private static void assertIsTestException(JdbcSqlExceptionImpl sqlException) {
    assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode());
    assertTrue(sqlException.getMessage(), sqlException.getMessage().contains("test exception"));
  }

  @Test
  public void autocommitBeginTransaction() throws SQLException {
    mockSpanner.setBeginTransactionExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      // This triggers a retry with an explicit BeginTransaction RPC.
      mockSpanner.abortNextStatement();
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void autocommitBeginPDMLTransaction() throws SQLException {
    mockSpanner.setBeginTransactionExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.createStatement().execute("SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'");
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void transactionalBeginTransaction() throws SQLException {
    mockSpanner.setBeginTransactionExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      // This triggers a retry with an explicit BeginTransaction RPC.
      mockSpanner.abortNextStatement();
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void readOnlyBeginTransaction() throws SQLException {
    mockSpanner.setBeginTransactionExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      connection.setReadOnly(true);
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeQuery(SELECT1.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void autocommitExecuteSql() throws SQLException {
    mockSpanner.setExecuteSqlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void autocommitPDMLExecuteSql() throws SQLException {
    // Make sure the dialect auto-detection has finished before we instruct the RPC to always return
    // an error.
    try (java.sql.Connection connection = createConnection()) {
      connection.unwrap(CloudSpannerJdbcConnection.class).getDialect();
    }

    mockSpanner.setExecuteStreamingSqlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.createStatement().execute("SET AUTOCOMMIT_DML_MODE='PARTITIONED_NON_ATOMIC'");
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void transactionalExecuteSql() throws SQLException {
    mockSpanner.setExecuteSqlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void autocommitExecuteBatchDml() throws SQLException {
    mockSpanner.setExecuteBatchDmlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      try (java.sql.Statement statement = connection.createStatement()) {
        statement.addBatch(UPDATE_STATEMENT.getSql());
        statement.addBatch(UPDATE_STATEMENT.getSql());
        JdbcSqlExceptionImpl sqlException =
            assertThrows(JdbcSqlExceptionImpl.class, statement::executeBatch);
        assertIsTestException(sqlException);
      }
    }
  }

  @Test
  public void transactionalExecuteBatchDml() throws SQLException {
    mockSpanner.setExecuteBatchDmlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      try (java.sql.Statement statement = connection.createStatement()) {
        statement.addBatch(UPDATE_STATEMENT.getSql());
        statement.addBatch(UPDATE_STATEMENT.getSql());
        JdbcSqlExceptionImpl sqlException =
            assertThrows(JdbcSqlExceptionImpl.class, statement::executeBatch);
        assertIsTestException(sqlException);
      }
    }
  }

  @Test
  public void autocommitCommit() throws SQLException {
    mockSpanner.setCommitExecutionTime(SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void transactionalCommit() throws SQLException {
    mockSpanner.setCommitExecutionTime(SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql());
      JdbcSqlExceptionImpl sqlException =
          assertThrows(JdbcSqlExceptionImpl.class, connection::commit);
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void autocommitRollback() throws SQLException {
    // The JDBC driver should throw the exception of the SQL statement and ignore any errors from
    // the rollback() method.
    mockSpanner.setRollbackExecutionTime(SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(INVALID_UPDATE_STATEMENT.getSql()));
      assertEquals(Code.NOT_FOUND, sqlException.getCode());
      assertTrue(
          sqlException.getMessage(), sqlException.getMessage().contains("Unknown table name"));
    }
  }

  @Test
  public void transactionalRollback() throws SQLException {
    // Rollback exceptions are ignored by the client library and not propagated to the JDBC driver.
    // This method will therefore not throw any errors.
    mockSpanner.setRollbackExecutionTime(SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql());
      connection.rollback();
    }
  }

  @Test
  public void autocommitExecuteStreamingSql() throws SQLException {
    // Make sure the dialect auto-detection has finished before we instruct the RPC to always return
    // an error.
    try (java.sql.Connection connection = createConnection()) {
      connection.unwrap(CloudSpannerJdbcConnection.class).getDialect();
    }

    mockSpanner.setExecuteStreamingSqlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeQuery(SELECT1.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void transactionalExecuteStreamingSql() throws SQLException {
    mockSpanner.setExecuteStreamingSqlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeQuery(SELECT1.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void readOnlyExecuteStreamingSql() throws SQLException {
    mockSpanner.setExecuteStreamingSqlExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      connection.setReadOnly(true);
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeQuery(SELECT1.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void autocommitCreateSession() throws SQLException {
    mockSpanner.setCreateSessionExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void transactionalCreateSession() throws SQLException {
    mockSpanner.setCreateSessionExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeUpdate(UPDATE_STATEMENT.getSql()));
      assertIsTestException(sqlException);
    }
  }

  @Test
  public void readOnlyCreateSession() throws SQLException {
    mockSpanner.setCreateSessionExecutionTime(
        SimulatedExecutionTime.ofStickyException(serverException));
    try (java.sql.Connection connection = createConnection()) {
      connection.setAutoCommit(false);
      connection.setReadOnly(true);
      JdbcSqlExceptionImpl sqlException =
          assertThrows(
              JdbcSqlExceptionImpl.class,
              () -> connection.createStatement().executeQuery(SELECT1.getSql()));
      assertIsTestException(sqlException);
    }
  }
}
b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcParameterStoreTest.java @@ -0,0 +1,1236 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.SpannerException; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.AbstractStatementParser; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.cloud.spanner.jdbc.it.SingerProto.Genre; +import com.google.cloud.spanner.jdbc.it.SingerProto.SingerInfo; +import com.google.common.io.CharStreams; +import com.google.protobuf.NullValue; +import com.google.rpc.Code; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.io.StringReader; +import java.math.BigDecimal; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.sql.Date; +import java.sql.SQLException; +import java.sql.Time; +import 
java.sql.Timestamp; +import java.sql.Types; +import java.time.LocalDate; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Arrays; +import java.util.Collections; +import java.util.UUID; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class JdbcParameterStoreTest { + @Parameters(name = "dialect = {0}") + public static Object[] parameters() { + return Dialect.values(); + } + + @Parameter public Dialect dialect; + + private AbstractStatementParser parser; + + @Before + public void setUp() { + parser = AbstractStatementParser.getInstance(dialect); + } + + /** Tests setting a {@link Value} as a parameter value. */ + @Test + public void testSetValueAsParameter() throws SQLException { + JdbcParameterStore params = new JdbcParameterStore(dialect); + params.setParameter(1, Value.bool(true)); + verifyParameter(params, Value.bool(true)); + params.setParameter(1, Value.bytes(ByteArray.copyFrom("test"))); + verifyParameter(params, Value.bytes(ByteArray.copyFrom("test"))); + params.setParameter(1, Value.date(com.google.cloud.Date.fromYearMonthDay(2021, 5, 3))); + verifyParameter(params, Value.date(com.google.cloud.Date.fromYearMonthDay(2021, 5, 3))); + params.setParameter(1, Value.float64(3.14d)); + verifyParameter(params, Value.float64(3.14d)); + params.setParameter(1, Value.int64(1L)); + verifyParameter(params, Value.int64(1L)); + params.setParameter(1, Value.numeric(BigDecimal.TEN)); + verifyParameter(params, Value.numeric(BigDecimal.TEN)); + params.setParameter(1, Value.string("test")); + verifyParameter(params, Value.string("test")); + params.setParameter( + 1, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(9999L, 101))); + verifyParameter( + params, 
Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(9999L, 101))); + + params.setParameter(1, Value.boolArray(new boolean[] {true, false})); + verifyParameter(params, Value.boolArray(new boolean[] {true, false})); + params.setParameter(1, Value.bytesArray(Collections.singleton(ByteArray.copyFrom("test")))); + verifyParameter(params, Value.bytesArray(Collections.singleton(ByteArray.copyFrom("test")))); + params.setParameter( + 1, + Value.dateArray(Collections.singleton(com.google.cloud.Date.fromYearMonthDay(2021, 5, 3)))); + verifyParameter( + params, + Value.dateArray(Collections.singleton(com.google.cloud.Date.fromYearMonthDay(2021, 5, 3)))); + params.setParameter(1, Value.float64Array(Collections.singleton(3.14d))); + verifyParameter(params, Value.float64Array(Collections.singleton(3.14d))); + params.setParameter(1, Value.int64Array(Collections.singleton(1L))); + verifyParameter(params, Value.int64Array(Collections.singleton(1L))); + params.setParameter(1, Value.numericArray(Collections.singleton(BigDecimal.TEN))); + verifyParameter(params, Value.numericArray(Collections.singleton(BigDecimal.TEN))); + params.setParameter(1, Value.stringArray(Collections.singleton("test"))); + verifyParameter(params, Value.stringArray(Collections.singleton("test"))); + params.setParameter( + 1, + Value.timestampArray( + Collections.singleton(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(9999L, 101)))); + verifyParameter( + params, + Value.timestampArray( + Collections.singleton(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(9999L, 101)))); + } + + /** Tests setting a parameter value together with a sql type */ + @SuppressWarnings("deprecation") + @Test + public void testSetParameterWithType() throws SQLException, IOException { + JdbcParameterStore params = new JdbcParameterStore(dialect); + // test the valid default combinations + params.setParameter(1, true, Types.BOOLEAN); + assertTrue((Boolean) params.getParameter(1)); + verifyParameter(params, 
Value.bool(true)); + params.setParameter(1, (byte) 1, Types.TINYINT); + assertEquals(1, ((Byte) params.getParameter(1)).byteValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, (short) 1, Types.SMALLINT); + assertEquals(1, ((Short) params.getParameter(1)).shortValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, 1, Types.INTEGER); + assertEquals(1, ((Integer) params.getParameter(1)).intValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, 1L, Types.BIGINT); + assertEquals(1, ((Long) params.getParameter(1)).longValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, (float) 1, Types.REAL); + assertEquals(1.0f, (Float) params.getParameter(1), 0.0f); + verifyParameter(params, Value.float32(1)); + params.setParameter(1, (double) 1, Types.DOUBLE); + assertEquals(1.0d, (Double) params.getParameter(1), 0.0d); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, new Date(1970 - 1900, 0, 1), Types.DATE); + assertEquals(new Date(1970 - 1900, 0, 1), params.getParameter(1)); + verifyParameter(params, Value.date(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1))); + params.setParameter(1, new Time(0L), Types.TIME); + assertEquals(new Time(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + params.setParameter(1, new Time(0L), Types.TIME_WITH_TIMEZONE); + assertEquals(new Time(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + params.setParameter(1, new Timestamp(0L), Types.TIMESTAMP); + assertEquals(new Timestamp(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + params.setParameter(1, new Timestamp(0L), Types.TIMESTAMP_WITH_TIMEZONE); + assertEquals(new Timestamp(0L), params.getParameter(1)); + verifyParameter( + 
params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + OffsetDateTime offsetDateTime = + OffsetDateTime.of(2021, 9, 24, 12, 27, 59, 42457, ZoneOffset.ofHours(2)); + params.setParameter(1, offsetDateTime, Types.TIMESTAMP_WITH_TIMEZONE); + assertEquals(offsetDateTime, params.getParameter(1)); + verifyParameter( + params, + Value.timestamp( + com.google.cloud.Timestamp.ofTimeSecondsAndNanos( + offsetDateTime.toEpochSecond(), offsetDateTime.getNano()))); + LocalDate localDate = LocalDate.of(2021, 9, 24); + params.setParameter(1, localDate, Types.DATE); + assertEquals(localDate, params.getParameter(1)); + verifyParameter( + params, + Value.date( + com.google.cloud.Date.fromYearMonthDay( + localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth()))); + + params.setParameter(1, new byte[] {1, 2, 3}, Types.BINARY); + assertArrayEquals(new byte[] {1, 2, 3}, (byte[]) params.getParameter(1)); + verifyParameter(params, Value.bytes(ByteArray.copyFrom(new byte[] {1, 2, 3}))); + params.setParameter(1, "test", Types.NVARCHAR); + assertEquals("test", params.getParameter(1)); + verifyParameter(params, Value.string("test")); + + params.setParameter(1, new JdbcBlob(new byte[] {1, 2, 3}), Types.BLOB); + assertEquals(new JdbcBlob(new byte[] {1, 2, 3}), params.getParameter(1)); + verifyParameter(params, Value.bytes(ByteArray.copyFrom(new byte[] {1, 2, 3}))); + params.setParameter(1, new ByteArrayInputStream(new byte[] {1, 2, 3}), Types.BLOB); + verifyParameter(params, Value.bytes(ByteArray.copyFrom(new byte[] {1, 2, 3}))); + + params.setParameter(1, new JdbcClob("test"), Types.CLOB); + assertEquals(new JdbcClob("test"), params.getParameter(1)); + verifyParameter(params, Value.string("test")); + params.setParameter(1, new StringReader("test"), Types.CLOB); + assertTrue(stringReadersEqual((StringReader) params.getParameter(1), new StringReader("test"))); + verifyParameter(params, Value.string("test")); + + params.setParameter(1, new 
JdbcClob("test"), Types.NCLOB); + assertEquals(new JdbcClob("test"), params.getParameter(1)); + verifyParameter(params, Value.string("test")); + params.setParameter(1, new StringReader("test"), Types.NCLOB); + assertTrue(stringReadersEqual((StringReader) params.getParameter(1), new StringReader("test"))); + verifyParameter(params, Value.string("test")); + + String jsonString = "{\"test\": \"value\"}"; + params.setParameter(1, jsonString, JsonType.VENDOR_TYPE_NUMBER); + assertEquals(jsonString, params.getParameter(1)); + verifyParameter(params, Value.json(jsonString)); + + params.setParameter(1, jsonString, (int) JsonType.SHORT_VENDOR_TYPE_NUMBER); + assertEquals(jsonString, params.getParameter(1)); + verifyParameter(params, Value.json(jsonString)); + + params.setParameter(1, jsonString, JsonType.INSTANCE); + assertEquals(jsonString, params.getParameter(1)); + verifyParameter(params, Value.json(jsonString)); + + params.setParameter(1, jsonString, PgJsonbType.VENDOR_TYPE_NUMBER); + assertEquals(jsonString, params.getParameter(1)); + verifyParameter(params, Value.pgJsonb(jsonString)); + + params.setParameter(1, jsonString, (int) PgJsonbType.SHORT_VENDOR_TYPE_NUMBER); + assertEquals(jsonString, params.getParameter(1)); + verifyParameter(params, Value.pgJsonb(jsonString)); + + params.setParameter(1, jsonString, PgJsonbType.INSTANCE); + assertEquals(jsonString, params.getParameter(1)); + verifyParameter(params, Value.pgJsonb(jsonString)); + + params.setParameter(1, BigDecimal.ONE, Types.DECIMAL); + if (dialect == Dialect.POSTGRESQL) { + verifyParameter(params, Value.pgNumeric(BigDecimal.ONE.toString())); + } else { + verifyParameter(params, Value.numeric(BigDecimal.ONE)); + } + + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + params.setParameter(1, singerInfo, ProtoMessageType.VENDOR_TYPE_NUMBER); + assertEquals(singerInfo, params.getParameter(1)); + verifyParameter(params, Value.protoMessage(singerInfo)); + + 
params.setParameter(1, singerInfo, (int) ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER); + assertEquals(singerInfo, params.getParameter(1)); + verifyParameter(params, Value.protoMessage(singerInfo)); + + params.setParameter(1, singerInfo, ProtoMessageType.INSTANCE); + assertEquals(singerInfo, params.getParameter(1)); + verifyParameter(params, Value.protoMessage(singerInfo)); + + // Tests inter compatibility between bytes and proto message + params.setParameter(1, singerInfo.toByteArray(), ProtoMessageType.VENDOR_TYPE_NUMBER); + assertArrayEquals(singerInfo.toByteArray(), (byte[]) params.getParameter(1)); + verifyParameter(params, Value.bytes(ByteArray.copyFrom(singerInfo.toByteArray()))); + + params.setParameter( + 1, singerInfo.toByteArray(), (int) ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER); + assertArrayEquals(singerInfo.toByteArray(), (byte[]) params.getParameter(1)); + verifyParameter(params, Value.bytes(ByteArray.copyFrom(singerInfo.toByteArray()))); + + params.setParameter(1, singerInfo, Types.BINARY); + assertEquals(singerInfo, params.getParameter(1)); + verifyParameter(params, Value.protoMessage(singerInfo)); + + params.setParameter(1, Genre.ROCK, ProtoEnumType.VENDOR_TYPE_NUMBER); + assertEquals(Genre.ROCK, params.getParameter(1)); + verifyParameter(params, Value.protoEnum(Genre.ROCK)); + + params.setParameter(1, Genre.ROCK, (int) ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER); + assertEquals(Genre.ROCK, params.getParameter(1)); + verifyParameter(params, Value.protoEnum(Genre.ROCK)); + + params.setParameter(1, Genre.ROCK, ProtoEnumType.INSTANCE); + assertEquals(Genre.ROCK, params.getParameter(1)); + verifyParameter(params, Value.protoEnum(Genre.ROCK)); + + // Tests inter compatibility between int and proto enum + params.setParameter(1, Genre.ROCK.getNumber(), ProtoEnumType.VENDOR_TYPE_NUMBER); + assertEquals(Genre.ROCK.getNumber(), params.getParameter(1)); + verifyParameter(params, Value.int64(Genre.ROCK.getNumber())); + + params.setParameter(1, 
Genre.ROCK.getNumber(), (int) ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER); + assertEquals(Genre.ROCK.getNumber(), params.getParameter(1)); + verifyParameter(params, Value.int64(Genre.ROCK.getNumber())); + + params.setParameter(1, Genre.ROCK, Types.INTEGER); + assertEquals(Genre.ROCK, params.getParameter(1)); + verifyParameter(params, Value.protoEnum(Genre.ROCK)); + + // types that should lead to int64 + for (int type : + new int[] { + Types.TINYINT, + Types.SMALLINT, + Types.INTEGER, + Types.BIGINT, + ProtoEnumType.VENDOR_TYPE_NUMBER, + ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER + }) { + params.setParameter(1, (byte) 1, type); + assertEquals(1, ((Byte) params.getParameter(1)).byteValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, (short) 1, type); + assertEquals(1, ((Short) params.getParameter(1)).shortValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, 1, type); + assertEquals(1, ((Integer) params.getParameter(1)).intValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, 1L, type); + assertEquals(1, ((Long) params.getParameter(1)).longValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, (float) 1, type); + assertEquals(1.0f, (Float) params.getParameter(1), 0.0f); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, (double) 1, type); + assertEquals(1.0d, (Double) params.getParameter(1), 0.0d); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, BigDecimal.ONE, type); + assertEquals(BigDecimal.ONE, params.getParameter(1)); + verifyParameter(params, Value.int64(1)); + } + + // types that should lead to float32 + for (int type : new int[] {Types.REAL}) { + params.setParameter(1, (byte) 1, type); + assertEquals(1, ((Byte) params.getParameter(1)).byteValue()); + verifyParameter(params, Value.float32(1)); + params.setParameter(1, (short) 1, type); + assertEquals(1, ((Short) params.getParameter(1)).shortValue()); + verifyParameter(params, 
Value.float32(1)); + params.setParameter(1, 1, type); + assertEquals(1, ((Integer) params.getParameter(1)).intValue()); + verifyParameter(params, Value.float32(1)); + params.setParameter(1, 1L, type); + assertEquals(1, ((Long) params.getParameter(1)).longValue()); + verifyParameter(params, Value.float32(1)); + params.setParameter(1, (float) 1, type); + assertEquals(1.0f, (Float) params.getParameter(1), 0.0f); + verifyParameter(params, Value.float32(1)); + params.setParameter(1, (double) 1, type); + assertEquals(1.0d, (Double) params.getParameter(1), 0.0d); + verifyParameter(params, Value.float32(1)); + params.setParameter(1, BigDecimal.ONE, type); + assertEquals(BigDecimal.ONE, params.getParameter(1)); + verifyParameter(params, Value.float32(1)); + } + + // types that should lead to float64 + for (int type : new int[] {Types.FLOAT, Types.DOUBLE}) { + params.setParameter(1, (byte) 1, type); + assertEquals(1, ((Byte) params.getParameter(1)).byteValue()); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, (short) 1, type); + assertEquals(1, ((Short) params.getParameter(1)).shortValue()); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, 1, type); + assertEquals(1, ((Integer) params.getParameter(1)).intValue()); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, 1L, type); + assertEquals(1, ((Long) params.getParameter(1)).longValue()); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, (float) 1, type); + assertEquals(1.0f, (Float) params.getParameter(1), 0.0f); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, (double) 1, type); + assertEquals(1.0d, (Double) params.getParameter(1), 0.0d); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, BigDecimal.ONE, type); + assertEquals(BigDecimal.ONE, params.getParameter(1)); + verifyParameter(params, Value.float64(1)); + } + + // types that should lead to date + for (int type : new int[] {Types.DATE}) { + 
params.setParameter(1, new Date(1970 - 1900, 0, 1), type); + assertEquals(new Date(1970 - 1900, 0, 1), params.getParameter(1)); + verifyParameter(params, Value.date(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1))); + params.setParameter(1, new Time(0L), type); + assertEquals(new Time(0L), params.getParameter(1)); + verifyParameter(params, Value.date(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1))); + params.setParameter(1, new Timestamp(1970 - 1900, 0, 1, 0, 0, 0, 0), type); + assertEquals(new Timestamp(1970 - 1900, 0, 1, 0, 0, 0, 0), params.getParameter(1)); + verifyParameter(params, Value.date(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1))); + } + + // types that should lead to timestamp + for (int type : + new int[] { + Types.TIME, Types.TIME_WITH_TIMEZONE, Types.TIMESTAMP, Types.TIMESTAMP_WITH_TIMEZONE + }) { + params.setParameter(1, new Date(0L), type); + assertEquals(new Date(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + params.setParameter(1, new Time(0L), type); + assertEquals(new Time(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + params.setParameter(1, new Timestamp(0L), type); + assertEquals(new Timestamp(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + } + + // types that should lead to bytes (except BLOB which is handled separately) + for (int type : + new int[] { + Types.BINARY, + Types.VARBINARY, + Types.LONGVARBINARY, + ProtoMessageType.VENDOR_TYPE_NUMBER, + ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER + }) { + params.setParameter(1, new byte[] {1, 2, 3}, type); + assertArrayEquals(new byte[] {1, 2, 3}, (byte[]) params.getParameter(1)); + verifyParameter(params, Value.bytes(ByteArray.copyFrom(new byte[] {1, 2, 3}))); + } + + // types that should lead to string + for (int type : 
+ new int[] { + Types.CHAR, + Types.VARCHAR, + Types.LONGVARCHAR, + Types.NCHAR, + Types.NVARCHAR, + Types.LONGNVARCHAR + }) { + params.setParameter(1, "test", type); + assertEquals("test", params.getParameter(1)); + verifyParameter(params, Value.string("test")); + + params.setParameter(1, new StringReader("test"), type); + assertTrue( + stringReadersEqual((StringReader) params.getParameter(1), new StringReader("test"))); + verifyParameter(params, Value.string("test")); + + params.setParameter( + 1, new ByteArrayInputStream(StandardCharsets.US_ASCII.encode("test").array()), type); + assertTrue( + asciiStreamsEqual( + (ByteArrayInputStream) params.getParameter(1), + new ByteArrayInputStream(StandardCharsets.US_ASCII.encode("test").array()))); + verifyParameter(params, Value.string("test")); + + params.setParameter(1, new URL("https://cloud.google.com/spanner"), type); + assertEquals(new URL("https://cloud.google.com/spanner"), params.getParameter(1)); + verifyParameter(params, Value.string("https://cloud.google.com/spanner")); + } + + // types that should lead to bool + for (int type : new int[] {Types.BOOLEAN, Types.BIT}) { + params.setParameter(1, true, type); + assertTrue((Boolean) params.getParameter(1)); + verifyParameter(params, Value.bool(true)); + params.setParameter(1, (byte) 1, type); + assertEquals(1, ((Byte) params.getParameter(1)).byteValue()); + verifyParameter(params, Value.bool(true)); + params.setParameter(1, (short) 0, type); + assertEquals(0, ((Short) params.getParameter(1)).shortValue()); + verifyParameter(params, Value.bool(false)); + params.setParameter(1, 1, type); + assertEquals(1, ((Integer) params.getParameter(1)).intValue()); + verifyParameter(params, Value.bool(true)); + params.setParameter(1, 1L, type); + assertEquals(1, ((Long) params.getParameter(1)).longValue()); + verifyParameter(params, Value.bool(true)); + params.setParameter(1, (float) 1, type); + assertEquals(1.0f, (Float) params.getParameter(1), 0.0f); + verifyParameter(params, 
Value.bool(true)); + params.setParameter(1, (double) 1, type); + assertEquals(1.0d, (Double) params.getParameter(1), 0.0d); + verifyParameter(params, Value.bool(true)); + params.setParameter(1, BigDecimal.ZERO, type); + assertEquals(BigDecimal.ZERO, params.getParameter(1)); + verifyParameter(params, Value.bool(false)); + params.setParameter(1, null, type); + assertNull(params.getParameter(1)); + verifyParameter(params, Value.bool(null)); + } + + // types that should lead to numeric + for (int type : new int[] {Types.DECIMAL, Types.NUMERIC}) { + final Value expectedIntegralNumeric = + dialect == Dialect.POSTGRESQL ? Value.pgNumeric("1") : Value.numeric(BigDecimal.ONE); + final Value expectedRationalNumeric = + dialect == Dialect.POSTGRESQL + ? Value.pgNumeric("1.0") + : Value.numeric(BigDecimal.valueOf(1.0)); + + params.setParameter(1, BigDecimal.ONE, type); + assertEquals(BigDecimal.ONE, params.getParameter(1)); + verifyParameter(params, expectedIntegralNumeric); + + params.setParameter(1, (byte) 1, type); + assertEquals(1, ((Byte) params.getParameter(1)).byteValue()); + verifyParameter(params, expectedIntegralNumeric); + params.setParameter(1, (short) 1, type); + assertEquals(1, ((Short) params.getParameter(1)).shortValue()); + verifyParameter(params, expectedIntegralNumeric); + params.setParameter(1, 1, type); + assertEquals(1, ((Integer) params.getParameter(1)).intValue()); + verifyParameter(params, expectedIntegralNumeric); + params.setParameter(1, 1L, type); + assertEquals(1, ((Long) params.getParameter(1)).longValue()); + verifyParameter(params, expectedIntegralNumeric); + params.setParameter(1, (float) 1, type); + assertEquals(1.0f, (Float) params.getParameter(1), 0.0f); + verifyParameter(params, expectedRationalNumeric); + params.setParameter(1, (double) 1, type); + assertEquals(1.0d, (Double) params.getParameter(1), 0.0d); + verifyParameter(params, expectedRationalNumeric); + } + } + + @Test + public void testSetInvalidParameterWithType() throws 
SQLException, IOException { + JdbcParameterStore params = new JdbcParameterStore(dialect); + + // types that should lead to int64, but with invalid values. + for (int type : + new int[] { + Types.TINYINT, + Types.SMALLINT, + Types.INTEGER, + Types.BIGINT, + ProtoEnumType.VENDOR_TYPE_NUMBER, + ProtoEnumType.SHORT_VENDOR_TYPE_NUMBER + }) { + assertInvalidParameter(params, "1", type); + assertInvalidParameter(params, new Object(), type); + assertInvalidParameter(params, Boolean.TRUE, type); + } + + // types that should not be valid float32 parameters. + for (int type : new int[] {Types.REAL}) { + assertInvalidParameter(params, "1", type); + assertInvalidParameter(params, new Object(), type); + assertInvalidParameter(params, Boolean.TRUE, type); + } + + // types that should lead to float64 + for (int type : new int[] {Types.FLOAT, Types.DOUBLE}) { + assertInvalidParameter(params, "1", type); + assertInvalidParameter(params, new Object(), type); + assertInvalidParameter(params, Boolean.TRUE, type); + } + + // types that should lead to date + for (int type : new int[] {Types.DATE}) { + assertInvalidParameter(params, "1", type); + assertInvalidParameter(params, new Object(), type); + assertInvalidParameter(params, Boolean.TRUE, type); + assertInvalidParameter(params, 1, type); + assertInvalidParameter(params, 1L, type); + } + + // types that should lead to timestamp + for (int type : + new int[] { + Types.TIME, Types.TIME_WITH_TIMEZONE, Types.TIMESTAMP, Types.TIMESTAMP_WITH_TIMEZONE + }) { + assertInvalidParameter(params, "1", type); + assertInvalidParameter(params, new Object(), type); + assertInvalidParameter(params, Boolean.TRUE, type); + assertInvalidParameter(params, 1, type); + assertInvalidParameter(params, 1L, type); + } + + // types that should lead to bytes (except BLOB which is handled separately) + for (int type : + new int[] { + Types.BINARY, + Types.VARBINARY, + Types.LONGVARBINARY, + ProtoMessageType.VENDOR_TYPE_NUMBER, + 
ProtoMessageType.SHORT_VENDOR_TYPE_NUMBER + }) { + assertInvalidParameter(params, "1", type); + assertInvalidParameter(params, new Object(), type); + assertInvalidParameter(params, Boolean.TRUE, type); + assertInvalidParameter(params, 1, type); + assertInvalidParameter(params, 1L, type); + assertInvalidParameter(params, new JdbcBlob(), type); + } + + for (int type : + new int[] { + Types.CHAR, + Types.VARCHAR, + Types.LONGVARCHAR, + Types.NCHAR, + Types.NVARCHAR, + Types.LONGNVARCHAR, + JsonType.VENDOR_TYPE_NUMBER, + JsonType.SHORT_VENDOR_TYPE_NUMBER, + PgJsonbType.VENDOR_TYPE_NUMBER, + PgJsonbType.SHORT_VENDOR_TYPE_NUMBER + }) { + assertInvalidParameter(params, new Object(), type); + assertInvalidParameter(params, Boolean.TRUE, type); + assertInvalidParameter(params, 1, type); + assertInvalidParameter(params, 1L, type); + assertInvalidParameter(params, new JdbcBlob(), type); + assertInvalidParameter(params, new JdbcClob(), type); + } + + // types that should lead to bool + for (int type : new int[] {Types.BOOLEAN, Types.BIT}) { + assertInvalidParameter(params, "1", type); + assertInvalidParameter(params, "true", type); + assertInvalidParameter(params, new Object(), type); + } + + // test setting closed readers and streams. 
+ for (int type : + new int[] { + Types.CHAR, + Types.VARCHAR, + Types.LONGVARCHAR, + Types.NCHAR, + Types.NVARCHAR, + Types.LONGNVARCHAR, + JsonType.VENDOR_TYPE_NUMBER, + JsonType.SHORT_VENDOR_TYPE_NUMBER, + PgJsonbType.VENDOR_TYPE_NUMBER, + PgJsonbType.SHORT_VENDOR_TYPE_NUMBER + }) { + Reader reader = new StringReader("test"); + reader.close(); + params.setParameter(1, reader, type); + verifyParameterBindFails(params); + + InputStream stream = + new InputStream() { + @Override + public int read() throws IOException { + throw new IOException(); + } + }; + params.setParameter(1, stream, type); + verifyParameterBindFails(params); + } + } + + private void assertInvalidParameter(JdbcParameterStore params, Object value, int type) + throws SQLException { + try { + params.setParameter(1, value, type); + fail("missing expected exception"); + } catch (JdbcSqlExceptionImpl e) { + assertEquals(Code.INVALID_ARGUMENT, e.getCode()); + } + } + + /** + * Tests setting a parameter value without knowing the sql type. 
The type must be inferred from + * the type of the parameter value + */ + @SuppressWarnings("deprecation") + @Test + public void testSetParameterWithoutType() throws SQLException { + JdbcParameterStore params = new JdbcParameterStore(dialect); + params.setParameter(1, (byte) 1, (Integer) null); + assertEquals(1, ((Byte) params.getParameter(1)).byteValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, (short) 1, (Integer) null); + assertEquals(1, ((Short) params.getParameter(1)).shortValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, 1, (Integer) null); + assertEquals(1, ((Integer) params.getParameter(1)).intValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, 1L, (Integer) null); + assertEquals(1, ((Long) params.getParameter(1)).longValue()); + verifyParameter(params, Value.int64(1)); + params.setParameter(1, (float) 1, (Integer) null); + assertEquals(1.0f, (Float) params.getParameter(1), 0.0f); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, (double) 1, (Integer) null); + assertEquals(1.0d, (Double) params.getParameter(1), 0.0d); + verifyParameter(params, Value.float64(1)); + params.setParameter(1, new Date(1970 - 1900, 0, 1), (Integer) null); + assertEquals(new Date(1970 - 1900, 0, 1), params.getParameter(1)); + verifyParameter(params, Value.date(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1))); + params.setParameter(1, new Time(0L), (Integer) null); + assertEquals(new Time(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + params.setParameter(1, new Timestamp(0L), (Integer) null); + assertEquals(new Timestamp(0L), params.getParameter(1)); + verifyParameter( + params, Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(0L, 0))); + params.setParameter(1, new byte[] {1, 2, 3}, (Integer) null); + assertArrayEquals(new byte[] {1, 2, 3}, (byte[]) params.getParameter(1)); 
+ verifyParameter(params, Value.bytes(ByteArray.copyFrom(new byte[] {1, 2, 3}))); + + params.setParameter(1, new JdbcBlob(new byte[] {1, 2, 3}), (Integer) null); + assertEquals(new JdbcBlob(new byte[] {1, 2, 3}), params.getParameter(1)); + verifyParameter(params, Value.bytes(ByteArray.copyFrom(new byte[] {1, 2, 3}))); + params.setParameter(1, new JdbcClob("test"), (Integer) null); + assertEquals(new JdbcClob("test"), params.getParameter(1)); + verifyParameter(params, Value.string("test")); + params.setParameter(1, true, (Integer) null); + assertTrue((Boolean) params.getParameter(1)); + verifyParameter(params, Value.bool(true)); + params.setParameter(1, "test", (Integer) null); + assertEquals("test", params.getParameter(1)); + verifyParameter(params, Value.string("test")); + params.setParameter(1, new JdbcClob("test"), (Integer) null); + assertEquals(new JdbcClob("test"), params.getParameter(1)); + verifyParameter(params, Value.string("test")); + params.setParameter(1, UUID.fromString("83b988cf-1f4e-428a-be3d-cc712621942e"), (Integer) null); + assertEquals(UUID.fromString("83b988cf-1f4e-428a-be3d-cc712621942e"), params.getParameter(1)); + verifyParameter( + params, + Value.untyped( + com.google.protobuf.Value.newBuilder() + .setStringValue("83b988cf-1f4e-428a-be3d-cc712621942e") + .build())); + + String jsonString = "{\"test\": \"value\"}"; + params.setParameter(1, Value.json(jsonString), (Integer) null); + assertEquals(Value.json(jsonString), params.getParameter(1)); + verifyParameter(params, Value.json(jsonString)); + + params.setParameter(1, Value.pgJsonb(jsonString), (Integer) null); + assertEquals(Value.pgJsonb(jsonString), params.getParameter(1)); + verifyParameter(params, Value.pgJsonb(jsonString)); + + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + params.setParameter(1, singerInfo, (Integer) null); + assertEquals(singerInfo, params.getParameter(1)); + verifyParameter(params, 
Value.protoMessage(singerInfo)); + + params.setParameter(1, Genre.ROCK, (Integer) null); + assertEquals(Genre.ROCK, params.getParameter(1)); + verifyParameter(params, Value.protoEnum(Genre.ROCK)); + } + + private boolean stringReadersEqual(StringReader r1, StringReader r2) throws IOException { + boolean res = CharStreams.toString(r1).equals(CharStreams.toString(r2)); + r1.reset(); + r2.reset(); + return res; + } + + private boolean asciiStreamsEqual(InputStream is1, InputStream is2) throws IOException { + InputStreamReader r1 = new InputStreamReader(is1, StandardCharsets.US_ASCII); + String s1 = CharStreams.toString(r1); + InputStreamReader r2 = new InputStreamReader(is2, StandardCharsets.US_ASCII); + String s2 = CharStreams.toString(r2); + is1.reset(); + is2.reset(); + return s1.equals(s2); + } + + /** Tests setting array types of parameters */ + @Test + public void testSetArrayParameter() throws SQLException { + JdbcParameterStore params = new JdbcParameterStore(dialect); + params.setParameter( + 1, JdbcArray.createArray("BOOL", new Boolean[] {true, false, true}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("BOOL", new Boolean[] {true, false, true}), params.getParameter(1)); + verifyParameter(params, Value.boolArray(new boolean[] {true, false, true})); + + params.setParameter( + 1, JdbcArray.createArray("BOOL", new Boolean[] {true, false, null}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("BOOL", new Boolean[] {true, false, null}), params.getParameter(1)); + verifyParameter(params, Value.boolArray(Arrays.asList(true, false, null))); + + params.setParameter(1, JdbcArray.createArray("BOOL", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("BOOL", null), params.getParameter(1)); + verifyParameter(params, Value.boolArray((boolean[]) null)); + + params.setParameter(1, JdbcArray.createArray("INT64", new Long[] {1L, 2L, 3L}), Types.ARRAY); + assertEquals(JdbcArray.createArray("INT64", new Long[] {1L, 2L, 3L}), params.getParameter(1)); + 
verifyParameter(params, Value.int64Array(new long[] {1, 2, 3})); + + params.setParameter(1, JdbcArray.createArray("INT64", new Long[] {1L, 2L, null}), Types.ARRAY); + assertEquals(JdbcArray.createArray("INT64", new Long[] {1L, 2L, null}), params.getParameter(1)); + verifyParameter(params, Value.int64Array(Arrays.asList(1L, 2L, null))); + + params.setParameter(1, JdbcArray.createArray("INT64", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("INT64", null), params.getParameter(1)); + verifyParameter(params, Value.int64Array((long[]) null)); + + params.setParameter( + 1, JdbcArray.createArray("FLOAT64", new Double[] {1D, 2D, 3D}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("FLOAT64", new Double[] {1D, 2D, 3D}), params.getParameter(1)); + verifyParameter(params, Value.float64Array(new double[] {1, 2, 3})); + + params.setParameter( + 1, JdbcArray.createArray("FLOAT64", new Double[] {1D, 2D, null}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("FLOAT64", new Double[] {1D, 2D, null}), params.getParameter(1)); + verifyParameter(params, Value.float64Array(Arrays.asList(1D, 2D, null))); + + params.setParameter(1, JdbcArray.createArray("FLOAT64", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("FLOAT64", null), params.getParameter(1)); + verifyParameter(params, Value.float64Array((double[]) null)); + + @SuppressWarnings("deprecation") + Date sqlDate = new Date(2018 - 1900, 12 - 1, 14); + params.setParameter(1, JdbcArray.createArray("DATE", new Date[] {sqlDate}), Types.ARRAY); + assertEquals(JdbcArray.createArray("DATE", new Date[] {sqlDate}), params.getParameter(1)); + verifyParameter( + params, + Value.dateArray( + Collections.singletonList(com.google.cloud.Date.fromYearMonthDay(2018, 12, 14)))); + + params.setParameter(1, JdbcArray.createArray("DATE", new Date[] {sqlDate, null}), Types.ARRAY); + assertEquals(JdbcArray.createArray("DATE", new Date[] {sqlDate, null}), params.getParameter(1)); + verifyParameter( + params, + 
Value.dateArray(Arrays.asList(com.google.cloud.Date.fromYearMonthDay(2018, 12, 14), null))); + + params.setParameter(1, JdbcArray.createArray("DATE", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("DATE", null), params.getParameter(1)); + verifyParameter(params, Value.dateArray(null)); + + Timestamp sqlTimestamp = new Timestamp(System.currentTimeMillis()); + params.setParameter( + 1, JdbcArray.createArray("TIMESTAMP", new Timestamp[] {sqlTimestamp}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("TIMESTAMP", new Timestamp[] {sqlTimestamp}), params.getParameter(1)); + verifyParameter( + params, + Value.timestampArray( + Collections.singletonList(com.google.cloud.Timestamp.of(sqlTimestamp)))); + + params.setParameter( + 1, JdbcArray.createArray("TIMESTAMP", new Timestamp[] {sqlTimestamp, null}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("TIMESTAMP", new Timestamp[] {sqlTimestamp, null}), + params.getParameter(1)); + verifyParameter( + params, + Value.timestampArray(Arrays.asList(com.google.cloud.Timestamp.of(sqlTimestamp), null))); + + params.setParameter(1, JdbcArray.createArray("TIMESTAMP", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("TIMESTAMP", null), params.getParameter(1)); + verifyParameter(params, Value.timestampArray(null)); + + params.setParameter( + 1, JdbcArray.createArray("BYTES", new byte[][] {{1, 2, 3}, {4, 5, 6}}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("BYTES", new byte[][] {{1, 2, 3}, {4, 5, 6}}), + params.getParameter(1)); + verifyParameter( + params, + Value.bytesArray( + Arrays.asList( + ByteArray.copyFrom(new byte[] {1, 2, 3}), + ByteArray.copyFrom(new byte[] {4, 5, 6})))); + + params.setParameter( + 1, JdbcArray.createArray("BYTES", new byte[][] {{1, 2, 3}, {4, 5, 6}, null}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("BYTES", new byte[][] {{1, 2, 3}, {4, 5, 6}, null}), + params.getParameter(1)); + verifyParameter( + params, + Value.bytesArray( + Arrays.asList( + 
ByteArray.copyFrom(new byte[] {1, 2, 3}), + ByteArray.copyFrom(new byte[] {4, 5, 6}), + null))); + + params.setParameter(1, JdbcArray.createArray("BYTES", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("BYTES", null), params.getParameter(1)); + verifyParameter(params, Value.bytesArray(null)); + + params.setParameter( + 1, JdbcArray.createArray("STRING", new String[] {"test1", "test2", "test3"}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("STRING", new String[] {"test1", "test2", "test3"}), + params.getParameter(1)); + verifyParameter(params, Value.stringArray(Arrays.asList("test1", "test2", "test3"))); + + params.setParameter( + 1, + JdbcArray.createArray("STRING", new String[] {"test1", null, "test2", "test3"}), + Types.ARRAY); + assertEquals( + JdbcArray.createArray("STRING", new String[] {"test1", null, "test2", "test3"}), + params.getParameter(1)); + verifyParameter(params, Value.stringArray(Arrays.asList("test1", null, "test2", "test3"))); + + params.setParameter(1, JdbcArray.createArray("STRING", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("STRING", null), params.getParameter(1)); + verifyParameter(params, Value.stringArray(null)); + + String jsonString1 = "{\"test\": \"value1\"}"; + String jsonString2 = "{\"test\": \"value2\"}"; + params.setParameter( + 1, + JdbcArray.createArray("JSON", new String[] {jsonString1, jsonString2, null}), + Types.ARRAY); + assertEquals( + JdbcArray.createArray("JSON", new String[] {jsonString1, jsonString2, null}), + params.getParameter(1)); + verifyParameter(params, Value.jsonArray(Arrays.asList(jsonString1, jsonString2, null))); + + params.setParameter(1, JdbcArray.createArray("JSON", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("JSON", null), params.getParameter(1)); + verifyParameter(params, Value.jsonArray(null)); + + params.setParameter(1, Value.jsonArray(Arrays.asList(jsonString1, jsonString2, null))); + assertEquals( + Value.jsonArray(Arrays.asList(jsonString1, 
jsonString2, null)), params.getParameter(1)); + verifyParameter(params, Value.jsonArray(Arrays.asList(jsonString1, jsonString2, null))); + + // JSONB + params.setParameter( + 1, + JdbcArray.createArray("JSONB", new String[] {jsonString1, jsonString2, null}), + Types.ARRAY); + assertEquals( + JdbcArray.createArray("JSONB", new String[] {jsonString1, jsonString2, null}), + params.getParameter(1)); + verifyParameter(params, Value.pgJsonbArray(Arrays.asList(jsonString1, jsonString2, null))); + + params.setParameter(1, JdbcArray.createArray("JSONB", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("JSONB", null), params.getParameter(1)); + verifyParameter(params, Value.pgJsonbArray(null)); + + params.setParameter(1, Value.pgJsonbArray(Arrays.asList(jsonString1, jsonString2, null))); + assertEquals( + Value.pgJsonbArray(Arrays.asList(jsonString1, jsonString2, null)), params.getParameter(1)); + verifyParameter(params, Value.pgJsonbArray(Arrays.asList(jsonString1, jsonString2, null))); + + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + params.setParameter( + 1, + JdbcArray.createArray( + "PROTO", new SingerInfo[] {singerInfo, SingerInfo.getDefaultInstance()}), + Types.ARRAY); + assertEquals( + JdbcArray.createArray( + "PROTO", new SingerInfo[] {singerInfo, SingerInfo.getDefaultInstance()}), + params.getParameter(1)); + verifyParameter( + params, + Value.protoMessageArray( + Arrays.asList(singerInfo, SingerInfo.getDefaultInstance()), + SingerInfo.getDescriptor())); + + params.setParameter( + 1, + JdbcArray.createArray( + "PROTO", new SingerInfo[] {singerInfo, SingerInfo.getDefaultInstance(), null}), + Types.ARRAY); + assertEquals( + JdbcArray.createArray( + "PROTO", new SingerInfo[] {singerInfo, SingerInfo.getDefaultInstance(), null}), + params.getParameter(1)); + verifyParameter( + params, + Value.protoMessageArray( + Arrays.asList(singerInfo, SingerInfo.getDefaultInstance(), null), + 
SingerInfo.getDescriptor())); + + params.setParameter(1, JdbcArray.createArray("PROTO", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("PROTO", null), params.getParameter(1)); + verifyParameter( + params, + Value.untyped( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build())); + + params.setParameter( + 1, JdbcArray.createArray("ENUM", new Genre[] {Genre.ROCK, Genre.POP}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("ENUM", new Genre[] {Genre.ROCK, Genre.POP}), params.getParameter(1)); + verifyParameter( + params, Value.protoEnumArray(Arrays.asList(Genre.ROCK, Genre.POP), Genre.getDescriptor())); + + params.setParameter( + 1, JdbcArray.createArray("ENUM", new Genre[] {Genre.ROCK, Genre.POP, null}), Types.ARRAY); + assertEquals( + JdbcArray.createArray("ENUM", new Genre[] {Genre.ROCK, Genre.POP, null}), + params.getParameter(1)); + verifyParameter( + params, + Value.protoEnumArray(Arrays.asList(Genre.ROCK, Genre.POP, null), Genre.getDescriptor())); + + params.setParameter(1, JdbcArray.createArray("ENUM", null), Types.ARRAY); + assertEquals(JdbcArray.createArray("ENUM", null), params.getParameter(1)); + verifyParameter( + params, + Value.untyped( + com.google.protobuf.Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build())); + } + + private void verifyParameter(JdbcParameterStore params, Value value) throws SQLException { + Statement.Builder builder = Statement.newBuilder("SELECT * FROM FOO WHERE BAR=:p1"); + params.bindParameterValue(builder.bind("p1"), 1); + assertEquals(value, builder.build().getParameters().get("p1")); + } + + private void verifyParameterBindFails(JdbcParameterStore params) throws SQLException { + Statement.Builder builder = Statement.newBuilder("SELECT * FROM FOO WHERE BAR=:p1"); + try { + params.bindParameterValue(builder.bind("p1"), 1); + fail("missing expected exception"); + } catch (JdbcSqlExceptionImpl e) { + assertEquals(Code.INVALID_ARGUMENT, e.getCode()); + } + } + + @Test + 
public void testGoogleStandardSQLDialectConvertPositionalParametersToNamedParameters() { + assumeTrue(dialect == Dialect.GOOGLE_STANDARD_SQL); + assertEquals( + "select * from foo where name=@p1", + parser.convertPositionalParametersToNamedParameters('?', "select * from foo where name=?") + .sqlWithNamedParameters); + assertEquals( + "@p1'?test?\"?test?\"?'@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'?test?\"?test?\"?'?") + .sqlWithNamedParameters); + assertEquals( + "@p1'?it\\'?s'@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'?it\\'?s'?") + .sqlWithNamedParameters); + assertEquals( + "@p1'?it\\\"?s'@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'?it\\\"?s'?") + .sqlWithNamedParameters); + assertEquals( + "@p1\"?it\\\"?s\"@p2", + parser.convertPositionalParametersToNamedParameters('?', "?\"?it\\\"?s\"?") + .sqlWithNamedParameters); + assertEquals( + "@p1`?it\\`?s`@p2", + parser.convertPositionalParametersToNamedParameters('?', "?`?it\\`?s`?") + .sqlWithNamedParameters); + assertEquals( + "@p1'''?it\\'?s'''@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'''?it\\'?s'''?") + .sqlWithNamedParameters); + assertEquals( + "@p1\"\"\"?it\\\"?s\"\"\"@p2", + parser.convertPositionalParametersToNamedParameters('?', "?\"\"\"?it\\\"?s\"\"\"?") + .sqlWithNamedParameters); + assertEquals( + "@p1```?it\\`?s```@p2", + parser.convertPositionalParametersToNamedParameters('?', "?```?it\\`?s```?") + .sqlWithNamedParameters); + assertEquals( + "@p1'''?it\\'?s \n ?it\\'?s'''@p2", + parser.convertPositionalParametersToNamedParameters('?', "?'''?it\\'?s \n ?it\\'?s'''?") + .sqlWithNamedParameters); + + assertUnclosedLiteral("?'?it\\'?s \n ?it\\'?s'?"); + assertUnclosedLiteral("?'?it\\'?s \n ?it\\'?s?"); + assertUnclosedLiteral("?'''?it\\'?s \n ?it\\'?s'?"); + + assertEquals( + "select 1, @p1, 'test?test', \"test?test\", foo.* from `foo` where col1=@p2 and col2='test' and col3=@p3 and col4='?' 
and col5=\"?\" and col6='?''?''?'", + parser.convertPositionalParametersToNamedParameters( + '?', + "select 1, ?, 'test?test', \"test?test\", foo.* from `foo` where col1=? and col2='test' and col3=? and col4='?' and col5=\"?\" and col6='?''?''?'") + .sqlWithNamedParameters); + + assertEquals( + "select * " + "from foo " + "where name=@p1 " + "and col2 like @p2 " + "and col3 > @p3", + parser.convertPositionalParametersToNamedParameters( + '?', + "select * " + "from foo " + "where name=? " + "and col2 like ? " + "and col3 > ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + "from foo " + "where id between @p1 and @p2", + parser.convertPositionalParametersToNamedParameters( + '?', "select * " + "from foo " + "where id between ? and ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + "from foo " + "limit @p1 offset @p2", + parser.convertPositionalParametersToNamedParameters( + '?', "select * " + "from foo " + "limit ? offset ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + + "from foo " + + "where col1=@p1 " + + "and col2 like @p2 " + + "and col3 > @p3 " + + "and col4 < @p4 " + + "and col5 != @p5 " + + "and col6 not in (@p6, @p7, @p8) " + + "and col7 in (@p9, @p10, @p11) " + + "and col8 between @p12 and @p13", + parser.convertPositionalParametersToNamedParameters( + '?', + "select * " + + "from foo " + + "where col1=? " + + "and col2 like ? " + + "and col3 > ? " + + "and col4 < ? " + + "and col5 != ? " + + "and col6 not in (?, ?, ?) " + + "and col7 in (?, ?, ?) " + + "and col8 between ? 
and ?") + .sqlWithNamedParameters); + } + + @Test + public void testPostgresDialectConvertPositionalParametersToNamedParameters() { + assumeTrue(dialect == Dialect.POSTGRESQL); + assertEquals( + "select * from foo where name=$1", + parser.convertPositionalParametersToNamedParameters('?', "select * from foo where name=?") + .sqlWithNamedParameters); + assertEquals( + "$1'?test?\"?test?\"?'$2", + parser.convertPositionalParametersToNamedParameters('?', "?'?test?\"?test?\"?'?") + .sqlWithNamedParameters); + assertEquals( + "$1'?it''?s'$2", + parser.convertPositionalParametersToNamedParameters('?', "?'?it''?s'?") + .sqlWithNamedParameters); + assertEquals( + "$1'?it\\\"?s'$2", + parser.convertPositionalParametersToNamedParameters('?', "?'?it\\\"?s'?") + .sqlWithNamedParameters); + assertEquals( + "$1\"?it\"\"?s\"$2", + parser.convertPositionalParametersToNamedParameters('?', "?\"?it\"\"?s\"?") + .sqlWithNamedParameters); + assertEquals( + "$1'''?it''?s'''$2", + parser.convertPositionalParametersToNamedParameters('?', "?'''?it''?s'''?") + .sqlWithNamedParameters); + assertEquals( + "$1\"\"\"?it\"\"?s\"\"\"$2", + parser.convertPositionalParametersToNamedParameters('?', "?\"\"\"?it\"\"?s\"\"\"?") + .sqlWithNamedParameters); + + // PostgreSQL allows newlines inside string literals. + assertEquals( + "$1'?it''?s \n ?it''?s'$2", + parser.convertPositionalParametersToNamedParameters('?', "?'?it''?s \n ?it''?s'?") + .sqlWithNamedParameters); + assertUnclosedLiteral("?'?it\\'?s \n ?it\\'?s?"); + assertEquals( + "$1'''?it''?s \n ?it''?s'$2", + parser.convertPositionalParametersToNamedParameters('?', "?'''?it''?s \n ?it''?s'?") + .sqlWithNamedParameters); + + assertEquals( + "select 1, $1, 'test?test', \"test?test\", foo.* from `foo` where col1=$2 and col2='test' and col3=$3 and col4='?' and col5=\"?\" and col6='?''?''?'", + parser.convertPositionalParametersToNamedParameters( + '?', + "select 1, ?, 'test?test', \"test?test\", foo.* from `foo` where col1=? 
and col2='test' and col3=? and col4='?' and col5=\"?\" and col6='?''?''?'") + .sqlWithNamedParameters); + + assertEquals( + "select * " + "from foo " + "where name=$1 " + "and col2 like $2 " + "and col3 > $3", + parser.convertPositionalParametersToNamedParameters( + '?', + "select * " + "from foo " + "where name=? " + "and col2 like ? " + "and col3 > ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + "from foo " + "where id between $1 and $2", + parser.convertPositionalParametersToNamedParameters( + '?', "select * " + "from foo " + "where id between ? and ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + "from foo " + "limit $1 offset $2", + parser.convertPositionalParametersToNamedParameters( + '?', "select * " + "from foo " + "limit ? offset ?") + .sqlWithNamedParameters); + assertEquals( + "select * " + + "from foo " + + "where col1=$1 " + + "and col2 like $2 " + + "and col3 > $3 " + + "and col4 < $4 " + + "and col5 != $5 " + + "and col6 not in ($6, $7, $8) " + + "and col7 in ($9, $10, $11) " + + "and col8 between $12 and $13", + parser.convertPositionalParametersToNamedParameters( + '?', + "select * " + + "from foo " + + "where col1=? " + + "and col2 like ? " + + "and col3 > ? " + + "and col4 < ? " + + "and col5 != ? " + + "and col6 not in (?, ?, ?) " + + "and col7 in (?, ?, ?) " + + "and col8 between ? 
and ?") + .sqlWithNamedParameters); + } + + private void assertUnclosedLiteral(String sql) { + try { + parser.convertPositionalParametersToNamedParameters('?', sql); + fail("missing expected exception"); + } catch (SpannerException e) { + assertEquals(Code.INVALID_ARGUMENT.getNumber(), e.getCode()); + assertTrue( + e.getMessage() + .startsWith( + Code.INVALID_ARGUMENT.name() + + ": SQL statement contains an unclosed literal: " + + sql)); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatementTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatementTest.java new file mode 100644 index 000000000000..c95c38b7a127 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatementTest.java @@ -0,0 +1,443 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcConnection.NO_GENERATED_KEY_COLUMNS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.ReadContext.QueryAnalyzeMode; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.AbstractStatementParser; +import com.google.cloud.spanner.connection.Connection; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeCode; +import java.io.ByteArrayInputStream; +import java.io.StringReader; +import java.math.BigDecimal; +import java.net.MalformedURLException; +import java.net.URL; +import java.sql.Date; +import java.sql.JDBCType; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.Calendar; +import java.util.Collections; +import java.util.TimeZone; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import 
org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class JdbcPreparedStatementTest { + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private String generateSqlWithParameters(int numberOfParams) { + StringBuilder sql = new StringBuilder("INSERT INTO FOO ("); + boolean first = true; + for (int i = 0; i < numberOfParams; i++) { + if (first) { + first = false; + } else { + sql.append(", "); + } + sql.append("COL").append(i); + } + sql.append(") VALUES ("); + first = true; + for (int i = 0; i < numberOfParams; i++) { + if (first) { + first = false; + } else { + sql.append(", "); + } + sql.append("?"); + } + sql.append(")"); + return sql.toString(); + } + + private JdbcConnection createMockConnection() throws SQLException { + return createMockConnection(mock(Connection.class)); + } + + private JdbcConnection createMockConnection(Connection spanner) throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + when(connection.getParser()).thenReturn(AbstractStatementParser.getInstance(dialect)); + when(connection.getSpannerConnection()).thenReturn(spanner); + when(connection.createBlob()).thenCallRealMethod(); + when(connection.createClob()).thenCallRealMethod(); + when(connection.createNClob()).thenCallRealMethod(); + when(connection.createArrayOf(anyString(), any(Object[].class))).thenCallRealMethod(); + + return connection; + } + + @Test + public void testValueAsParameter() throws SQLException { + String sql = generateSqlWithParameters(1); + JdbcConnection connection = createMockConnection(); + for (Value value : + new Value[] { + Value.bool(true), + Value.bool(false), + Value.bytes(ByteArray.copyFrom("foo")), + Value.date(com.google.cloud.Date.fromYearMonthDay(2021, 5, 17)), + Value.float64(6.626d), + Value.int64(13L), + Value.numeric(new BigDecimal("3.14")), + 
Value.string("bar"), + Value.json("{}"), + Value.pgJsonb("{}"), + Value.timestamp(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(999L, 99)), + Value.boolArray(Collections.singleton(true)), + Value.bytesArray(Collections.singleton(ByteArray.copyFrom("foo"))), + Value.dateArray( + Collections.singleton(com.google.cloud.Date.fromYearMonthDay(2021, 5, 17))), + Value.float64Array(Collections.singleton(6.626d)), + Value.int64Array(Collections.singleton(13L)), + Value.numericArray(Collections.singleton(new BigDecimal("3.14"))), + Value.stringArray(Collections.singleton("bar")), + Value.jsonArray(Collections.singleton("{}")), + Value.pgJsonbArray(Collections.singleton("{}")), + Value.timestampArray( + Collections.singleton(com.google.cloud.Timestamp.ofTimeSecondsAndNanos(999L, 99))), + }) { + + try (JdbcPreparedStatement ps = + new JdbcPreparedStatement(connection, sql, NO_GENERATED_KEY_COLUMNS)) { + ps.setObject(1, value); + Statement statement = ps.createStatement(); + assertEquals(statement.getParameters().get("p1"), value); + } + } + } + + @SuppressWarnings("deprecation") + @Test + public void testParameters() throws SQLException, MalformedURLException { + final int numberOfParams = 53; + String sql = generateSqlWithParameters(numberOfParams); + + Connection spannerConnection = createMockConnectionWithAnalyzeResults(numberOfParams); + JdbcConnection connection = createMockConnection(spannerConnection); + try (JdbcPreparedStatement ps = + new JdbcPreparedStatement(connection, sql, NO_GENERATED_KEY_COLUMNS)) { + ps.setArray(1, connection.createArrayOf("INT64", new Long[] {1L, 2L, 3L})); + ps.setAsciiStream(2, new ByteArrayInputStream("TEST".getBytes())); + ps.setAsciiStream(3, new ByteArrayInputStream("TEST".getBytes()), 4); + ps.setAsciiStream(4, new ByteArrayInputStream("TEST".getBytes()), 4L); + ps.setBinaryStream(6, new ByteArrayInputStream("TEST".getBytes())); + ps.setBinaryStream(7, new ByteArrayInputStream("TEST".getBytes()), 4); + ps.setBinaryStream(8, new 
ByteArrayInputStream("TEST".getBytes()), 4L); + ps.setBlob(9, connection.createBlob()); + ps.setBlob(10, new ByteArrayInputStream("TEST".getBytes())); + ps.setBlob(11, new ByteArrayInputStream("TEST".getBytes()), 4L); + ps.setBoolean(12, Boolean.TRUE); + ps.setByte(13, (byte) 1); + ps.setBytes(14, "TEST".getBytes()); + ps.setCharacterStream(15, new StringReader("TEST")); + ps.setCharacterStream(16, new StringReader("TEST"), 4); + ps.setCharacterStream(17, new StringReader("TEST"), 4L); + ps.setClob(18, connection.createClob()); + ps.setClob(19, new StringReader("TEST")); + ps.setClob(20, new StringReader("TEST"), 4L); + ps.setDate(21, new Date(1000L)); + ps.setDate(22, new Date(1000L), Calendar.getInstance(TimeZone.getTimeZone("GMT"))); + ps.setDouble(23, 1d); + ps.setFloat(24, 1f); + ps.setInt(25, 1); + ps.setLong(26, 1L); + ps.setNCharacterStream(27, new StringReader("TEST")); + ps.setNCharacterStream(28, new StringReader("TEST"), 4L); + ps.setNClob(29, connection.createNClob()); + ps.setNClob(30, new StringReader("TEST")); + ps.setNClob(31, new StringReader("TEST"), 4L); + ps.setNString(32, "TEST"); + ps.setNull(33, Types.BIGINT); + ps.setNull(34, Types.BIGINT, "INT64"); + ps.setObject(35, "TEST"); + ps.setObject(36, "TEST", Types.NVARCHAR); + ps.setObject(37, "TEST", Types.NVARCHAR, 20); + ps.setRef(38, null); + ps.setRowId(39, null); + ps.setShort(40, (short) 1); + ps.setSQLXML(41, null); + ps.setString(42, "TEST"); + ps.setTime(43, new Time(1000L)); + ps.setTime(44, new Time(1000L), Calendar.getInstance(TimeZone.getTimeZone("GMT"))); + ps.setTimestamp(45, new Timestamp(1000L)); + ps.setTimestamp(46, new Timestamp(1000L), Calendar.getInstance(TimeZone.getTimeZone("GMT"))); + ps.setUnicodeStream(47, new ByteArrayInputStream("TEST".getBytes()), 4); + ps.setURL(48, new URL("https://spanner.google.com")); + ps.setObject(49, UUID.fromString("83b988cf-1f4e-428a-be3d-cc712621942e")); + ps.setObject(50, "TEST", JDBCType.NVARCHAR); + ps.setObject(51, "TEST", 
JDBCType.NVARCHAR, 20); + ps.setObject(52, "{}", JsonType.VENDOR_TYPE_NUMBER); + ps.setObject(53, "{}", PgJsonbType.VENDOR_TYPE_NUMBER); + + JdbcParameterMetaData pmd = (JdbcParameterMetaData) ps.getParameterMetaData(); + assertEquals(numberOfParams, pmd.getParameterCount()); + assertEquals(JdbcArray.class.getName(), pmd.getParameterClassName(1)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(2)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(3)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(4)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(6)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(7)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(8)); + assertEquals(JdbcBlob.class.getName(), pmd.getParameterClassName(9)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(10)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(11)); + assertEquals(Boolean.class.getName(), pmd.getParameterClassName(12)); + assertEquals(Byte.class.getName(), pmd.getParameterClassName(13)); + assertEquals(byte[].class.getName(), pmd.getParameterClassName(14)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(15)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(16)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(17)); + assertEquals(JdbcClob.class.getName(), pmd.getParameterClassName(18)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(19)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(20)); + assertEquals(Date.class.getName(), pmd.getParameterClassName(21)); + assertEquals(Date.class.getName(), pmd.getParameterClassName(22)); + assertEquals(Double.class.getName(), pmd.getParameterClassName(23)); + 
assertEquals(Float.class.getName(), pmd.getParameterClassName(24)); + assertEquals(Integer.class.getName(), pmd.getParameterClassName(25)); + assertEquals(Long.class.getName(), pmd.getParameterClassName(26)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(27)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(28)); + assertEquals(JdbcClob.class.getName(), pmd.getParameterClassName(29)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(30)); + assertEquals(StringReader.class.getName(), pmd.getParameterClassName(31)); + assertEquals(String.class.getName(), pmd.getParameterClassName(32)); + assertEquals(Long.class.getName(), pmd.getParameterClassName(33)); + assertEquals(Long.class.getName(), pmd.getParameterClassName(34)); + assertEquals(String.class.getName(), pmd.getParameterClassName(35)); + assertEquals(String.class.getName(), pmd.getParameterClassName(36)); + assertEquals(String.class.getName(), pmd.getParameterClassName(37)); + + // These parameter values are not set, so the driver returns the type that was returned by + // Cloud Spanner. 
+ assertEquals(String.class.getName(), pmd.getParameterClassName(38)); + assertEquals(String.class.getName(), pmd.getParameterClassName(39)); + + assertEquals(Short.class.getName(), pmd.getParameterClassName(40)); + assertEquals(String.class.getName(), pmd.getParameterClassName(41)); + assertEquals(String.class.getName(), pmd.getParameterClassName(42)); + assertEquals(Time.class.getName(), pmd.getParameterClassName(43)); + assertEquals(Time.class.getName(), pmd.getParameterClassName(44)); + assertEquals(Timestamp.class.getName(), pmd.getParameterClassName(45)); + assertEquals(Timestamp.class.getName(), pmd.getParameterClassName(46)); + assertEquals(ByteArrayInputStream.class.getName(), pmd.getParameterClassName(47)); + assertEquals(URL.class.getName(), pmd.getParameterClassName(48)); + assertEquals(UUID.class.getName(), pmd.getParameterClassName(49)); + assertEquals(String.class.getName(), pmd.getParameterClassName(50)); + assertEquals(String.class.getName(), pmd.getParameterClassName(51)); + assertEquals(String.class.getName(), pmd.getParameterClassName(52)); + assertEquals(String.class.getName(), pmd.getParameterClassName(53)); + + ps.clearParameters(); + pmd = (JdbcParameterMetaData) ps.getParameterMetaData(); + assertEquals(numberOfParams, pmd.getParameterCount()); + } + } + + @Test + public void testSetNullValues() throws SQLException { + final int numberOfParameters = 31; + String sql = generateSqlWithParameters(numberOfParameters); + + JdbcConnection connection = + createMockConnection(createMockConnectionWithAnalyzeResults(numberOfParameters)); + try (JdbcPreparedStatement ps = + new JdbcPreparedStatement(connection, sql, NO_GENERATED_KEY_COLUMNS)) { + int index = 0; + ps.setNull(++index, Types.BLOB); + ps.setNull(++index, Types.NVARCHAR); + ps.setNull(++index, Types.BINARY); + ps.setNull(++index, Types.BOOLEAN); + ps.setNull(++index, Types.TINYINT); + ps.setNull(++index, Types.DATE); + ps.setNull(++index, Types.DOUBLE); + ps.setNull(++index, Types.FLOAT);
+ ps.setNull(++index, Types.INTEGER); + ps.setNull(++index, Types.BIGINT); + ps.setNull(++index, Types.SMALLINT); + ps.setNull(++index, Types.TIME); + ps.setNull(++index, Types.TIME_WITH_TIMEZONE); + ps.setNull(++index, Types.TIMESTAMP); + ps.setNull(++index, Types.TIMESTAMP_WITH_TIMEZONE); + ps.setNull(++index, Types.CHAR); + ps.setNull(++index, Types.CLOB); + ps.setNull(++index, Types.LONGNVARCHAR); + ps.setNull(++index, Types.LONGVARBINARY); + ps.setNull(++index, Types.LONGVARCHAR); + ps.setNull(++index, Types.NCHAR); + ps.setNull(++index, Types.NCLOB); + ps.setNull(++index, Types.NVARCHAR); + ps.setNull(++index, Types.REAL); + ps.setNull(++index, Types.BIT); + ps.setNull(++index, Types.VARBINARY); + ps.setNull(++index, Types.VARCHAR); + ps.setNull(++index, JsonType.VENDOR_TYPE_NUMBER); + ps.setNull(++index, PgJsonbType.VENDOR_TYPE_NUMBER); + ps.setNull(++index, Types.OTHER); + ps.setNull(++index, Types.NULL); + assertEquals(numberOfParameters, index); + + JdbcParameterMetaData pmd = (JdbcParameterMetaData) ps.getParameterMetaData(); + assertEquals(numberOfParameters, pmd.getParameterCount()); + assertEquals(Timestamp.class.getName(), pmd.getParameterClassName(15)); + + ps.clearParameters(); + pmd = (JdbcParameterMetaData) ps.getParameterMetaData(); + assertEquals(numberOfParameters, pmd.getParameterCount()); + } + } + + @Test + public void testGetResultSetMetadata() throws SQLException { + final String sql = "SELECT * FROM FOO"; + Connection connection = mock(Connection.class); + ResultSet rs = + ResultSets.forRows( + Type.struct( + StructField.of("ID", Type.int64()), + StructField.of("NAME", Type.string()), + StructField.of("AMOUNT", Type.float64()), + dialect == Dialect.POSTGRESQL + ? 
StructField.of("PERCENTAGE", Type.pgNumeric()) + : StructField.of("PERCENTAGE", Type.numeric())), + Collections.singletonList( + Struct.newBuilder() + .set("ID") + .to(1L) + .set("NAME") + .to("foo") + .set("AMOUNT") + .to(Math.PI) + .set("PERCENTAGE") + .to( + dialect == Dialect.POSTGRESQL + ? Value.pgNumeric("1.23") + : Value.numeric(new BigDecimal("1.23"))) + .build())); + when(connection.analyzeQuery(Statement.of(sql), QueryAnalyzeMode.PLAN)).thenReturn(rs); + try (JdbcPreparedStatement ps = + new JdbcPreparedStatement( + createMockConnection(connection), sql, NO_GENERATED_KEY_COLUMNS)) { + ResultSetMetaData metadata = ps.getMetaData(); + assertEquals(4, metadata.getColumnCount()); + assertEquals("ID", metadata.getColumnLabel(1)); + assertEquals("NAME", metadata.getColumnLabel(2)); + assertEquals("AMOUNT", metadata.getColumnLabel(3)); + assertEquals("PERCENTAGE", metadata.getColumnLabel(4)); + assertEquals(Types.BIGINT, metadata.getColumnType(1)); + assertEquals(Types.NVARCHAR, metadata.getColumnType(2)); + assertEquals(Types.DOUBLE, metadata.getColumnType(3)); + assertEquals(Types.NUMERIC, metadata.getColumnType(4)); + } + } + + @Test + public void testGetResultSetMetaDataForDml() throws SQLException { + Connection connection = mock(Connection.class); + try (JdbcPreparedStatement ps = + new JdbcPreparedStatement( + createMockConnection(connection), + "UPDATE FOO SET BAR=1 WHERE TRUE", + NO_GENERATED_KEY_COLUMNS)) { + ResultSetMetaData metadata = ps.getMetaData(); + assertEquals(0, metadata.getColumnCount()); + } + } + + @Test + public void testInvalidSql() { + String sql = "SELECT * FROM Singers WHERE SingerId='"; + SQLException sqlException = + assertThrows( + SQLException.class, + () -> + new JdbcPreparedStatement( + createMockConnection(mock(Connection.class)), sql, NO_GENERATED_KEY_COLUMNS)); + assertTrue(sqlException instanceof JdbcSqlException); + JdbcSqlException jdbcSqlException = (JdbcSqlException) sqlException; + assertEquals( + 
ErrorCode.INVALID_ARGUMENT.getGrpcStatusCode().value(), jdbcSqlException.getErrorCode()); + } + + private Connection createMockConnectionWithAnalyzeResults(int numParams) { + Connection spannerConnection = mock(Connection.class); + ResultSet resultSet = mock(ResultSet.class); + when(spannerConnection.analyzeUpdateStatement(any(Statement.class), eq(QueryAnalyzeMode.PLAN))) + .thenReturn(resultSet); + when(spannerConnection.analyzeQuery(any(Statement.class), eq(QueryAnalyzeMode.PLAN))) + .thenReturn(resultSet); + ResultSetMetadata metadata = + ResultSetMetadata.newBuilder() + .setUndeclaredParameters( + StructType.newBuilder() + .addAllFields( + IntStream.range(0, numParams) + .mapToObj( + i -> + Field.newBuilder() + .setName("p" + (i + 1)) + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRING) + .build()) + .build()) + .collect(Collectors.toList())) + .build()) + .build(); + when(resultSet.getMetadata()).thenReturn(metadata); + + return spannerConnection; + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatementWithMockedServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatementWithMockedServerTest.java new file mode 100644 index 000000000000..a3072e31082e --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcPreparedStatementWithMockedServerTest.java @@ -0,0 +1,439 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlBatchUpdateException; +import com.google.spanner.v1.ResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.Status; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Types; +import java.util.Arrays; +import java.util.Collection; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class JdbcPreparedStatementWithMockedServerTest { + private static MockSpannerServiceImpl mockSpanner; + private static Server server; + + @Parameter public boolean executeLarge; + + @Parameters(name = "executeLarge = {0}") + public 
static Collection data() { + return Arrays.asList(new Object[][] {{false}, {true}}); + } + + @BeforeClass + public static void startStaticServer() throws IOException { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); + InetSocketAddress address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build().start(); + } + + @AfterClass + public static void stopServer() throws Exception { + server.shutdown(); + server.awaitTermination(); + } + + @After + public void reset() { + SpannerPool.closeSpannerPool(); + mockSpanner.removeAllExecutionTimes(); + mockSpanner.reset(); + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true", + server.getPort(), "proj", "inst", "db"); + } + + private Connection createConnection() throws SQLException { + return DriverManager.getConnection(createUrl()); + } + + @Test + public void testExecuteBatch() throws SQLException { + Statement.Builder insertBuilder = + Statement.newBuilder("INSERT INTO Test (Col1, Col2) VALUES (@p1, @p2)"); + mockSpanner.putStatementResult( + StatementResult.update( + insertBuilder.bind("p1").to(1L).bind("p2").to("test 1").build(), 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + insertBuilder.bind("p1").to(2L).bind("p2").to("test 2").build(), 1L)); + try (Connection connection = createConnection()) { + try (PreparedStatement statement = + connection.prepareStatement("INSERT INTO Test (Col1, Col2) VALUES (?, ?)")) { + statement.setLong(1, 1L); + statement.setString(2, "test 1"); + statement.addBatch(); + statement.setLong(1, 2L); + statement.setString(2, "test 2"); + statement.addBatch(); + if (executeLarge) { + assertThat(statement.executeLargeBatch()).asList().containsExactly(1L, 1L); + } else { + assertThat(statement.executeBatch()).asList().containsExactly(1, 1); + } + } + } + } + + @Test + 
public void testExecuteBatch_withOverflow() throws SQLException { + Statement.Builder insertBuilder = + Statement.newBuilder("INSERT INTO Test (Col1, Col2) VALUES (@p1, @p2)"); + mockSpanner.putStatementResult( + StatementResult.update( + insertBuilder.bind("p1").to(1L).bind("p2").to("test 1").build(), 1L)); + mockSpanner.putStatementResult( + StatementResult.update( + insertBuilder.bind("p1").to(2L).bind("p2").to("test 2").build(), + Integer.MAX_VALUE + 1L)); + try (Connection connection = createConnection()) { + try (PreparedStatement statement = + connection.prepareStatement("INSERT INTO Test (Col1, Col2) VALUES (?, ?)")) { + statement.setLong(1, 1L); + statement.setString(2, "test 1"); + statement.addBatch(); + statement.setLong(1, 2L); + statement.setString(2, "test 2"); + statement.addBatch(); + if (executeLarge) { + assertThat(statement.executeLargeBatch()) + .asList() + .containsExactly(1L, Integer.MAX_VALUE + 1L); + } else { + try { + statement.executeBatch(); + fail("missing expected OutOfRange exception"); + } catch (SQLException e) { + assertTrue(e instanceof JdbcSqlException); + JdbcSqlException sqlException = (JdbcSqlException) e; + assertEquals( + ErrorCode.OUT_OF_RANGE.getGrpcStatusCode().value(), sqlException.getErrorCode()); + } + } + } + } + } + + @Test + public void testExecuteBatch_withException() throws SQLException { + Statement.Builder insertBuilder = + Statement.newBuilder("INSERT INTO Test (Col1, Col2) VALUES (@p1, @p2)"); + mockSpanner.putStatementResult( + StatementResult.update( + insertBuilder.bind("p1").to(1L).bind("p2").to("test 1").build(), 1L)); + mockSpanner.putStatementResult( + StatementResult.exception( + insertBuilder.bind("p1").to(2L).bind("p2").to("test 2").build(), + Status.ALREADY_EXISTS.asRuntimeException())); + try (Connection connection = createConnection()) { + try (PreparedStatement statement = + connection.prepareStatement("INSERT INTO Test (Col1, Col2) VALUES (?, ?)")) { + statement.setLong(1, 1L); + 
statement.setString(2, "test 1"); + statement.addBatch(); + statement.setLong(1, 2L); + statement.setString(2, "test 2"); + statement.addBatch(); + try { + if (executeLarge) { + statement.executeLargeBatch(); + } else { + statement.executeBatch(); + } + } catch (JdbcSqlBatchUpdateException e) { + if (executeLarge) { + assertThat(e.getLargeUpdateCounts()).asList().containsExactly(1L); + } else { + assertThat(e.getUpdateCounts()).asList().containsExactly(1); + } + } + } + } + } + + @Test + public void testInsertUntypedNullValues() throws SQLException { + String sql = + "insert into all_nullable_types (ColInt64, ColFloat64, ColBool, ColString, ColBytes, ColDate, ColTimestamp, ColNumeric, ColJson, ColInt64Array, ColFloat64Array, ColBoolArray, ColStringArray, ColBytesArray, ColDateArray, ColTimestampArray, ColNumericArray, ColJsonArray) " + + "values (@p1, @p2, @p3, @p4, @p5, @p6, @p7, @p8, @p9, @p10, @p11, @p12, @p13, @p14, @p15, @p16, @p17, @p18)"; + mockSpanner.putStatementResult( + StatementResult.query( + Statement.of(sql), + ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setUndeclaredParameters( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName("p1") + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p2") + .setType( + Type.newBuilder().setCode(TypeCode.FLOAT64).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p3") + .setType(Type.newBuilder().setCode(TypeCode.BOOL).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p4") + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p5") + .setType(Type.newBuilder().setCode(TypeCode.BYTES).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p6") + .setType(Type.newBuilder().setCode(TypeCode.DATE).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p7") + .setType( + 
Type.newBuilder().setCode(TypeCode.TIMESTAMP).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p8") + .setType( + Type.newBuilder().setCode(TypeCode.NUMERIC).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p9") + .setType(Type.newBuilder().setCode(TypeCode.JSON).build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p10") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.INT64) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p11") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.FLOAT64) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p12") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.BOOL) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p13") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.STRING) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p14") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.BYTES) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p15") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.DATE) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p16") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.TIMESTAMP) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p17") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + 
Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .build()) + .build()) + .build()) + .addFields( + Field.newBuilder() + .setName("p18") + .setType( + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.JSON) + .build()) + .build()) + .build()) + .build()) + .build()) + .setStats(ResultSetStats.newBuilder().build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder(sql) + .bind("p1") + .to((Value) null) + .bind("p2") + .to((Value) null) + .bind("p3") + .to((Value) null) + .bind("p4") + .to((Value) null) + .bind("p5") + .to((Value) null) + .bind("p6") + .to((Value) null) + .bind("p7") + .to((Value) null) + .bind("p8") + .to((Value) null) + .bind("p9") + .to((Value) null) + .bind("p10") + .to((Value) null) + .bind("p11") + .to((Value) null) + .bind("p12") + .to((Value) null) + .bind("p13") + .to((Value) null) + .bind("p14") + .to((Value) null) + .bind("p15") + .to((Value) null) + .bind("p16") + .to((Value) null) + .bind("p17") + .to((Value) null) + .bind("p18") + .to((Value) null) + .build(), + 1L)); + try (Connection connection = createConnection()) { + for (int type : new int[] {Types.OTHER, Types.NULL}) { + try (PreparedStatement statement = + connection.prepareStatement( + "insert into all_nullable_types (" + + "ColInt64, ColFloat64, ColBool, ColString, ColBytes, ColDate, ColTimestamp, ColNumeric, ColJson, " + + "ColInt64Array, ColFloat64Array, ColBoolArray, ColStringArray, ColBytesArray, ColDateArray, ColTimestampArray, ColNumericArray, ColJsonArray) " + + "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")) { + for (int param = 1; + param <= statement.getParameterMetaData().getParameterCount(); + param++) { + statement.setNull(param, type); + } + assertEquals(1, statement.executeUpdate()); + } + mockSpanner.clearRequests(); + } + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcQueryOptionsTest.java 
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.cloud.spanner.jdbc;

import static com.google.common.truth.Truth.assertThat;

import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult;
import com.google.cloud.spanner.SpannerOptions;
import com.google.cloud.spanner.SpannerOptions.SpannerEnvironment;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.connection.AbstractMockServerTest;
import com.google.cloud.spanner.connection.ConnectionOptions;
import com.google.common.base.MoreObjects;
import com.google.spanner.v1.ExecuteSqlRequest;
import java.sql.DriverManager;
import java.sql.SQLException;
import javax.annotation.Nonnull;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

/**
 * Tests that the query options OPTIMIZER_VERSION, OPTIMIZER_STATISTICS_PACKAGE and RPC_PRIORITY
 * can be set through the connection URL, through SET statements, and through the environment, and
 * that they are propagated to the ExecuteSqlRequests that are sent to Cloud Spanner.
 */
@RunWith(JUnit4.class)
public class JdbcQueryOptionsTest extends AbstractMockServerTest {

  /**
   * Executes {@code SHOW VARIABLE <variable>} on the given connection and asserts that the result
   * contains exactly one row with the expected value. Replaces the repeated three-assert blocks
   * of the original test methods.
   */
  private void assertShowVariable(
      java.sql.Connection connection, String variable, String expectedValue) throws SQLException {
    try (java.sql.ResultSet rs =
        connection.createStatement().executeQuery("SHOW VARIABLE " + variable)) {
      assertThat(rs.next()).isTrue();
      assertThat(rs.getString(variable)).isEqualTo(expectedValue);
      assertThat(rs.next()).isFalse();
    }
  }

  @Test
  public void testDefaultOptions() throws SQLException {
    try (java.sql.Connection connection = createJdbcConnection()) {
      assertShowVariable(connection, "OPTIMIZER_VERSION", "");
      assertShowVariable(connection, "OPTIMIZER_STATISTICS_PACKAGE", "");
      assertShowVariable(connection, "RPC_PRIORITY", "PRIORITY_UNSPECIFIED");
    }
  }

  @Test
  public void testOptionsInConnectionUrl() throws SQLException {
    try (java.sql.Connection connection =
        DriverManager.getConnection(
            String.format(
                "jdbc:%s;optimizerVersion=%s;optimizerStatisticsPackage=%s;rpcPriority=%s",
                getBaseUrl(), "100", "url_package", "LOW"))) {
      assertShowVariable(connection, "OPTIMIZER_VERSION", "100");
      assertShowVariable(connection, "OPTIMIZER_STATISTICS_PACKAGE", "url_package");
      assertShowVariable(connection, "RPC_PRIORITY", "LOW");
    }
  }

  @Test
  public void testSetOptions() throws SQLException {
    try (java.sql.Connection connection = createJdbcConnection()) {
      connection.createStatement().execute("SET OPTIMIZER_VERSION='20'");
      assertShowVariable(connection, "OPTIMIZER_VERSION", "20");
      connection.createStatement().execute("SET OPTIMIZER_VERSION='latest'");
      assertShowVariable(connection, "OPTIMIZER_VERSION", "latest");
      connection.createStatement().execute("SET OPTIMIZER_VERSION=''");
      assertShowVariable(connection, "OPTIMIZER_VERSION", "");

      connection.createStatement().execute("SET OPTIMIZER_STATISTICS_PACKAGE='20210609'");
      assertShowVariable(connection, "OPTIMIZER_STATISTICS_PACKAGE", "20210609");
      connection.createStatement().execute("SET OPTIMIZER_STATISTICS_PACKAGE='latest'");
      assertShowVariable(connection, "OPTIMIZER_STATISTICS_PACKAGE", "latest");
      connection.createStatement().execute("SET OPTIMIZER_STATISTICS_PACKAGE=''");
      assertShowVariable(connection, "OPTIMIZER_STATISTICS_PACKAGE", "");

      connection.createStatement().execute("SET RPC_PRIORITY='LOW'");
      assertShowVariable(connection, "RPC_PRIORITY", "LOW");
      connection.createStatement().execute("SET RPC_PRIORITY='MEDIUM'");
      assertShowVariable(connection, "RPC_PRIORITY", "MEDIUM");
      connection.createStatement().execute("SET RPC_PRIORITY='NULL'");
      assertShowVariable(connection, "RPC_PRIORITY", "UNSPECIFIED");
    }
  }

  @Test
  public void testSetAndUseOptions() throws SQLException {
    try (java.sql.Connection connection = createJdbcConnection()) {
      connection.createStatement().execute("SET OPTIMIZER_VERSION='20'");
      connection.createStatement().execute("SET OPTIMIZER_STATISTICS_PACKAGE='20210609'");
      connection.createStatement().execute("SET RPC_PRIORITY='LOW'");
      try (java.sql.ResultSet rs =
          connection.createStatement().executeQuery(SELECT_COUNT_STATEMENT.getSql())) {
        assertThat(rs.next()).isTrue();
        assertThat(rs.getLong(1)).isEqualTo(COUNT_BEFORE_INSERT);
        assertThat(rs.next()).isFalse();
        // Verify that the last ExecuteSqlRequest that the server received used the options that
        // were set.
        ExecuteSqlRequest request = getLastExecuteSqlRequest();
        assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("20");
        assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()).isEqualTo("20210609");
        assertThat(request.getRequestOptions().getPriority().toString()).isEqualTo("PRIORITY_LOW");
      }

      connection.createStatement().execute("SET OPTIMIZER_VERSION='latest'");
      connection.createStatement().execute("SET OPTIMIZER_STATISTICS_PACKAGE='latest'");
      connection.createStatement().execute("SET RPC_PRIORITY='MEDIUM'");
      try (java.sql.ResultSet rs =
          connection.createStatement().executeQuery(SELECT_COUNT_STATEMENT.getSql())) {
        assertThat(rs.next()).isTrue();
        assertThat(rs.getLong(1)).isEqualTo(COUNT_BEFORE_INSERT);
        assertThat(rs.next()).isFalse();
        ExecuteSqlRequest request = getLastExecuteSqlRequest();
        assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("latest");
        assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()).isEqualTo("latest");
        assertThat(request.getRequestOptions().getPriority().toString())
            .isEqualTo("PRIORITY_MEDIUM");
      }

      // Set the options to ''. This will do a fallback to the default, meaning that it will be
      // read from the environment variables as we have nothing set on the connection URL.
      connection.createStatement().execute("SET OPTIMIZER_VERSION=''");
      connection.createStatement().execute("SET OPTIMIZER_STATISTICS_PACKAGE=''");
      connection.createStatement().execute("SET RPC_PRIORITY='NULL'");
      try (java.sql.ResultSet rs =
          connection.createStatement().executeQuery(SELECT_COUNT_STATEMENT.getSql())) {
        assertThat(rs.next()).isTrue();
        assertThat(rs.getLong(1)).isEqualTo(COUNT_BEFORE_INSERT);
        assertThat(rs.next()).isFalse();
        // Verify that the last ExecuteSqlRequest that the server received specified an optimizer
        // version equal to the environment default.
        ExecuteSqlRequest request = getLastExecuteSqlRequest();
        assertThat(request.getQueryOptions().getOptimizerVersion())
            .isEqualTo(MoreObjects.firstNonNull(System.getenv("SPANNER_OPTIMIZER_VERSION"), ""));
        // Bug fix: the default SpannerEnvironment reads the environment variable
        // SPANNER_OPTIMIZER_STATISTICS_PACKAGE; the original asserted against
        // OPTIMIZER_STATISTICS_PACKAGE, which only passed because neither is normally set.
        assertThat(request.getQueryOptions().getOptimizerStatisticsPackage())
            .isEqualTo(
                MoreObjects.firstNonNull(
                    System.getenv("SPANNER_OPTIMIZER_STATISTICS_PACKAGE"), ""));
        assertThat(request.getRequestOptions().getPriority().toString())
            .isEqualTo("PRIORITY_UNSPECIFIED");
      }
    }
  }

  @Test
  public void testUseOptionsFromConnectionUrl() throws SQLException {
    try (java.sql.Connection connection =
        DriverManager.getConnection(
            String.format(
                "jdbc:%s;optimizerVersion=10;optimizerStatisticsPackage=20210609_10_00_00;rpcPriority=LOW",
                getBaseUrl()))) {
      // Do a query and verify that the version from the connection URL is used.
      try (java.sql.ResultSet rs =
          connection.createStatement().executeQuery(SELECT_COUNT_STATEMENT.getSql())) {
        assertThat(rs.next()).isTrue();
        assertThat(rs.getLong(1)).isEqualTo(COUNT_BEFORE_INSERT);
        assertThat(rs.next()).isFalse();
        // The options should come from the connection URL.
        ExecuteSqlRequest request = getLastExecuteSqlRequest();
        assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("10");
        assertThat(request.getQueryOptions().getOptimizerStatisticsPackage())
            .isEqualTo("20210609_10_00_00");
        assertThat(request.getRequestOptions().getPriority().toString()).isEqualTo("PRIORITY_LOW");
      }
    }
  }

  @Test
  public void testUseOptionsFromEnvironment() throws SQLException {
    // Make sure that all existing connections are closed, so these do not affect this test.
    ConnectionOptions.closeSpanner();
    try {
      SpannerOptions.useEnvironment(
          new SpannerEnvironment() {
            @Nonnull
            @Override
            public String getOptimizerVersion() {
              return "20";
            }

            @Nonnull
            @Override
            public String getOptimizerStatisticsPackage() {
              return "env_package";
            }
          });
      try (java.sql.Connection connection =
          DriverManager.getConnection(String.format("jdbc:%s", getBaseUrl()))) {
        // Do a query and verify that the version from the environment is used.
        try (java.sql.ResultSet rs =
            connection.createStatement().executeQuery(SELECT_COUNT_STATEMENT.getSql())) {
          assertThat(rs.next()).isTrue();
          assertThat(rs.getLong(1)).isEqualTo(COUNT_BEFORE_INSERT);
          assertThat(rs.next()).isFalse();
          // Verify query options from the environment.
          ExecuteSqlRequest request = getLastExecuteSqlRequest();
          assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("20");
          assertThat(request.getQueryOptions().getOptimizerStatisticsPackage())
              .isEqualTo("env_package");
        }
        // Now set one of the query options on the connection. That option should be used in
        // combination with the other option from the environment.
        connection.createStatement().execute("SET OPTIMIZER_VERSION='30'");
        try (java.sql.ResultSet rs =
            connection.createStatement().executeQuery(SELECT_COUNT_STATEMENT.getSql())) {
          assertThat(rs.next()).isTrue();
          assertThat(rs.getLong(1)).isEqualTo(COUNT_BEFORE_INSERT);
          assertThat(rs.next()).isFalse();

          ExecuteSqlRequest request = getLastExecuteSqlRequest();
          // Optimizer version should come from the connection.
          assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("30");
          // Optimizer statistics package should come from the environment.
          assertThat(request.getQueryOptions().getOptimizerStatisticsPackage())
              .isEqualTo("env_package");
        }
      }
    } finally {
      // Always restore the default environment, even if the test fails.
      SpannerOptions.useDefaultEnvironment();
    }
  }

  @Test
  public void testUseQueryHint() throws SQLException {
    mockSpanner.putStatementResult(
        StatementResult.query(
            Statement.of(
                String.format(
                    "@{optimizer_version=1, optimizer_statistics_package=hint_package} %s",
                    SELECT_COUNT_STATEMENT.getSql())),
            SELECT_COUNT_RESULTSET_BEFORE_INSERT));
    try (java.sql.Connection connection =
        DriverManager.getConnection(String.format("jdbc:%s", getBaseUrl()))) {
      try (java.sql.ResultSet rs =
          connection
              .createStatement()
              .executeQuery(
                  String.format(
                      "@{optimizer_version=1, optimizer_statistics_package=hint_package} %s",
                      SELECT_COUNT_STATEMENT.getSql()))) {
        assertThat(rs.next()).isTrue();
        assertThat(rs.getLong(1)).isEqualTo(COUNT_BEFORE_INSERT);
        assertThat(rs.next()).isFalse();
        // The options used in the ExecuteSqlRequest should be empty as the query hint is parsed by
        // the backend.
        ExecuteSqlRequest request = getLastExecuteSqlRequest();
        assertThat(request.getQueryOptions().getOptimizerVersion()).isEqualTo("");
        assertThat(request.getQueryOptions().getOptimizerStatisticsPackage()).isEqualTo("");
      }
    }
  }
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.common.base.Preconditions; +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import org.mockito.internal.stubbing.answers.Returns; + +@RunWith(JUnit4.class) +public class JdbcResultSetMetaDataTest { + private JdbcResultSetMetaData subject; + + private static class TestColumn { + private final Type type; + private final String name; + private final int defaultSize; + private final boolean calculated; + + private TestColumn(Type type, String name, Integer nulls, boolean calculated) { + Preconditions.checkNotNull(type); + Preconditions.checkNotNull(name); + Preconditions.checkNotNull(nulls); + 
this.type = type; + this.name = name; + this.defaultSize = getDefaultSize(type); + this.calculated = calculated; + } + + private static int getDefaultSize(Type type) { + Preconditions.checkNotNull(type); + switch (type.getCode()) { + case BOOL: + return 1; + case DATE: + return 10; + case FLOAT32: + return 7; + case FLOAT64: + return 14; + case INT64: + return 10; + case TIMESTAMP: + return 24; + case STRING: + case BYTES: + case JSON: + case PG_JSONB: + return 50; + case NUMERIC: + case PG_NUMERIC: + return 50; + case ARRAY: + case STRUCT: + default: + return 50; + } + } + + private boolean isTableColumn() { + return !calculated; + } + + private static class Builder { + private Type type; + private String name; + private Integer nulls; + private boolean calculated = false; + + public static Builder getBuilder() { + return new Builder(); + } + + private TestColumn build() { + return new TestColumn(type, name, nulls, calculated); + } + + private Builder withType(Type type) { + this.type = type; + return this; + } + + private Builder withName(String name) { + this.name = name; + return this; + } + + private Builder withNotNull() { + this.nulls = ResultSetMetaData.columnNoNulls; + return this; + } + + private Builder withNullable() { + this.nulls = ResultSetMetaData.columnNullable; + return this; + } + + private Builder withNullableUnknown() { + this.nulls = ResultSetMetaData.columnNullableUnknown; + return this; + } + + private Builder withCalculated() { + this.calculated = true; + return this; + } + } + } + + private static final List TEST_COLUMNS = createTestColumns(); + + @Before + public void setup() throws SQLException { + java.sql.Connection connection = mock(java.sql.Connection.class); + Statement statement = mock(Statement.class); + JdbcResultSet resultSet = getFooTestResultSet(statement); + when(connection.getSchema()).thenReturn(""); + when(connection.getCatalog()).thenReturn("test-database"); + when(statement.getConnection()).then(new Returns(connection)); 
+ + subject = resultSet.getMetaData(); + } + + private static List createTestColumns() { + List res = new ArrayList<>(); + int index = 1; + for (Type type : getAllTypes()) { + TestColumn.Builder builder = TestColumn.Builder.getBuilder(); + builder.withName("COL" + index).withType(type); + if (index % 2 == 1) { + builder.withNotNull(); + } else { + builder.withNullable(); + } + res.add(builder.build()); + index++; + } + TestColumn.Builder builder = TestColumn.Builder.getBuilder(); + TestColumn column = + builder + .withName("CALCULATED") + .withType(Type.int64()) + .withNullableUnknown() + .withCalculated() + .build(); + res.add(column); + return res; + } + + private static List getAllTypes() { + List types = new ArrayList<>(); + types.add(Type.bool()); + types.add(Type.bytes()); + types.add(Type.date()); + types.add(Type.float32()); + types.add(Type.float64()); + types.add(Type.int64()); + types.add(Type.string()); + types.add(Type.json()); + types.add(Type.pgJsonb()); + types.add(Type.timestamp()); + types.add(Type.numeric()); + types.add(Type.pgNumeric()); + List arrayTypes = new ArrayList<>(); + for (Type type : types) { + arrayTypes.add(Type.array(type)); + } + types.addAll(arrayTypes); + + return types; + } + + private JdbcResultSet getFooTestResultSet(Statement statement) { + List rows = new ArrayList<>(4); + for (int row = 1; row <= 4; row++) { + Struct.Builder builder = Struct.newBuilder(); + for (TestColumn col : TEST_COLUMNS) { + builder.set(col.name).to(getDefaultValue(col.type, row)); + } + rows.add(builder.build()); + } + StructField[] fields = new StructField[TEST_COLUMNS.size()]; + int index = 0; + for (TestColumn col : TEST_COLUMNS) { + fields[index] = StructField.of(col.name, col.type); + index++; + } + + ResultSet rs = ResultSets.forRows(Type.struct(fields), rows); + return JdbcResultSet.of(statement, rs); + } + + private Value getDefaultValue(Type type, int row) { + Preconditions.checkNotNull(type); + switch (type.getCode()) { + case BOOL: + 
return Value.bool(Boolean.TRUE); + case INT64: + return Value.int64(12345L); + case NUMERIC: + return Value.numeric(new BigDecimal("3.14")); + case PG_NUMERIC: + return Value.pgNumeric("3.14"); + case FLOAT32: + return Value.float32(6.626f); + case FLOAT64: + return Value.float64(123.45D); + case STRING: + return Value.string("test value " + row); + case JSON: + return Value.json("{\"test_value\": " + row + "}"); + case PG_JSONB: + return Value.pgJsonb("{\"test_value\": " + row + "}"); + case BYTES: + return Value.bytes(ByteArray.copyFrom("test byte array " + row)); + case TIMESTAMP: + return Value.timestamp(com.google.cloud.Timestamp.now()); + case DATE: + return Value.date(com.google.cloud.Date.fromYearMonthDay(2018, 4, 1)); + case ARRAY: + switch (type.getArrayElementType().getCode()) { + case BOOL: + return Value.boolArray(Arrays.asList(Boolean.TRUE, Boolean.FALSE)); + case INT64: + return Value.int64Array(Arrays.asList(12345L, 54321L)); + case NUMERIC: + return Value.numericArray(Arrays.asList(BigDecimal.ONE, BigDecimal.TEN)); + case PG_NUMERIC: + return Value.pgNumericArray(Arrays.asList("3.14", null, "NaN", "6.626")); + case FLOAT32: + return Value.float32Array(Arrays.asList(-3498.31490f, 82.353f)); + case FLOAT64: + return Value.float64Array(Arrays.asList(123.45D, 543.21D)); + case STRING: + return Value.stringArray(Arrays.asList("test value " + row, "test value " + row)); + case JSON: + return Value.jsonArray( + Arrays.asList("{\"test_value\": " + row + "}", "{\"test_value\": " + row + "}")); + case PG_JSONB: + return Value.pgJsonbArray( + Arrays.asList("{\"test_value\": " + row + "}", "{\"test_value\": " + row + "}")); + case BYTES: + return Value.bytesArray( + Arrays.asList( + ByteArray.copyFrom("test byte array " + row), + ByteArray.copyFrom("test byte array " + row))); + case TIMESTAMP: + return Value.timestampArray( + Arrays.asList(com.google.cloud.Timestamp.now(), com.google.cloud.Timestamp.now())); + case DATE: + return Value.dateArray( + 
Arrays.asList( + com.google.cloud.Date.fromYearMonthDay(2018, 4, 1), + com.google.cloud.Date.fromYearMonthDay(2018, 4, 2))); + case ARRAY: + case STRUCT: + } + case STRUCT: + default: + return null; + } + } + + @Test + public void testGetColumnCount() { + assertEquals(TEST_COLUMNS.size(), subject.getColumnCount()); + } + + @Test + public void testIsAutoIncrement() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertFalse(subject.isAutoIncrement(i)); + } + } + + @Test + public void testIsCaseSensitive() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + Type type = TEST_COLUMNS.get(i - 1).type; + assertEquals( + type == Type.string() + || type == Type.bytes() + || type == Type.json() + || type == Type.pgJsonb(), + subject.isCaseSensitive(i)); + } + } + + @Test + public void testIsSearchable() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertTrue(subject.isSearchable(i)); + } + } + + @Test + public void testIsCurrency() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertFalse(subject.isCurrency(i)); + } + } + + @Test + public void testIsNullable() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals(ResultSetMetaData.columnNullableUnknown, subject.isNullable(i)); + } + } + + @Test + public void testIsSigned() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + Type type = TEST_COLUMNS.get(i - 1).type; + if (type == Type.int64() + || type == Type.float64() + || type == Type.numeric() + || type == Type.pgNumeric()) { + assertTrue(subject.isSigned(i)); + } else { + assertFalse(subject.isSigned(i)); + } + } + } + + @Test + public void testGetColumnDisplaySize() throws SQLException { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals( + "Wrong column display size for " + TEST_COLUMNS.get(i - 1).type, + getDefaultDisplaySize(TEST_COLUMNS.get(i - 1).type, i), + subject.getColumnDisplaySize(i)); + } + } + + private int getDefaultDisplaySize(Type type, int column) throws SQLException { + 
Preconditions.checkNotNull(type); + switch (type.getCode()) { + case BOOL: + return 5; + case INT64: + return 10; + case FLOAT32: + return 7; + case NUMERIC: + case PG_NUMERIC: + return 14; + case FLOAT64: + return 14; + case STRING: + int length = subject.getPrecision(column); + return length == 0 ? 50 : length; + case JSON: + case PG_JSONB: + return 50; + case BYTES: + return 50; + case TIMESTAMP: + return 16; + case DATE: + return 10; + case ARRAY: + return 50; + case STRUCT: + default: + return 10; + } + } + + @Test + public void testGetColumnLabel() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals(TEST_COLUMNS.get(i - 1).name, subject.getColumnLabel(i)); + } + } + + @Test + public void testGetColumnName() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals(TEST_COLUMNS.get(i - 1).name, subject.getColumnName(i)); + } + } + + @Test + public void testGetSchemaName() throws SQLException { + assertEquals("", subject.getSchemaName(1)); + } + + @Test + public void testGetPrecision() throws SQLException { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals( + "Wrong precision for type " + TEST_COLUMNS.get(i - 1).type, + getPrecision(TEST_COLUMNS.get(i - 1)), + subject.getPrecision(i)); + } + } + + private int getPrecision(TestColumn col) { + Preconditions.checkNotNull(col); + switch (col.type.getCode()) { + case BOOL: + return 1; + case DATE: + return 10; + case FLOAT32: + return 7; + case FLOAT64: + return 14; + case INT64: + return 10; + case TIMESTAMP: + return 24; + case NUMERIC: + case PG_NUMERIC: + return 14; + case STRING: + case JSON: + case PG_JSONB: + case BYTES: + case ARRAY: + case STRUCT: + default: + return col.isTableColumn() ? 
col.defaultSize : 50; + } + } + + @Test + public void testGetScale() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals(getScale(TEST_COLUMNS.get(i - 1)), subject.getScale(i)); + } + } + + private int getScale(TestColumn col) { + if (col.type == Type.float32()) { + return 7; + } + if (col.type == Type.float64() || col.type == Type.numeric() || col.type == Type.pgNumeric()) { + return 15; + } + return 0; + } + + @Test + public void testGetTableName() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals("", subject.getTableName(i)); + } + } + + @Test + public void testGetCatalogName() throws SQLException { + assertEquals("test-database", subject.getCatalogName(1)); + } + + @Test + public void testGetColumnType() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals(getSqlType(TEST_COLUMNS.get(i - 1).type), subject.getColumnType(i)); + } + } + + private int getSqlType(Type type) { + Preconditions.checkNotNull(type); + switch (type.getCode()) { + case BOOL: + return Types.BOOLEAN; + case INT64: + return Types.BIGINT; + case NUMERIC: + case PG_NUMERIC: + return Types.NUMERIC; + case FLOAT32: + return Types.REAL; + case FLOAT64: + return Types.DOUBLE; + case STRING: + case JSON: + case PG_JSONB: + return Types.NVARCHAR; + case BYTES: + return Types.BINARY; + case TIMESTAMP: + return Types.TIMESTAMP; + case DATE: + return Types.DATE; + case ARRAY: + return Types.ARRAY; + case STRUCT: + default: + return Types.OTHER; + } + } + + @Test + public void getColumnTypeName() { + int index = 1; + for (TestColumn col : TEST_COLUMNS) { + assertEquals(col.type.getCode().name(), subject.getColumnTypeName(index)); + index++; + } + } + + @Test + public void getColumnTypeNameForPostgreSQL() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + JdbcStatement statement = mock(JdbcStatement.class); + JdbcResultSet resultSet = getFooTestResultSet(statement); + when(connection.getSchema()).thenReturn(""); + 
when(connection.getCatalog()).thenReturn("test-database"); + when(statement.getConnection()).then(new Returns(connection)); + when(connection.getDialect()).thenReturn(Dialect.POSTGRESQL); + + JdbcResultSetMetaData sub = resultSet.getMetaData(); + + int index = 1; + for (TestColumn col : TEST_COLUMNS) { + if (col.type.getCode() == Type.Code.ARRAY + && col.type.getSpannerTypeName(Dialect.POSTGRESQL).contains("bool")) { + assertEquals("_boolean", sub.getColumnTypeName(index)); + } else if (col.type.getCode() == Type.Code.BOOL) { + assertEquals("boolean", sub.getColumnTypeName(index)); + } + index++; + } + } + + @Test + public void testIsReadOnly() { + for (int i = 0; i < TEST_COLUMNS.size(); i++) { + assertFalse(subject.isReadOnly(i)); + } + } + + @Test + public void testIsWritable() { + for (int i = 0; i < TEST_COLUMNS.size(); i++) { + assertTrue(subject.isWritable(i)); + } + } + + @Test + public void testIsDefinitelyWritable() { + for (int i = 0; i < TEST_COLUMNS.size(); i++) { + assertFalse(subject.isDefinitelyWritable(i)); + } + } + + @Test + public void testGetColumnClassName() { + for (int i = 1; i <= TEST_COLUMNS.size(); i++) { + assertEquals(getTypeClassName(TEST_COLUMNS.get(i - 1).type), subject.getColumnClassName(i)); + } + } + + private String getTypeClassName(Type type) { + Preconditions.checkNotNull(type); + switch (type.getCode()) { + case BOOL: + return Boolean.class.getName(); + case INT64: + return Long.class.getName(); + case NUMERIC: + case PG_NUMERIC: + return BigDecimal.class.getName(); + case FLOAT32: + return Float.class.getName(); + case FLOAT64: + return Double.class.getName(); + case STRING: + case JSON: + case PG_JSONB: + return String.class.getName(); + case BYTES: + return byte[].class.getName(); + case TIMESTAMP: + return Timestamp.class.getName(); + case DATE: + return Date.class.getName(); + case ARRAY: + switch (type.getArrayElementType().getCode()) { + case BOOL: + return Boolean[].class.getName(); + case INT64: + return 
Long[].class.getName(); + case NUMERIC: + case PG_NUMERIC: + return BigDecimal[].class.getName(); + case FLOAT32: + return Float[].class.getName(); + case FLOAT64: + return Double[].class.getName(); + case STRING: + case JSON: + case PG_JSONB: + return String[].class.getName(); + case BYTES: + return byte[][].class.getName(); + case TIMESTAMP: + return Timestamp[].class.getName(); + case DATE: + return Date[].class.getName(); + case ARRAY: + case STRUCT: + default: + // fallthrough + } + case STRUCT: + default: + return null; + } + } + + private static final String EXPECTED_TO_STRING = + "Col 1: COL1 BOOL\n" + + "Col 2: COL2 BYTES\n" + + "Col 3: COL3 DATE\n" + + "Col 4: COL4 FLOAT32\n" + + "Col 5: COL5 FLOAT64\n" + + "Col 6: COL6 INT64\n" + + "Col 7: COL7 STRING\n" + + "Col 8: COL8 JSON\n" + + "Col 9: COL9 PG_JSONB\n" + + "Col 10: COL10 TIMESTAMP\n" + + "Col 11: COL11 NUMERIC\n" + + "Col 12: COL12 PG_NUMERIC\n" + + "Col 13: COL13 ARRAY\n" + + "Col 14: COL14 ARRAY\n" + + "Col 15: COL15 ARRAY\n" + + "Col 16: COL16 ARRAY\n" + + "Col 17: COL17 ARRAY\n" + + "Col 18: COL18 ARRAY\n" + + "Col 19: COL19 ARRAY\n" + + "Col 20: COL20 ARRAY\n" + + "Col 21: COL21 ARRAY\n" + + "Col 22: COL22 ARRAY\n" + + "Col 23: COL23 ARRAY\n" + + "Col 24: COL24 ARRAY\n" + + "Col 25: CALCULATED INT64\n"; + + @Test + public void testToString() { + assertEquals(subject.toString(), EXPECTED_TO_STRING); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcResultSetTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcResultSetTest.java new file mode 100644 index 000000000000..4aac4e9ea79b --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcResultSetTest.java @@ -0,0 +1,1966 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.ResultSets; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.Type.StructField; +import com.google.cloud.spanner.Value; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.cloud.spanner.jdbc.it.SingerProto.Genre; +import com.google.cloud.spanner.jdbc.it.SingerProto.SingerInfo; +import com.google.common.collect.ImmutableList; +import com.google.rpc.Code; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.math.RoundingMode; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Types; +import java.time.Instant; +import 
java.time.LocalDate; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcResultSetTest { + private static final String UNKNOWN_COLUMN = "UNKNOWN_COLUMN"; + private static final String STRING_COL_NULL = "STRING_COL_NULL"; + private static final String STRING_COL_NOT_NULL = "STRING_COL_NOT_NULL"; + private static final String STRING_VALUE = "FOO"; + private static final int STRING_COLINDEX_NULL = 1; + private static final int STRING_COLINDEX_NOTNULL = 2; + private static final String BOOLEAN_COL_NULL = "BOOLEAN_COL_NULL"; + private static final String BOOLEAN_COL_NOT_NULL = "BOOLEAN_COL_NOT_NULL"; + private static final boolean BOOLEAN_VALUE = true; + private static final int BOOLEAN_COLINDEX_NULL = 3; + private static final int BOOLEAN_COLINDEX_NOTNULL = 4; + private static final String DOUBLE_COL_NULL = "DOUBLE_COL_NULL"; + private static final String DOUBLE_COL_NOT_NULL = "DOUBLE_COL_NOT_NULL"; + private static final double DOUBLE_VALUE = 3.14159265359D; + private static final int DOUBLE_COLINDEX_NULL = 5; + private static final int DOUBLE_COLINDEX_NOTNULL = 6; + private static final String BYTES_COL_NULL = "BYTES_COL_NULL"; + private static final String BYTES_COL_NOT_NULL = "BYTES_COL_NOT_NULL"; + private static final ByteArray BYTES_VALUE = ByteArray.copyFrom("FOO"); + private static final int BYTES_COLINDEX_NULL = 7; + private static final int BYTES_COLINDEX_NOTNULL = 8; + private static final String LONG_COL_NULL = "LONG_COL_NULL"; + private static final String LONG_COL_NOT_NULL = "LONG_COL_NOT_NULL"; + private static final long LONG_VALUE = 1L; + private static final int LONG_COLINDEX_NULL = 9; + private static final int 
LONG_COLINDEX_NOTNULL = 10; + private static final String DATE_COL_NULL = "DATE_COL_NULL"; + private static final String DATE_COL_NOT_NULL = "DATE_COL_NOT_NULL"; + private static final Date DATE_VALUE = Date.fromYearMonthDay(2019, 1, 18); + private static final int DATE_COLINDEX_NULL = 11; + private static final int DATE_COLINDEX_NOTNULL = 12; + private static final String TIMESTAMP_COL_NULL = "TIMESTAMP_COL_NULL"; + private static final String TIMESTAMP_COL_NOT_NULL = "TIMESTAMP_COL_NOT_NULL"; + private static final Timestamp TIMESTAMP_VALUE = + Timestamp.parseTimestamp("2019-01-18T10:00:01.1213Z"); + private static final int TIMESTAMP_COLINDEX_NULL = 13; + private static final int TIMESTAMP_COLINDEX_NOTNULL = 14; + private static final String TIME_COL_NULL = "TIME_COL_NULL"; + private static final String TIME_COL_NOT_NULL = "TIME_COL_NOT_NULL"; + private static final Timestamp TIME_VALUE = Timestamp.parseTimestamp("1970-01-01T10:01:02.995Z"); + private static final int TIME_COLINDEX_NULL = 15; + private static final int TIME_COLINDEX_NOTNULL = 16; + private static final String ARRAY_COL_NULL = "ARRAY_COL_NULL"; + private static final String ARRAY_COL_NOT_NULL = "ARRAY_COL_NOT_NULL"; + private static final long[] ARRAY_VALUE = new long[] {1L, 2L, 3L}; + private static final int ARRAY_COLINDEX_NULL = 17; + private static final int ARRAY_COLINDEX_NOTNULL = 18; + private static final String URL_COL_NULL = "URL_COL_NULL"; + private static final String URL_COL_NOT_NULL = "URL_COL_NOT_NULL"; + private static final String URL_VALUE = "https://cloud.google.com/spanner/docs/apis"; + private static final int URL_COLINDEX_NULL = 19; + private static final int URL_COLINDEX_NOTNULL = 20; + + private static final String STRING_COL_NUMBER = "STRING_COL_NUMBER"; + private static final int STRING_COLINDEX_NUMBER = 21; + private static final String STRING_NUMBER_VALUE = "123"; + private static final String STRING_COL_DATE = "STRING_COL_DATE"; + private static final int 
STRING_COLINDEX_DATE = 22; + private static final String STRING_DATE_VALUE = "2020-06-01"; + private static final String STRING_COL_TIMESTAMP = "STRING_COL_TIMESTAMP"; + private static final int STRING_COLINDEX_TIMESTAMP = 23; + private static final String STRING_TIMESTAMP_VALUE = "2020-06-01T10:31:15.123Z"; + private static final String STRING_COL_TIME = "STRING_COL_TIME"; + private static final int STRING_COLINDEX_TIME = 24; + private static final String STRING_TIME_VALUE = "10:31:15"; + private static final String NUMERIC_COL_NULL = "NUMERIC_COL_NULL"; + private static final String NUMERIC_COL_NOT_NULL = "NUMERIC_COL_NOT_NULL"; + private static final BigDecimal NUMERIC_VALUE = new BigDecimal("3.14"); + private static final int NUMERIC_COLINDEX_NULL = 25; + private static final int NUMERIC_COLINDEX_NOTNULL = 26; + private static final String PG_NUMERIC_COL_NULL = "PG_NUMERIC_COL_NULL"; + private static final String PG_NUMERIC_COL_NOT_NULL = "PG_NUMERIC_COL_NOT_NULL"; + private static final String PG_NUMERIC_COL_NAN = "PG_NUMERIC_COL_NAN"; + private static final int PG_NUMERIC_COLINDEX_NULL = 44; + private static final int PG_NUMERIC_COLINDEX_NOTNULL = 45; + private static final int PG_NUMERIC_COLINDEX_NAN = 46; + private static final String JSON_COL_NULL = "JSON_COL_NULL"; + private static final String JSON_COL_NOT_NULL = "JSON_COL_NOT_NULL"; + private static final int JSON_COLINDEX_NULL = 27; + private static final int JSON_COLINDEX_NOT_NULL = 28; + private static final String JSON_VALUE = "{\"name\":\"John\", \"age\":30, \"car\":null}"; + + private static final String PROTO_MSG_COL_NULL = "PROTO_MSG_COL_NULL"; + private static final String PROTO_MSG_COL_NOT_NULL = "PROTO_MSG_COL_NOT_NULL"; + private static final int PROTO_MSG_COLINDEX_NULL = 29; + private static final int PROTO_MSG_COLINDEX_NOT_NULL = 30; + private static final ByteArray PROTO_MSG_VALUE = + ByteArray.copyFrom(SingerInfo.newBuilder().setSingerId(1).build().toByteArray()); + private static final 
String PROTO_ENUM_COL_NULL = "PROTO_ENUM_COL_NULL"; + private static final String PROTO_ENUM_COL_NOT_NULL = "PROTO_ENUM_COL_NOT_NULL"; + private static final int PROTO_ENUM_COLINDEX_NULL = 31; + private static final int PROTO_ENUM_COLINDEX_NOT_NULL = 32; + private static final long PROTO_ENUM_VALUE = Genre.ROCK.getNumber(); + + private static final String BOOL_ARRAY_COL = "BOOL_ARRAY"; + private static final List BOOL_ARRAY_VALUE = Arrays.asList(true, null, false); + private static final String BYTES_ARRAY_COL = "BYTES_ARRAY"; + private static final List BYTES_ARRAY_VALUE = Arrays.asList(BYTES_VALUE, null); + private static final String DATE_ARRAY_COL = "DATE_ARRAY"; + private static final List DATE_ARRAY_VALUE = Arrays.asList(DATE_VALUE, null); + private static final String FLOAT64_ARRAY_COL = "FLOAT64_ARRAY"; + private static final List FLOAT64_ARRAY_VALUE = Arrays.asList(DOUBLE_VALUE, null); + private static final String INT64_ARRAY_COL = "INT64_ARRAY"; + private static final List INT64_ARRAY_VALUE = Arrays.asList(LONG_VALUE, null); + private static final String NUMERIC_ARRAY_COL = "NUMERIC_ARRAY"; + private static final List NUMERIC_ARRAY_VALUE = Arrays.asList(NUMERIC_VALUE, null); + private static final String STRING_ARRAY_COL = "STRING_ARRAY"; + private static final List STRING_ARRAY_VALUE = Arrays.asList(STRING_VALUE, null); + private static final String TIMESTAMP_ARRAY_COL = "TIMESTAMP_ARRAY"; + private static final List TIMESTAMP_ARRAY_VALUE = Arrays.asList(TIMESTAMP_VALUE, null); + private static final String JSON_ARRAY_COL = "JSON_ARRAY"; + private static final List JSON_ARRAY_VALUE = Arrays.asList(JSON_VALUE, null); + private static final String PROTO_MSG_ARRAY_COL = "PROTO_MSG_ARRAY"; + private static final List PROTO_MSG_ARRAY_VALUE = Arrays.asList(PROTO_MSG_VALUE, null); + private static final String PROTO_ENUM_ARRAY_COL = "PROTO_ENUM_ARRAY"; + private static final List PROTO_ENUM_ARRAY_VALUE = Arrays.asList(PROTO_ENUM_VALUE, null); + + private final 
JdbcResultSet subject; + + static ResultSet getMockResultSet() { + return ResultSets.forRows( + Type.struct( + StructField.of(STRING_COL_NULL, Type.string()), + StructField.of(STRING_COL_NOT_NULL, Type.string()), + StructField.of(BOOLEAN_COL_NULL, Type.bool()), + StructField.of(BOOLEAN_COL_NOT_NULL, Type.bool()), + StructField.of(DOUBLE_COL_NULL, Type.float64()), + StructField.of(DOUBLE_COL_NOT_NULL, Type.float64()), + StructField.of(BYTES_COL_NULL, Type.bytes()), + StructField.of(BYTES_COL_NOT_NULL, Type.bytes()), + StructField.of(LONG_COL_NULL, Type.int64()), + StructField.of(LONG_COL_NOT_NULL, Type.int64()), + StructField.of(DATE_COL_NULL, Type.date()), + StructField.of(DATE_COL_NOT_NULL, Type.date()), + StructField.of(TIMESTAMP_COL_NULL, Type.timestamp()), + StructField.of(TIMESTAMP_COL_NOT_NULL, Type.timestamp()), + StructField.of(TIME_COL_NULL, Type.timestamp()), + StructField.of(TIME_COL_NOT_NULL, Type.timestamp()), + StructField.of(ARRAY_COL_NULL, Type.array(Type.int64())), + StructField.of(ARRAY_COL_NOT_NULL, Type.array(Type.int64())), + StructField.of(URL_COL_NULL, Type.string()), + StructField.of(URL_COL_NOT_NULL, Type.string()), + StructField.of(STRING_COL_NUMBER, Type.string()), + StructField.of(STRING_COL_DATE, Type.string()), + StructField.of(STRING_COL_TIMESTAMP, Type.string()), + StructField.of(STRING_COL_TIME, Type.string()), + StructField.of(NUMERIC_COL_NULL, Type.numeric()), + StructField.of(NUMERIC_COL_NOT_NULL, Type.numeric()), + StructField.of(JSON_COL_NULL, Type.json()), + StructField.of(JSON_COL_NOT_NULL, Type.json()), + StructField.of( + PROTO_MSG_COL_NULL, Type.proto(SingerInfo.getDescriptor().getFullName())), + StructField.of( + PROTO_MSG_COL_NOT_NULL, Type.proto(SingerInfo.getDescriptor().getFullName())), + StructField.of( + PROTO_ENUM_COL_NULL, Type.protoEnum(Genre.getDescriptor().getFullName())), + StructField.of( + PROTO_ENUM_COL_NOT_NULL, Type.protoEnum(Genre.getDescriptor().getFullName())), + StructField.of(BOOL_ARRAY_COL, 
Type.array(Type.bool())), + StructField.of(BYTES_ARRAY_COL, Type.array(Type.bytes())), + StructField.of(DATE_ARRAY_COL, Type.array(Type.date())), + StructField.of(FLOAT64_ARRAY_COL, Type.array(Type.float64())), + StructField.of(INT64_ARRAY_COL, Type.array(Type.int64())), + StructField.of(NUMERIC_ARRAY_COL, Type.array(Type.numeric())), + StructField.of(JSON_ARRAY_COL, Type.array(Type.json())), + StructField.of(STRING_ARRAY_COL, Type.array(Type.string())), + StructField.of(TIMESTAMP_ARRAY_COL, Type.array(Type.timestamp())), + StructField.of( + PROTO_MSG_ARRAY_COL, + Type.array(Type.proto(SingerInfo.getDescriptor().getFullName()))), + StructField.of( + PROTO_ENUM_ARRAY_COL, + Type.array(Type.protoEnum(Genre.getDescriptor().getFullName()))), + StructField.of(PG_NUMERIC_COL_NULL, Type.pgNumeric()), + StructField.of(PG_NUMERIC_COL_NOT_NULL, Type.pgNumeric()), + StructField.of(PG_NUMERIC_COL_NAN, Type.pgNumeric())), + Collections.singletonList( + Struct.newBuilder() + .set(STRING_COL_NULL) + .to((String) null) + .set(STRING_COL_NOT_NULL) + .to(STRING_VALUE) + .set(BOOLEAN_COL_NULL) + .to((Boolean) null) + .set(BOOLEAN_COL_NOT_NULL) + .to(BOOLEAN_VALUE) + .set(DOUBLE_COL_NULL) + .to((Double) null) + .set(DOUBLE_COL_NOT_NULL) + .to(DOUBLE_VALUE) + .set(BYTES_COL_NULL) + .to((ByteArray) null) + .set(BYTES_COL_NOT_NULL) + .to(BYTES_VALUE) + .set(LONG_COL_NULL) + .to((Long) null) + .set(LONG_COL_NOT_NULL) + .to(LONG_VALUE) + .set(DATE_COL_NULL) + .to((Date) null) + .set(DATE_COL_NOT_NULL) + .to(DATE_VALUE) + .set(TIMESTAMP_COL_NULL) + .to((Timestamp) null) + .set(TIMESTAMP_COL_NOT_NULL) + .to(TIMESTAMP_VALUE) + .set(TIME_COL_NULL) + .to((Timestamp) null) + .set(TIME_COL_NOT_NULL) + .to(TIME_VALUE) + .set(ARRAY_COL_NULL) + .toInt64Array((long[]) null) + .set(ARRAY_COL_NOT_NULL) + .toInt64Array(ARRAY_VALUE) + .set(URL_COL_NULL) + .to((String) null) + .set(URL_COL_NOT_NULL) + .to(URL_VALUE) + .set(STRING_COL_NUMBER) + .to(STRING_NUMBER_VALUE) + .set(STRING_COL_DATE) + 
.to(STRING_DATE_VALUE) + .set(STRING_COL_TIMESTAMP) + .to(STRING_TIMESTAMP_VALUE) + .set(STRING_COL_TIME) + .to(STRING_TIME_VALUE) + .set(NUMERIC_COL_NULL) + .to((BigDecimal) null) + .set(NUMERIC_COL_NOT_NULL) + .to(NUMERIC_VALUE) + .set(JSON_COL_NULL) + .to(Value.json(null)) + .set(JSON_COL_NOT_NULL) + .to(Value.json(JSON_VALUE)) + .set(PROTO_MSG_COL_NULL) + .to((ByteArray) null, SingerInfo.getDescriptor().getFullName()) + .set(PROTO_MSG_COL_NOT_NULL) + .to(PROTO_MSG_VALUE, SingerInfo.getDescriptor().getFullName()) + .set(PROTO_ENUM_COL_NULL) + .to((Long) null, Genre.getDescriptor().getFullName()) + .set(PROTO_ENUM_COL_NOT_NULL) + .to(PROTO_ENUM_VALUE, Genre.getDescriptor().getFullName()) + .set(BOOL_ARRAY_COL) + .toBoolArray(BOOL_ARRAY_VALUE) + .set(BYTES_ARRAY_COL) + .toBytesArray(BYTES_ARRAY_VALUE) + .set(DATE_ARRAY_COL) + .toDateArray(DATE_ARRAY_VALUE) + .set(FLOAT64_ARRAY_COL) + .toFloat64Array(FLOAT64_ARRAY_VALUE) + .set(INT64_ARRAY_COL) + .toInt64Array(INT64_ARRAY_VALUE) + .set(NUMERIC_ARRAY_COL) + .toNumericArray(NUMERIC_ARRAY_VALUE) + .set(JSON_ARRAY_COL) + .toJsonArray(JSON_ARRAY_VALUE) + .set(STRING_ARRAY_COL) + .toStringArray(STRING_ARRAY_VALUE) + .set(TIMESTAMP_ARRAY_COL) + .toTimestampArray(TIMESTAMP_ARRAY_VALUE) + .set(PROTO_MSG_ARRAY_COL) + .toProtoMessageArray( + PROTO_MSG_ARRAY_VALUE, SingerInfo.getDescriptor().getFullName()) + .set(PROTO_ENUM_ARRAY_COL) + .toProtoEnumArray(PROTO_ENUM_ARRAY_VALUE, Genre.getDescriptor().getFullName()) + .set(PG_NUMERIC_COL_NULL) + .to(Value.pgNumeric((String) null)) + .set(PG_NUMERIC_COL_NOT_NULL) + .to(Value.pgNumeric("3.14")) + .set(PG_NUMERIC_COL_NAN) + .to(Value.pgNumeric("NaN")) + .build())); + } + + public JdbcResultSetTest() throws SQLException { + subject = JdbcResultSet.of(mock(Statement.class), getMockResultSet()); + subject.next(); + } + + @Test + public void testWasNull() throws SQLException { + String value = subject.getString(STRING_COL_NULL); + boolean wasNull = subject.wasNull(); + 
assertTrue(wasNull); + assertNull(value); + String valueNotNull = subject.getString(STRING_COL_NOT_NULL); + boolean wasNotNull = subject.wasNull(); + assertFalse(wasNotNull); + assertNotNull(valueNotNull); + } + + @Test + public void testNext() throws SQLException { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + assertTrue(rs.isBeforeFirst()); + assertFalse(rs.isAfterLast()); + int num = 0; + while (rs.next()) { + num++; + } + assertTrue(num > 0); + assertFalse(rs.isBeforeFirst()); + assertTrue(rs.isAfterLast()); + } + } + + @Test + public void testClose() throws SQLException { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + assertFalse(rs.isClosed()); + rs.next(); + assertNotNull(rs.getString(STRING_COL_NOT_NULL)); + rs.close(); + assertTrue(rs.isClosed()); + boolean failed = false; + try { + // Should fail + rs.getString(STRING_COL_NOT_NULL); + } catch (SQLException e) { + failed = true; + } + assertTrue(failed); + } + } + + @Test + public void testGetStringIndex() throws SQLException { + assertNotNull(subject.getString(STRING_COLINDEX_NOTNULL)); + assertEquals(STRING_VALUE, subject.getString(STRING_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetJsonIndex() throws SQLException { + assertNotNull(subject.getString(JSON_COLINDEX_NOT_NULL)); + assertEquals(JSON_VALUE, subject.getString(JSON_COLINDEX_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(JSON_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringIndexForBool() throws SQLException { + assertNotNull(subject.getString(BOOLEAN_COLINDEX_NOTNULL)); + assertEquals(String.valueOf(BOOLEAN_VALUE), subject.getString(BOOLEAN_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(BOOLEAN_COLINDEX_NULL)); + 
assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringIndexForInt64() throws SQLException { + assertNotNull(subject.getString(LONG_COLINDEX_NOTNULL)); + assertEquals(String.valueOf(LONG_VALUE), subject.getString(LONG_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringIndexForFloat64() throws SQLException { + assertNotNull(subject.getString(DOUBLE_COLINDEX_NOTNULL)); + assertEquals(String.valueOf(DOUBLE_VALUE), subject.getString(DOUBLE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringIndexForBytes() throws SQLException { + assertNotNull(subject.getString(BYTES_COLINDEX_NOTNULL)); + assertEquals(BYTES_VALUE.toBase64(), subject.getString(BYTES_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(BYTES_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringIndexForDate() throws SQLException { + assertNotNull(subject.getString(DATE_COLINDEX_NOTNULL)); + assertEquals(String.valueOf(DATE_VALUE), subject.getString(DATE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(DATE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringIndexForTimestamp() throws SQLException { + assertNotNull(subject.getString(TIMESTAMP_COLINDEX_NOTNULL)); + assertEquals(String.valueOf(TIMESTAMP_VALUE), subject.getString(TIMESTAMP_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(TIMESTAMP_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringIndexForArray() { + try { + subject.getString(ARRAY_COLINDEX_NOTNULL); + fail("missing SQLException"); + } catch (SQLException e) { + assertTrue(e instanceof 
JdbcSqlException); + JdbcSqlException jse = (JdbcSqlException) e; + assertEquals(jse.getCode(), Code.INVALID_ARGUMENT); + } + } + + @Test + public void testGetStringIndexForNullArray() { + try { + subject.getString(ARRAY_COLINDEX_NULL); + fail("missing SQLException"); + } catch (SQLException e) { + assertTrue(e instanceof JdbcSqlException); + JdbcSqlException jse = (JdbcSqlException) e; + assertEquals(jse.getCode(), Code.INVALID_ARGUMENT); + } + } + + @Test + public void testGetNStringIndex() throws SQLException { + assertNotNull(subject.getNString(STRING_COLINDEX_NOTNULL)); + assertEquals(STRING_VALUE, subject.getNString(STRING_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getNString(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetURLIndex() throws SQLException, MalformedURLException { + assertNotNull(subject.getURL(URL_COLINDEX_NOTNULL)); + assertEquals(new URL(URL_VALUE), subject.getURL(URL_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getURL(URL_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetURLIndexInvalid() throws SQLException { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> subject.getURL(STRING_COLINDEX_NOTNULL)); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException + .getMessage() + .contains("Invalid URL: " + subject.getString(STRING_COLINDEX_NOTNULL))); + } + + @Test + public void testGetBooleanIndex() throws SQLException { + assertTrue(subject.getBoolean(BOOLEAN_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertFalse(subject.getBoolean(BOOLEAN_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBooleanIndexForLong() throws SQLException { + assertTrue(subject.getBoolean(LONG_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + 
assertFalse(subject.getBoolean(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBooleanIndexForDouble() throws SQLException { + assertTrue(subject.getBoolean(DOUBLE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertFalse(subject.getBoolean(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBooleanIndexForString() throws SQLException { + assertFalse(subject.getBoolean(STRING_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertFalse(subject.getBoolean(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBooleanIndexForDate() { + SQLException e = + assertThrows(SQLException.class, () -> subject.getBoolean(DATE_COLINDEX_NOTNULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + + e = assertThrows(SQLException.class, () -> subject.getBoolean(DATE_COLINDEX_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetNullBooleanIndex() throws SQLException { + assertFalse(subject.getBoolean(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertFalse(subject.getBoolean(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertFalse(subject.getBoolean(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetLongIndex() throws SQLException { + assertEquals(1L, subject.getLong(LONG_COLINDEX_NOTNULL)); + assertEquals(LONG_VALUE, subject.getLong(LONG_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertEquals(0L, subject.getLong(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetLongIndexForBool() throws SQLException { + assertEquals(1L, subject.getLong(BOOLEAN_COLINDEX_NOTNULL)); + assertEquals(BOOLEAN_VALUE ? 
1L : 0L, subject.getLong(BOOLEAN_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertEquals(0L, subject.getLong(BOOLEAN_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetLongIndexForFloat64() throws SQLException { + assertEquals(3L, subject.getLong(DOUBLE_COLINDEX_NOTNULL)); + assertEquals((long) DOUBLE_VALUE, subject.getLong(DOUBLE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertEquals(0L, subject.getLong(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetIntegerTypesOnNumeric() throws SQLException { + assertEquals((byte) 0, subject.getByte(NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals((byte) 3, subject.getByte(NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + + assertEquals((short) 0, subject.getShort(NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals((short) 3, subject.getShort(NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + + assertEquals(0, subject.getInt(NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(3, subject.getInt(NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + + assertEquals(0L, subject.getLong(NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(3L, subject.getLong(NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetIntegerTypesOnPgNumeric() throws SQLException { + assertEquals((byte) 0, subject.getByte(PG_NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals((byte) 3, subject.getByte(PG_NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + + assertEquals((short) 0, subject.getShort(PG_NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals((short) 3, subject.getShort(PG_NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + + assertEquals(0, subject.getInt(PG_NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + 
assertEquals(3, subject.getInt(PG_NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + + assertEquals(0L, subject.getLong(PG_NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(3L, subject.getLong(PG_NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetIntegerTypesOnPgNumericNaN() throws SQLException { + assertTrue(Double.isNaN(subject.getDouble(PG_NUMERIC_COLINDEX_NAN))); + assertTrue(Float.isNaN(subject.getFloat(PG_NUMERIC_COLINDEX_NAN))); + assertEquals("NaN", subject.getString(PG_NUMERIC_COLINDEX_NAN)); + SQLException e = + assertThrows(SQLException.class, () -> subject.getByte(PG_NUMERIC_COLINDEX_NAN)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + + e = assertThrows(SQLException.class, () -> subject.getShort(PG_NUMERIC_COLINDEX_NAN)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + + e = assertThrows(SQLException.class, () -> subject.getInt(PG_NUMERIC_COLINDEX_NAN)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + + e = assertThrows(SQLException.class, () -> subject.getLong(PG_NUMERIC_COLINDEX_NAN)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + + e = assertThrows(SQLException.class, () -> subject.getBigDecimal(PG_NUMERIC_COLINDEX_NAN)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetLongIndexForString() { + SQLException e = + assertThrows(SQLException.class, () -> subject.getLong(STRING_COLINDEX_NOTNULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetLongIndexForNumberString() throws 
SQLException { + assertEquals( + Long.valueOf(STRING_NUMBER_VALUE).longValue(), subject.getLong(STRING_COLINDEX_NUMBER)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetLongIndexForNullString() throws SQLException { + assertEquals(0L, subject.getLong(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetLongIndexForTimestamp() { + SQLException e = + assertThrows(SQLException.class, () -> subject.getLong(TIMESTAMP_COLINDEX_NOTNULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetDoubleIndex() throws SQLException { + assertEquals(DOUBLE_VALUE, subject.getDouble(DOUBLE_COLINDEX_NOTNULL), 0d); + assertFalse(subject.wasNull()); + assertEquals(0d, subject.getDouble(DOUBLE_COLINDEX_NULL), 0d); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDoubleIndexFromString() throws SQLException { + assertEquals( + Double.parseDouble(STRING_NUMBER_VALUE), subject.getDouble(STRING_COLINDEX_NUMBER), 0d); + assertFalse(subject.wasNull()); + assertEquals(0d, subject.getDouble(STRING_COLINDEX_NULL), 0d); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDoubleIndexFromBool() throws SQLException { + assertEquals(BOOLEAN_VALUE ? 
1d : 0d, subject.getDouble(BOOLEAN_COLINDEX_NOTNULL), 0d); + assertFalse(subject.wasNull()); + assertEquals(0d, subject.getDouble(BOOLEAN_COLINDEX_NULL), 0d); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDoubleIndexFromInt64() throws SQLException { + assertEquals(LONG_VALUE, subject.getDouble(LONG_COLINDEX_NOTNULL), 0d); + assertFalse(subject.wasNull()); + assertEquals(0d, subject.getDouble(LONG_COLINDEX_NULL), 0d); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDoubleIndexFromTimestamp() { + SQLException e = + assertThrows(SQLException.class, () -> subject.getDouble(TIMESTAMP_COLINDEX_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetBigDecimalIndexAndScale() throws SQLException { + assertNotNull(subject.getBigDecimal(DOUBLE_COLINDEX_NOTNULL, 2)); + assertEquals( + BigDecimal.valueOf(DOUBLE_VALUE).setScale(2, RoundingMode.HALF_UP), + subject.getBigDecimal(DOUBLE_COLINDEX_NOTNULL, 2)); + assertFalse(subject.wasNull()); + assertNull(subject.getBigDecimal(DOUBLE_COLINDEX_NULL, 2)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBytesIndex() throws SQLException { + assertNotNull(subject.getBytes(BYTES_COLINDEX_NOTNULL)); + assertArrayEquals(BYTES_VALUE.toByteArray(), subject.getBytes(BYTES_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getBytes(BYTES_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBytesIndexFromProtoMessage() throws SQLException { + assertNotNull(subject.getBytes(PROTO_MSG_COLINDEX_NOT_NULL)); + assertArrayEquals(PROTO_MSG_VALUE.toByteArray(), subject.getBytes(PROTO_MSG_COLINDEX_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getBytes(PROTO_MSG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetProtoMessage() throws SQLException 
{ + SingerInfo singerInfo = subject.getObject(PROTO_MSG_COLINDEX_NOT_NULL, SingerInfo.class); + assertEquals(SingerInfo.newBuilder().setSingerId(1).build(), singerInfo); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(PROTO_MSG_COLINDEX_NULL, SingerInfo.class)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBytesAsObjectIndexFromProtoMessage() throws SQLException { + assertNotNull(subject.getObject(PROTO_MSG_COLINDEX_NOT_NULL)); + assertArrayEquals( + PROTO_MSG_VALUE.toByteArray(), (byte[]) subject.getObject(PROTO_MSG_COLINDEX_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(PROTO_MSG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetProtoMessageArrayAsObject() throws SQLException { + assertNotNull(subject.getObject(PROTO_MSG_ARRAY_COL, SingerInfo[].class)); + assertArrayEquals( + new SingerInfo[] {SingerInfo.newBuilder().setSingerId(1).build(), null}, + subject.getObject(PROTO_MSG_ARRAY_COL, SingerInfo[].class)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetIntIndexFromProtoEnum() throws SQLException { + assertEquals(PROTO_ENUM_VALUE, subject.getInt(PROTO_ENUM_COLINDEX_NOT_NULL)); + assertFalse(subject.wasNull()); + assertEquals(0, subject.getInt(PROTO_ENUM_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetEnum() throws SQLException { + Genre singerGenre = subject.getObject(PROTO_ENUM_COLINDEX_NOT_NULL, Genre.class); + assertEquals(Genre.ROCK, singerGenre); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(PROTO_ENUM_COLINDEX_NULL, Genre.class)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetIntAsObjectIndexFromProtoEnum() throws SQLException { + assertNotNull(subject.getObject(PROTO_ENUM_COLINDEX_NOT_NULL)); + assertEquals(PROTO_ENUM_VALUE, subject.getObject(PROTO_ENUM_COLINDEX_NOT_NULL)); + assertFalse(subject.wasNull()); + 
assertNull(subject.getObject(PROTO_ENUM_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetProtoEnumArrayAsObject() throws SQLException { + assertNotNull(subject.getObject(PROTO_ENUM_ARRAY_COL, Genre[].class)); + assertArrayEquals( + new Genre[] {Genre.ROCK, null}, subject.getObject(PROTO_ENUM_ARRAY_COL, Genre[].class)); + assertFalse(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetDateIndex() throws SQLException { + assertNotNull(subject.getDate(DATE_COLINDEX_NOTNULL)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getDate(DATE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getDate(DATE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDateIndexFromString() throws SQLException { + assertNotNull(subject.getDate(STRING_COLINDEX_DATE)); + assertEquals(java.sql.Date.valueOf(STRING_DATE_VALUE), subject.getDate(STRING_COLINDEX_DATE)); + assertFalse(subject.wasNull()); + assertNull(subject.getDate(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDateIndexFromTimestamp() throws SQLException { + assertNotNull(subject.getDate(TIMESTAMP_COLINDEX_NOTNULL)); + assertEquals( + new java.sql.Date(TIMESTAMP_VALUE.toSqlTimestamp().getTime()), + subject.getDate(TIMESTAMP_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getDate(TIMESTAMP_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDateIndexFromInt64() { + SQLException e = assertThrows(SQLException.class, () -> subject.getDate(LONG_COLINDEX_NOTNULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetTimeIndex() throws SQLException { + assertNotNull(subject.getTime(TIME_COLINDEX_NOTNULL)); + assertEquals( + 
new Time(TIME_VALUE.toSqlTimestamp().getTime()), subject.getTime(TIME_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getTime(TIME_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimeIndexFromString() throws SQLException { + assertNotNull(subject.getTime(STRING_COLINDEX_TIME)); + assertEquals(Time.valueOf(STRING_TIME_VALUE), subject.getTime(STRING_COLINDEX_TIME)); + assertFalse(subject.wasNull()); + assertNull(subject.getTime(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimestampIndex() throws SQLException { + assertNotNull(subject.getTimestamp(TIMESTAMP_COLINDEX_NOTNULL)); + assertEquals( + TIMESTAMP_VALUE.toSqlTimestamp(), subject.getTimestamp(TIMESTAMP_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(TIMESTAMP_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimestampIndexFromString() throws SQLException { + assertNotNull(subject.getTimestamp(STRING_COLINDEX_TIMESTAMP)); + assertEquals( + Timestamp.parseTimestamp(STRING_TIMESTAMP_VALUE).toSqlTimestamp(), + subject.getTimestamp(STRING_COLINDEX_TIMESTAMP)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetTimestampIndexFromDate() throws SQLException { + assertNotNull(subject.getTimestamp(DATE_COLINDEX_NOTNULL)); + assertEquals( + new java.sql.Timestamp( + DATE_VALUE.getYear() - 1900, + DATE_VALUE.getMonth() - 1, + DATE_VALUE.getDayOfMonth(), + 0, + 0, + 0, + 0), + subject.getTimestamp(DATE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(DATE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStringLabel() throws SQLException { + assertNotNull(subject.getString(STRING_COL_NOT_NULL)); + assertEquals("FOO", 
subject.getString(STRING_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(STRING_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetNStringLabel() throws SQLException { + assertNotNull(subject.getNString(STRING_COL_NOT_NULL)); + assertEquals("FOO", subject.getNString(STRING_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getNString(STRING_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetURLLabel() throws SQLException { + assertNotNull(subject.getString(URL_COL_NOT_NULL)); + assertEquals(URL_VALUE, subject.getString(URL_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getString(URL_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetURLLabelInvalid() throws SQLException { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> subject.getURL(STRING_COL_NOT_NULL)); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException + .getMessage() + .contains("Invalid URL: " + subject.getString(STRING_COL_NOT_NULL))); + } + + @Test + public void testGetBooleanLabel() throws SQLException { + assertTrue(subject.getBoolean(BOOLEAN_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertFalse(subject.getBoolean(BOOLEAN_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetLongLabel() throws SQLException { + assertEquals(1L, subject.getLong(LONG_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertEquals(0L, subject.getLong(LONG_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetDoubleLabel() throws SQLException { + assertEquals(DOUBLE_VALUE, subject.getDouble(DOUBLE_COL_NOT_NULL), 0d); + assertFalse(subject.wasNull()); + assertEquals(0d, subject.getDouble(DOUBLE_COL_NULL), 0d); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + 
public void testGetBigDecimalLabelAndScale() throws SQLException { + assertNotNull(subject.getBigDecimal(DOUBLE_COL_NOT_NULL, 2)); + assertEquals(BigDecimal.valueOf(3.14d), subject.getBigDecimal(DOUBLE_COL_NOT_NULL, 2)); + assertFalse(subject.wasNull()); + assertNull(subject.getBigDecimal(DOUBLE_COL_NULL, 2)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBytesLabel() throws SQLException { + assertNotNull(subject.getBytes(BYTES_COL_NOT_NULL)); + assertArrayEquals( + ByteArray.copyFrom("FOO").toByteArray(), subject.getBytes(BYTES_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getBytes(BYTES_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetDateLabel() throws SQLException { + assertNotNull(subject.getDate(DATE_COL_NOT_NULL)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getDate(DATE_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getDate(DATE_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimeLabel() throws SQLException { + assertNotNull(subject.getTime(TIME_COL_NOT_NULL)); + assertEquals( + new Time(TIME_VALUE.toSqlTimestamp().getTime()), subject.getTime(TIME_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getTime(TIME_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimestampLabel() throws SQLException { + assertNotNull(subject.getTime(TIMESTAMP_COL_NOT_NULL)); + assertEquals(TIMESTAMP_VALUE.toSqlTimestamp(), subject.getTimestamp(TIMESTAMP_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(TIMESTAMP_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetMetaData() throws SQLException { + JdbcResultSetMetaData metadata = subject.getMetaData(); + assertNotNull(metadata); + } + + @Test + public void 
testGetMetaDataBeforeNext() throws SQLException { + ResultSet spannerResultSet = mock(ResultSet.class); + when(spannerResultSet.next()).thenReturn(true, false); + + JdbcResultSet resultSet = JdbcResultSet.of(spannerResultSet); + assertNotNull(resultSet.getMetaData()); + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + @Test + public void testGetMetaDataTwiceBeforeNext() throws SQLException { + ResultSet spannerResultSet = mock(ResultSet.class); + when(spannerResultSet.next()).thenReturn(true, false); + + JdbcResultSet resultSet = JdbcResultSet.of(spannerResultSet); + assertNotNull(resultSet.getMetaData()); + assertNotNull(resultSet.getMetaData()); + + // This would have returned false before the fix in + // https://github.com/googleapis/java-spanner-jdbc/pull/323 + assertTrue(resultSet.next()); + assertFalse(resultSet.next()); + } + + @Test + public void testFindColumn() throws SQLException { + assertEquals(2, subject.findColumn(STRING_COL_NOT_NULL)); + } + + @Test + public void testGetBigDecimalFromDouble_usingIndex() throws SQLException { + assertNotNull(subject.getBigDecimal(DOUBLE_COLINDEX_NOTNULL)); + assertEquals(BigDecimal.valueOf(DOUBLE_VALUE), subject.getBigDecimal(DOUBLE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getBigDecimal(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBigDecimalFromDouble_usingLabel() throws SQLException { + assertNotNull(subject.getBigDecimal(DOUBLE_COL_NOT_NULL)); + assertEquals(BigDecimal.valueOf(DOUBLE_VALUE), subject.getBigDecimal(DOUBLE_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getBigDecimal(DOUBLE_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBigDecimalIndex() throws SQLException { + assertNotNull(subject.getBigDecimal(NUMERIC_COLINDEX_NOTNULL)); + assertEquals(NUMERIC_VALUE, subject.getBigDecimal(NUMERIC_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + 
assertNull(subject.getBigDecimal(NUMERIC_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBigDecimalLabel() throws SQLException { + assertNotNull(subject.getBigDecimal(NUMERIC_COL_NOT_NULL)); + assertEquals(NUMERIC_VALUE, subject.getBigDecimal(NUMERIC_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getBigDecimal(NUMERIC_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetStatement() throws SQLException { + assertNotNull(subject.getStatement()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetDateIndexCalendar() throws SQLException { + Calendar cal = Calendar.getInstance(); + assertNotNull(subject.getDate(DATE_COLINDEX_NOTNULL, cal)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getDate(DATE_COLINDEX_NOTNULL, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getDate(DATE_COLINDEX_NULL, cal)); + assertTrue(subject.wasNull()); + + Calendar calGMT = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + Calendar expectedCal = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + expectedCal.clear(); + //noinspection MagicConstant + expectedCal.set(DATE_VALUE.getYear(), DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()); + java.sql.Date expected = new java.sql.Date(expectedCal.getTimeInMillis()); + assertEquals(expected, subject.getDate(DATE_COLINDEX_NOTNULL, calGMT)); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetDateLabelCalendar() throws SQLException { + Calendar cal = Calendar.getInstance(); + assertNotNull(subject.getDate(DATE_COL_NOT_NULL, cal)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getDate(DATE_COL_NOT_NULL, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getDate(DATE_COL_NULL, cal)); + assertTrue(subject.wasNull()); + + 
Calendar calGMT = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + Calendar expected = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + //noinspection MagicConstant + expected.set( + DATE_VALUE.getYear(), DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth(), 0, 0, 0); + expected.clear(Calendar.MILLISECOND); + assertEquals( + new java.sql.Date(expected.getTimeInMillis()), subject.getDate(DATE_COL_NOT_NULL, calGMT)); + } + + @Test + public void testGetTimeIndexCalendar() throws SQLException { + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + + assertNotNull(subject.getTime(TIME_COLINDEX_NOTNULL, cal)); + assertEquals( + new Time(TIME_VALUE.toSqlTimestamp().getTime()), + subject.getTime(TIME_COLINDEX_NOTNULL, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getTime(TIME_COLINDEX_NULL, cal)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimeLabelCalendar() throws SQLException { + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + + assertNotNull(subject.getTime(TIME_COL_NOT_NULL, cal)); + assertEquals( + new Time(TIME_VALUE.toSqlTimestamp().getTime()), subject.getTime(TIME_COL_NOT_NULL, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getTime(TIME_COL_NULL, cal)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimestampIndexCalendar() throws SQLException { + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + + assertNotNull(subject.getTimestamp(TIMESTAMP_COLINDEX_NOTNULL, cal)); + assertEquals( + TIMESTAMP_VALUE.toSqlTimestamp(), subject.getTimestamp(TIMESTAMP_COLINDEX_NOTNULL, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(TIMESTAMP_COLINDEX_NULL, cal)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimestampIndexCalendarFromString() throws SQLException { + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles")); + + 
assertNotNull(subject.getTimestamp(STRING_COLINDEX_TIMESTAMP, cal)); + assertEquals( + Timestamp.parseTimestamp(STRING_TIMESTAMP_VALUE.replace("Z", "-07:00")).toSqlTimestamp(), + subject.getTimestamp(STRING_COLINDEX_TIMESTAMP, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(STRING_COLINDEX_NULL, cal)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimestampIndexCalendarFromDate() throws SQLException { + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles")); + + assertNotNull(subject.getTimestamp(DATE_COLINDEX_NOTNULL, cal)); + assertEquals( + Timestamp.parseTimestamp(String.format("%sT00:00:00-08:00", DATE_VALUE)).toSqlTimestamp(), + subject.getTimestamp(DATE_COLINDEX_NOTNULL, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(DATE_COLINDEX_NULL, cal)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetTimestampLabelCalendar() throws SQLException { + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT")); + + assertNotNull(subject.getTimestamp(TIMESTAMP_COL_NOT_NULL, cal)); + assertEquals( + TIMESTAMP_VALUE.toSqlTimestamp(), subject.getTimestamp(TIMESTAMP_COL_NOT_NULL, cal)); + assertFalse(subject.wasNull()); + assertNull(subject.getTimestamp(TIMESTAMP_COL_NULL, cal)); + assertTrue(subject.wasNull()); + } + + @Test + public void testIsClosed() { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + assertFalse(rs.isClosed()); + rs.close(); + assertTrue(rs.isClosed()); + } + } + + @Test + public void testGetByteIndex() throws SQLException { + assertEquals(LONG_VALUE, subject.getByte(LONG_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertEquals(0, subject.getByte(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetByteIndexFromString() throws SQLException { + assertEquals( + Byte.valueOf(STRING_NUMBER_VALUE).byteValue(), 
subject.getByte(STRING_COLINDEX_NUMBER)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetByteIndexFromDouble() throws SQLException { + assertEquals((byte) DOUBLE_VALUE, subject.getByte(DOUBLE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetByteIndexFromBoolean() throws SQLException { + assertEquals(BOOLEAN_VALUE ? 1 : 0, subject.getByte(BOOLEAN_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetNullByteIndex() throws SQLException { + assertEquals(0, subject.getByte(BOOLEAN_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getByte(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getByte(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getByte(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + SQLException e = assertThrows(SQLException.class, () -> subject.getByte(TIMESTAMP_COL_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetShortIndex() throws SQLException { + assertEquals(LONG_VALUE, subject.getShort(LONG_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertEquals(0, subject.getShort(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetShortIndexFromString() throws SQLException { + assertEquals( + Short.valueOf(STRING_NUMBER_VALUE).shortValue(), subject.getShort(STRING_COLINDEX_NUMBER)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetShortIndexFromDouble() throws SQLException { + assertEquals((short) DOUBLE_VALUE, subject.getShort(DOUBLE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetShortIndexFromBoolean() throws SQLException { + assertEquals(BOOLEAN_VALUE ? 
1 : 0, subject.getShort(BOOLEAN_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetShortIndexFromBytes() { + SQLException e = assertThrows(SQLException.class, () -> subject.getShort(BYTES_COL_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetNullShortIndex() throws SQLException { + assertEquals(0, subject.getShort(BOOLEAN_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getShort(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getShort(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getShort(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + SQLException e = assertThrows(SQLException.class, () -> subject.getShort(TIMESTAMP_COL_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetIntIndex() throws SQLException { + int expected = (int) LONG_VALUE; + assertEquals(expected, subject.getInt(LONG_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertEquals(0, subject.getInt(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetIntIndexFromString() throws SQLException { + assertEquals( + Integer.valueOf(STRING_NUMBER_VALUE).intValue(), subject.getInt(STRING_COLINDEX_NUMBER)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetIntIndexFromDouble() throws SQLException { + assertEquals((int) DOUBLE_VALUE, subject.getInt(DOUBLE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetIntIndexFromBoolean() throws SQLException { + assertEquals(BOOLEAN_VALUE ? 
1 : 0, subject.getInt(BOOLEAN_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetIntIndexFromTimestamp() { + SQLException e = assertThrows(SQLException.class, () -> subject.getInt(TIMESTAMP_COL_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetNullIntIndex() throws SQLException { + assertEquals(0, subject.getInt(BOOLEAN_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getInt(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getInt(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + assertEquals(0, subject.getInt(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + SQLException e = assertThrows(SQLException.class, () -> subject.getInt(TIMESTAMP_COL_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetFloatIndex() throws SQLException { + float expected = (float) DOUBLE_VALUE; + assertEquals(expected, subject.getFloat(DOUBLE_COLINDEX_NOTNULL), 0f); + assertFalse(subject.wasNull()); + assertEquals(0d, subject.getFloat(DOUBLE_COLINDEX_NULL), 0f); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetFloatIndexFromString() throws SQLException { + assertEquals( + Float.parseFloat(STRING_NUMBER_VALUE), subject.getFloat(STRING_COLINDEX_NUMBER), 0f); + assertFalse(subject.wasNull()); + assertEquals(0f, subject.getFloat(STRING_COLINDEX_NULL), 0f); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetFloatIndexFromBool() throws SQLException { + assertEquals(BOOLEAN_VALUE ? 
1f : 0f, subject.getFloat(BOOLEAN_COLINDEX_NOTNULL), 0f); + assertFalse(subject.wasNull()); + assertEquals(0f, subject.getFloat(BOOLEAN_COLINDEX_NULL), 0f); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetFloatIndexFromInt64() throws SQLException { + assertEquals(LONG_VALUE, subject.getFloat(LONG_COLINDEX_NOTNULL), 0f); + assertFalse(subject.wasNull()); + assertEquals(0f, subject.getFloat(LONG_COLINDEX_NULL), 0f); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetFloatIndexFromTimestamp() { + SQLException e = + assertThrows(SQLException.class, () -> subject.getFloat(TIMESTAMP_COLINDEX_NULL)); + assertTrue(e instanceof JdbcSqlException); + assertEquals(Code.INVALID_ARGUMENT, ((JdbcSqlException) e).getCode()); + } + + @Test + public void testGetByteLabel() throws SQLException { + assertEquals(1, subject.getByte(LONG_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertEquals(0, subject.getByte(LONG_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetShortLabel() throws SQLException { + assertEquals(1, subject.getShort(LONG_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertEquals(0, subject.getShort(LONG_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetIntLabel() throws SQLException { + assertEquals(1, subject.getInt(LONG_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertEquals(0, subject.getInt(LONG_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetFloatLabel() throws SQLException { + float expected = (float) DOUBLE_VALUE; + assertEquals(expected, subject.getFloat(DOUBLE_COL_NOT_NULL), 0f); + assertFalse(subject.wasNull()); + assertEquals(0f, subject.getFloat(DOUBLE_COL_NULL), 0f); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetObjectLabel() throws SQLException { + assertNotNull(subject.getObject(DATE_COL_NOT_NULL)); + assertEquals( + new java.sql.Date( + 
 DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getObject(DATE_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(DATE_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetObjectIndex() throws SQLException { + assertNotNull(subject.getObject(DATE_COLINDEX_NOTNULL)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getObject(DATE_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(DATE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetJsonAsObjectIndex() throws SQLException { + assertNotNull(subject.getObject(JSON_COLINDEX_NOT_NULL)); + assertEquals(JSON_VALUE, subject.getObject(JSON_COLINDEX_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(JSON_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetObjectLabelMap() throws SQLException { + Map<String, Class<?>> map = new HashMap<>(); + assertNotNull(subject.getObject(DATE_COL_NOT_NULL, map)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getObject(DATE_COL_NOT_NULL, map)); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(DATE_COL_NULL, map)); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetObjectIndexMap() throws SQLException { + Map<String, Class<?>> map = Collections.emptyMap(); + assertNotNull(subject.getObject(DATE_COLINDEX_NOTNULL, map)); + assertEquals( + new java.sql.Date( + DATE_VALUE.getYear() - 1900, DATE_VALUE.getMonth() - 1, DATE_VALUE.getDayOfMonth()), + subject.getObject(DATE_COLINDEX_NOTNULL, map)); + assertFalse(subject.wasNull()); + assertNull(subject.getObject(DATE_COLINDEX_NULL, map)); + 
assertTrue(subject.wasNull()); + } + + @Test + public void testGetArrayLabel() throws SQLException { + assertNotNull(subject.getArray(ARRAY_COL_NOT_NULL)); + assertEquals( + JdbcArray.createArray(JdbcDataType.INT64, Arrays.asList(1L, 2L, 3L)), + subject.getArray(ARRAY_COL_NOT_NULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getArray(ARRAY_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetArrayIndex() throws SQLException { + assertNotNull(subject.getArray(ARRAY_COLINDEX_NOTNULL)); + assertEquals( + JdbcArray.createArray(JdbcDataType.INT64, Arrays.asList(1L, 2L, 3L)), + subject.getArray(ARRAY_COLINDEX_NOTNULL)); + assertFalse(subject.wasNull()); + assertNull(subject.getArray(ARRAY_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetWarnings() { + assertNull(subject.getWarnings()); + } + + @Test + public void testClearWarnings() { + subject.clearWarnings(); + } + + @Test + public void testIsBeforeFirst() throws SQLException { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + assertTrue(rs.isBeforeFirst()); + rs.next(); + assertFalse(rs.isBeforeFirst()); + } + } + + @Test + public void testIsAfterLast() throws SQLException { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + assertFalse(rs.isAfterLast()); + while (rs.next()) { + assertFalse(rs.isAfterLast()); + } + assertTrue(rs.isAfterLast()); + } + } + + @Test + public void testGetCharacterStreamIndex() throws SQLException, IOException { + assertNotNull(subject.getCharacterStream(STRING_COLINDEX_NOTNULL)); + Reader actual = subject.getCharacterStream(STRING_COLINDEX_NOTNULL); + char[] cbuf = new char[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals(STRING_VALUE, new String(cbuf, 0, len)); + assertEquals(3, len); + assertFalse(subject.wasNull()); + assertNull(subject.getCharacterStream(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } 
+ + @Test + public void testGetCharacterStreamLabel() throws SQLException, IOException { + assertNotNull(subject.getCharacterStream(STRING_COL_NOT_NULL)); + Reader actual = subject.getCharacterStream(STRING_COL_NOT_NULL); + char[] cbuf = new char[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals("FOO", new String(cbuf, 0, len)); + assertEquals(3, len); + assertFalse(subject.wasNull()); + assertNull(subject.getCharacterStream(STRING_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetNCharacterStreamIndex() throws SQLException, IOException { + assertNotNull(subject.getNCharacterStream(STRING_COLINDEX_NOTNULL)); + Reader actual = subject.getNCharacterStream(STRING_COLINDEX_NOTNULL); + char[] cbuf = new char[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals(STRING_VALUE, new String(cbuf, 0, len)); + assertEquals(3, len); + assertFalse(subject.wasNull()); + assertNull(subject.getNCharacterStream(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetNCharacterStreamLabel() throws SQLException, IOException { + assertNotNull(subject.getNCharacterStream(STRING_COL_NOT_NULL)); + Reader actual = subject.getNCharacterStream(STRING_COL_NOT_NULL); + char[] cbuf = new char[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals("FOO", new String(cbuf, 0, len)); + assertEquals(3, len); + assertFalse(subject.wasNull()); + assertNull(subject.getNCharacterStream(STRING_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetAsciiStreamIndex() throws SQLException, IOException { + assertNotNull(subject.getAsciiStream(STRING_COLINDEX_NOTNULL)); + InputStream actual = subject.getAsciiStream(STRING_COLINDEX_NOTNULL); + byte[] cbuf = new byte[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals(STRING_VALUE, new String(cbuf, 0, len, StandardCharsets.US_ASCII)); + assertEquals(3, len); + assertFalse(subject.wasNull()); + 
assertNull(subject.getAsciiStream(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetAsciiStreamIndexForBool() throws SQLException, IOException { + assertNotNull(subject.getAsciiStream(BOOLEAN_COLINDEX_NOTNULL)); + InputStream actual = subject.getAsciiStream(BOOLEAN_COLINDEX_NOTNULL); + byte[] cbuf = new byte[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals( + String.valueOf(BOOLEAN_VALUE), new String(cbuf, 0, len, StandardCharsets.US_ASCII)); + assertEquals(4, len); + assertFalse(subject.wasNull()); + assertNull(subject.getAsciiStream(BOOLEAN_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetAsciiStreamIndexForInt64() throws SQLException, IOException { + assertNotNull(subject.getAsciiStream(LONG_COLINDEX_NOTNULL)); + InputStream actual = subject.getAsciiStream(LONG_COLINDEX_NOTNULL); + byte[] cbuf = new byte[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals(String.valueOf(LONG_VALUE), new String(cbuf, 0, len, StandardCharsets.US_ASCII)); + assertEquals(1, len); + assertFalse(subject.wasNull()); + assertNull(subject.getAsciiStream(LONG_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetAsciiStreamIndexForFloat64() throws SQLException, IOException { + assertNotNull(subject.getAsciiStream(DOUBLE_COLINDEX_NOTNULL)); + InputStream actual = subject.getAsciiStream(DOUBLE_COLINDEX_NOTNULL); + byte[] cbuf = new byte[20]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals(String.valueOf(DOUBLE_VALUE), new String(cbuf, 0, len, StandardCharsets.US_ASCII)); + assertEquals(13, len); + assertFalse(subject.wasNull()); + assertNull(subject.getAsciiStream(DOUBLE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetAsciiStreamIndexForDate() throws SQLException, IOException { + assertNotNull(subject.getAsciiStream(DATE_COLINDEX_NOTNULL)); + InputStream actual = 
subject.getAsciiStream(DATE_COLINDEX_NOTNULL); + byte[] cbuf = new byte[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals(String.valueOf(DATE_VALUE), new String(cbuf, 0, len, StandardCharsets.US_ASCII)); + assertEquals(10, len); + assertFalse(subject.wasNull()); + assertNull(subject.getAsciiStream(DATE_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetAsciiStreamIndexForTimestamp() throws SQLException, IOException { + assertNotNull(subject.getAsciiStream(TIMESTAMP_COLINDEX_NOTNULL)); + InputStream actual = subject.getAsciiStream(TIMESTAMP_COLINDEX_NOTNULL); + byte[] cbuf = new byte[100]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals( + String.valueOf(TIMESTAMP_VALUE), new String(cbuf, 0, len, StandardCharsets.US_ASCII)); + assertEquals(30, len); + assertFalse(subject.wasNull()); + assertNull(subject.getAsciiStream(TIMESTAMP_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetUnicodeStreamIndex() throws SQLException, IOException { + assertNotNull(subject.getUnicodeStream(STRING_COLINDEX_NOTNULL)); + InputStream actual = subject.getUnicodeStream(STRING_COLINDEX_NOTNULL); + byte[] cbuf = new byte[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals(STRING_VALUE, new String(cbuf, 0, len, StandardCharsets.UTF_16LE)); + assertEquals(6, len); + assertFalse(subject.wasNull()); + assertNull(subject.getUnicodeStream(STRING_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBinaryStreamIndex() throws SQLException, IOException { + assertNotNull(subject.getBinaryStream(BYTES_COLINDEX_NOTNULL)); + InputStream actual = subject.getBinaryStream(BYTES_COLINDEX_NOTNULL); + byte[] cbuf = new byte[3]; + int len = actual.read(cbuf, 0, cbuf.length); + assertArrayEquals(BYTES_VALUE.toByteArray(), cbuf); + assertEquals(3, len); + assertFalse(subject.wasNull()); + 
assertNull(subject.getBinaryStream(BYTES_COLINDEX_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetAsciiStreamLabel() throws SQLException, IOException { + assertNotNull(subject.getAsciiStream(STRING_COL_NOT_NULL)); + InputStream actual = subject.getAsciiStream(STRING_COL_NOT_NULL); + byte[] cbuf = new byte[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals("FOO", new String(cbuf, 0, len, StandardCharsets.US_ASCII)); + assertEquals(3, len); + assertFalse(subject.wasNull()); + assertNull(subject.getAsciiStream(STRING_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @SuppressWarnings("deprecation") + @Test + public void testGetUnicodeStreamLabel() throws SQLException, IOException { + assertNotNull(subject.getUnicodeStream(STRING_COL_NOT_NULL)); + InputStream actual = subject.getUnicodeStream(STRING_COL_NOT_NULL); + byte[] cbuf = new byte[10]; + int len = actual.read(cbuf, 0, cbuf.length); + assertEquals("FOO", new String(cbuf, 0, len, StandardCharsets.UTF_16LE)); + assertEquals(6, len); + assertFalse(subject.wasNull()); + assertNull(subject.getUnicodeStream(STRING_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBinaryStreamLabel() throws SQLException, IOException { + assertNotNull(subject.getBinaryStream(BYTES_COL_NOT_NULL)); + InputStream actual = subject.getBinaryStream(BYTES_COL_NOT_NULL); + byte[] cbuf = new byte[3]; + int len = actual.read(cbuf, 0, cbuf.length); + assertArrayEquals(ByteArray.copyFrom("FOO").toByteArray(), cbuf); + assertEquals(3, len); + assertFalse(subject.wasNull()); + assertNull(subject.getBinaryStream(BYTES_COL_NULL)); + assertTrue(subject.wasNull()); + } + + @Test + public void testGetBeforeNext() { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> rs.getBigDecimal(LONG_COLINDEX_NOTNULL)); + assertEquals(Code.FAILED_PRECONDITION, 
sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException.getMessage().contains("ResultSet is before first row. Call next() first.")); + } + } + + @Test + public void testGetAfterLast() throws SQLException { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + while (rs.next()) { + assertNotNull(rs.getBigDecimal(LONG_COLINDEX_NOTNULL)); + } + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> rs.getBigDecimal(LONG_COLINDEX_NOTNULL)); + assertEquals(Code.FAILED_PRECONDITION, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException + .getMessage() + .contains("ResultSet is after last row. There is no more data available.")); + } + } + + @Test + public void testFindIllegalColumnName() { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> subject.findColumn(UNKNOWN_COLUMN)); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException.getMessage().contains("no column with label " + UNKNOWN_COLUMN + " found")); + } + + @Test + public void testGetRowAndIsFirst() throws SQLException { + try (JdbcResultSet rs = JdbcResultSet.of(mock(Statement.class), getMockResultSet())) { + int row = 0; + while (rs.next()) { + row++; + assertEquals(row, rs.getRow()); + assertEquals(row == 1, rs.isFirst()); + } + } + } + + @Test + public void testGetHoldability() throws SQLException { + assertEquals(java.sql.ResultSet.CLOSE_CURSORS_AT_COMMIT, subject.getHoldability()); + } + + @Test + public void testGetObjectAsValue() throws SQLException { + assertEquals( + Value.bool(BOOLEAN_VALUE), subject.getObject(BOOLEAN_COLINDEX_NOTNULL, Value.class)); + assertEquals(Value.bytes(BYTES_VALUE), subject.getObject(BYTES_COLINDEX_NOTNULL, Value.class)); + assertEquals(Value.date(DATE_VALUE), subject.getObject(DATE_COLINDEX_NOTNULL, Value.class)); + assertEquals( + 
Value.float64(DOUBLE_VALUE), subject.getObject(DOUBLE_COLINDEX_NOTNULL, Value.class)); + assertEquals(Value.int64(LONG_VALUE), subject.getObject(LONG_COLINDEX_NOTNULL, Value.class)); + assertEquals( + Value.numeric(NUMERIC_VALUE), subject.getObject(NUMERIC_COLINDEX_NOTNULL, Value.class)); + assertEquals( + Value.string(STRING_VALUE), subject.getObject(STRING_COLINDEX_NOTNULL, Value.class)); + assertEquals( + Value.timestamp(TIMESTAMP_VALUE), + subject.getObject(TIMESTAMP_COLINDEX_NOTNULL, Value.class)); + assertEquals( + Value.protoMessage(PROTO_MSG_VALUE, SingerInfo.getDescriptor()), + subject.getObject(PROTO_MSG_COLINDEX_NOT_NULL, Value.class)); + assertEquals( + Value.protoEnum(PROTO_ENUM_VALUE, Genre.getDescriptor()), + subject.getObject(PROTO_ENUM_COLINDEX_NOT_NULL, Value.class)); + + assertEquals(Value.boolArray(BOOL_ARRAY_VALUE), subject.getObject(BOOL_ARRAY_COL, Value.class)); + assertEquals( + Value.bytesArray(BYTES_ARRAY_VALUE), subject.getObject(BYTES_ARRAY_COL, Value.class)); + assertEquals(Value.dateArray(DATE_ARRAY_VALUE), subject.getObject(DATE_ARRAY_COL, Value.class)); + assertEquals( + Value.float64Array(FLOAT64_ARRAY_VALUE), subject.getObject(FLOAT64_ARRAY_COL, Value.class)); + assertEquals( + Value.int64Array(INT64_ARRAY_VALUE), subject.getObject(INT64_ARRAY_COL, Value.class)); + assertEquals( + Value.numericArray(NUMERIC_ARRAY_VALUE), subject.getObject(NUMERIC_ARRAY_COL, Value.class)); + assertEquals( + Value.stringArray(STRING_ARRAY_VALUE), subject.getObject(STRING_ARRAY_COL, Value.class)); + assertEquals( + Value.timestampArray(TIMESTAMP_ARRAY_VALUE), + subject.getObject(TIMESTAMP_ARRAY_COL, Value.class)); + assertEquals( + Value.protoMessageArray(PROTO_MSG_ARRAY_VALUE, SingerInfo.getDescriptor().getFullName()), + subject.getObject(PROTO_MSG_ARRAY_COL, Value.class)); + assertEquals( + Value.protoEnumArray(PROTO_ENUM_ARRAY_VALUE, Genre.getDescriptor().getFullName()), + subject.getObject(PROTO_ENUM_ARRAY_COL, Value.class)); + } + + @Test + 
public void testGetLocalDate() throws SQLException { + LocalDate localDate = subject.getObject(DATE_COL_NOT_NULL, LocalDate.class); + assertEquals( + LocalDate.of(DATE_VALUE.getYear(), DATE_VALUE.getMonth(), DATE_VALUE.getDayOfMonth()), + localDate); + assertFalse(subject.wasNull()); + } + + @Test + public void testGetOffsetDateTime() throws SQLException { + OffsetDateTime offsetDateTime = subject.getObject(TIMESTAMP_COL_NOT_NULL, OffsetDateTime.class); + assertEquals( + OffsetDateTime.ofInstant( + Instant.ofEpochSecond(TIMESTAMP_VALUE.getSeconds(), TIMESTAMP_VALUE.getNanos()), + ZoneOffset.systemDefault()), + offsetDateTime); + assertFalse(subject.wasNull()); + } + + @Test + public void testCopyOf() throws SQLException { + ResultSet original = + ResultSets.forRows( + Type.struct(StructField.of("id", Type.int64()), StructField.of("value", Type.string())), + ImmutableList.of( + Struct.newBuilder().set("id").to(1L).set("value").to("One").build(), + Struct.newBuilder().set("id").to(2L).set("value").to("Two").build())); + java.sql.ResultSet copy = JdbcResultSet.copyOf(original); + // The original result set has been fully consumed. + assertFalse(original.next()); + // We can safely close the original result set and still use the copy. 
+ original.close(); + + ResultSetMetaData metadata = copy.getMetaData(); + assertEquals(2, metadata.getColumnCount()); + assertEquals("id", metadata.getColumnName(1)); + assertEquals("value", metadata.getColumnName(2)); + assertEquals(Types.BIGINT, metadata.getColumnType(1)); + assertEquals(Types.NVARCHAR, metadata.getColumnType(2)); + + assertTrue(copy.next()); + assertEquals(1L, copy.getLong(1)); + assertEquals("One", copy.getString(2)); + assertTrue(copy.next()); + assertEquals(2L, copy.getLong("id")); + assertEquals("Two", copy.getString("value")); + assertFalse(copy.next()); + } + + @Test + public void testCopyOfEmpty() throws SQLException { + ResultSet original = + ResultSets.forRows( + Type.struct(StructField.of("id", Type.int64()), StructField.of("value", Type.string())), + ImmutableList.of()); + java.sql.ResultSet copy = JdbcResultSet.copyOf(original); + // The original result set has been fully consumed. + assertFalse(original.next()); + // We can safely close the original result set and still use the copy. + original.close(); + + ResultSetMetaData metadata = copy.getMetaData(); + assertEquals(2, metadata.getColumnCount()); + assertEquals("id", metadata.getColumnName(1)); + assertEquals("value", metadata.getColumnName(2)); + assertEquals(Types.BIGINT, metadata.getColumnType(1)); + assertEquals(Types.NVARCHAR, metadata.getColumnType(2)); + + // The copy should not contain any rows. + assertFalse(copy.next()); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcSavepointTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcSavepointTest.java new file mode 100644 index 000000000000..50b000b7df2a --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcSavepointTest.java @@ -0,0 +1,47 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import java.sql.SQLException; +import java.sql.Savepoint; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcSavepointTest { + + @Test + public void testNamed() throws SQLException { + Savepoint savepoint = JdbcSavepoint.named("test"); + assertEquals("test", savepoint.getSavepointName()); + assertThrows(SQLException.class, savepoint::getSavepointId); + } + + @Test + public void testUnnamed() throws SQLException { + Savepoint savepoint = JdbcSavepoint.unnamed(); + assertTrue( + String.format("Savepoint id: %d", savepoint.getSavepointId()), + savepoint.getSavepointId() > 0); + assertThrows(SQLException.class, savepoint::getSavepointName); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcSqlScriptVerifier.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcSqlScriptVerifier.java new file mode 100644 index 000000000000..10a9327b42c0 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcSqlScriptVerifier.java @@ -0,0 +1,186 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.connection.AbstractSqlScriptVerifier; +import com.google.cloud.spanner.connection.AbstractStatementParser; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import com.google.rpc.Code; +import java.sql.Array; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.sql.Types; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** SQL Script verifier for JDBC connections */ +public class JdbcSqlScriptVerifier extends AbstractSqlScriptVerifier { + + static class JdbcGenericStatementResult extends GenericStatementResult { + private final boolean result; + private final long updateCount; + private final ResultSet resultSet; + + private JdbcGenericStatementResult(Statement statement, boolean result) throws SQLException { + this.result = result; + if (result) { + this.resultSet = statement.getResultSet(); + this.updateCount = -1L; + } else { + this.resultSet = null; + this.updateCount = statement.getUpdateCount(); + } + } + + @Override + protected ResultType getResultType() { + if (result) { + return ResultType.RESULT_SET; + } + if (updateCount == -2L) { + return ResultType.NO_RESULT; + } + return ResultType.UPDATE_COUNT; + } + + @Override + protected GenericResultSet getResultSet() { 
+ return new JdbcGenericResultSet(resultSet); + } + + @Override + protected long getUpdateCount() { + return updateCount; + } + } + + static class JdbcGenericResultSet extends GenericResultSet { + private final ResultSet resultSet; + + private JdbcGenericResultSet(ResultSet resultSet) { + this.resultSet = resultSet; + } + + @Override + protected boolean next() throws SQLException { + return resultSet.next(); + } + + @Override + protected Object getValue(String col) throws SQLException { + Object value = resultSet.getObject(col); + if (value instanceof Timestamp) { + return com.google.cloud.Timestamp.of((Timestamp) value); + } else if (value instanceof Array) { + Array array = (Array) value; + if (array.getBaseType() == Types.BIGINT) { + Long[] longs = (Long[]) array.getArray(); + List<Long> res = new ArrayList<>(); + Collections.addAll(res, longs); + return res; + } + throw new IllegalArgumentException("Unsupported array base type: " + array.getBaseType()); + } + return value; + } + + @Override + protected int getColumnCount() throws Exception { + return resultSet.getMetaData().getColumnCount(); + } + + @Override + protected Object getFirstValue() throws Exception { + String col = resultSet.getMetaData().getColumnName(1); + return getValue(col); + } + } + + public static class JdbcGenericConnection extends GenericConnection { + private final CloudSpannerJdbcConnection connection; + + /** + * Use this to strip comments from a statement before the statement is executed. This should + * only be used when the connection is used in a unit test with a mocked underlying connection. 
+ */ + private boolean stripCommentsBeforeExecute; + + public static JdbcGenericConnection of(CloudSpannerJdbcConnection connection) { + return new JdbcGenericConnection(connection); + } + + private JdbcGenericConnection(CloudSpannerJdbcConnection connection) { + this.connection = connection; + } + + @Override + protected GenericStatementResult execute(String sql) throws SQLException { + Statement statement = connection.createStatement(); + if (isStripCommentsBeforeExecute()) { + sql = AbstractStatementParser.getInstance(getDialect()).removeCommentsAndTrim(sql); + } + boolean result = statement.execute(sql); + return new JdbcGenericStatementResult(statement, result); + } + + @Override + public void close() throws Exception { + if (this.connection != null) { + this.connection.close(); + } + } + + boolean isStripCommentsBeforeExecute() { + return stripCommentsBeforeExecute; + } + + void setStripCommentsBeforeExecute(boolean stripCommentsBeforeExecute) { + this.stripCommentsBeforeExecute = stripCommentsBeforeExecute; + } + + @Override + public Dialect getDialect() { + return connection.getDialect(); + } + } + + public JdbcSqlScriptVerifier() {} + + public JdbcSqlScriptVerifier(GenericConnectionProvider connectionProvider) { + super(connectionProvider); + } + + @Override + protected void verifyExpectedException( + String statement, Exception e, String code, String messagePrefix) { + assertThat(e).isInstanceOf(JdbcSqlException.class); + JdbcSqlException jdbcException = (JdbcSqlException) e; + assertWithMessage(statement).that(jdbcException.getCode()).isEqualTo(Code.valueOf(code)); + if (messagePrefix != null) { + assertWithMessage(statement) + .that(e.getMessage()) + .startsWith(messagePrefix.substring(1, messagePrefix.length() - 1)); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcStatementTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcStatementTest.java new file mode 100644 index 
000000000000..cb4cc5a53cb2 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcStatementTest.java @@ -0,0 +1,765 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcConnection.NO_GENERATED_KEY_COLUMNS; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.anyList; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.ErrorCode; +import com.google.cloud.spanner.SpannerExceptionFactory; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.connection.AbstractStatementParser; +import com.google.cloud.spanner.connection.Connection; +import com.google.cloud.spanner.connection.StatementResult; +import com.google.cloud.spanner.connection.StatementResult.ResultType; +import 
com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.common.collect.ImmutableList; +import com.google.rpc.Code; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import javax.annotation.Nullable; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.mockito.stubbing.Answer; + +@RunWith(Parameterized.class) +public class JdbcStatementTest { + private static final String SELECT = "SELECT 1"; + private static final String UPDATE = "UPDATE FOO SET BAR=1 WHERE BAZ=2"; + private static final String LARGE_UPDATE = "UPDATE FOO SET BAR=1 WHERE 1=1"; + private static final String DML_RETURNING_GSQL = "UPDATE FOO SET BAR=1 WHERE 1=1 THEN RETURN *"; + private static final String DML_RETURNING_PG = "UPDATE FOO SET BAR=1 WHERE 1=1 RETURNING *"; + private static final String DDL = "CREATE INDEX FOO ON BAR(ID)"; + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + private String getDmlReturningSql() { + if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + return DML_RETURNING_GSQL; + } + return DML_RETURNING_PG; + } + + @SuppressWarnings("unchecked") + private JdbcStatement createStatement() throws SQLException { + Connection spanner = mock(Connection.class); + when(spanner.getDialect()).thenReturn(dialect); + + final String DML_RETURNING_SQL = getDmlReturningSql(); + + com.google.cloud.spanner.ResultSet resultSet = mock(com.google.cloud.spanner.ResultSet.class); + when(resultSet.next()).thenReturn(true, false); + when(resultSet.getColumnType(0)).thenReturn(Type.int64()); + when(resultSet.getLong(0)).thenReturn(1L); + + StatementResult selectResult = 
mock(StatementResult.class); + when(selectResult.getResultType()).thenReturn(ResultType.RESULT_SET); + when(selectResult.getResultSet()).thenReturn(resultSet); + when(spanner.execute(com.google.cloud.spanner.Statement.of(SELECT))).thenReturn(selectResult); + + StatementResult updateResult = mock(StatementResult.class); + when(updateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT); + when(updateResult.getUpdateCount()).thenReturn(1L); + when(spanner.execute(com.google.cloud.spanner.Statement.of(UPDATE))).thenReturn(updateResult); + when(spanner.execute(com.google.cloud.spanner.Statement.of(UPDATE + "\nTHEN RETURN *"))) + .thenReturn(updateResult); + when(spanner.execute(com.google.cloud.spanner.Statement.of(UPDATE + "\nRETURNING *"))) + .thenReturn(updateResult); + + StatementResult largeUpdateResult = mock(StatementResult.class); + when(largeUpdateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT); + when(largeUpdateResult.getUpdateCount()).thenReturn(Integer.MAX_VALUE + 1L); + when(spanner.execute(com.google.cloud.spanner.Statement.of(LARGE_UPDATE))) + .thenReturn(largeUpdateResult); + + com.google.cloud.spanner.ResultSet dmlReturningResultSet = + mock(com.google.cloud.spanner.ResultSet.class); + when(dmlReturningResultSet.next()).thenReturn(true, false); + when(dmlReturningResultSet.getColumnCount()).thenReturn(1); + when(dmlReturningResultSet.getColumnType(0)).thenReturn(Type.int64()); + when(dmlReturningResultSet.getLong(0)).thenReturn(1L); + + StatementResult dmlReturningResult = mock(StatementResult.class); + when(dmlReturningResult.getResultType()).thenReturn(ResultType.RESULT_SET); + when(dmlReturningResult.getResultSet()).thenReturn(dmlReturningResultSet); + when(spanner.execute(com.google.cloud.spanner.Statement.of(DML_RETURNING_SQL))) + .thenReturn(dmlReturningResult); + + StatementResult ddlResult = mock(StatementResult.class); + when(ddlResult.getResultType()).thenReturn(ResultType.NO_RESULT); + 
when(spanner.execute(com.google.cloud.spanner.Statement.of(DDL))).thenReturn(ddlResult); + + when(spanner.executeQuery(com.google.cloud.spanner.Statement.of(SELECT))).thenReturn(resultSet); + when(spanner.executeQuery(com.google.cloud.spanner.Statement.of(UPDATE))) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "not a query")); + when(spanner.executeQuery(com.google.cloud.spanner.Statement.of(DML_RETURNING_SQL))) + .thenReturn(dmlReturningResultSet); + when(spanner.executeQuery(com.google.cloud.spanner.Statement.of(DDL))) + .thenThrow( + SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "not a query")); + + when(spanner.executeUpdate(com.google.cloud.spanner.Statement.of(UPDATE))).thenReturn(1L); + when(spanner.executeUpdate(com.google.cloud.spanner.Statement.of(SELECT))) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "not an update")); + when(spanner.executeUpdate(com.google.cloud.spanner.Statement.of(DDL))) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.INVALID_ARGUMENT, "not an update")); + when(spanner.executeUpdate(com.google.cloud.spanner.Statement.of(DML_RETURNING_SQL))) + .thenThrow( + SpannerExceptionFactory.newSpannerException( + ErrorCode.FAILED_PRECONDITION, "cannot execute dml returning over executeUpdate")); + + when(spanner.executeBatchUpdate(anyList())) + .thenAnswer( + (Answer) + invocation -> { + List statements = + (List) invocation.getArguments()[0]; + if (statements.isEmpty() + || AbstractStatementParser.getInstance(dialect) + .isDdlStatement(statements.get(0).getSql())) { + return new long[0]; + } + long[] res = + new long + [((List) invocation.getArguments()[0]) + .size()]; + Arrays.fill(res, 1L); + return res; + }); + + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + 
when(connection.getParser()).thenReturn(AbstractStatementParser.getInstance(dialect)); + when(connection.getSpannerConnection()).thenReturn(spanner); + return new JdbcStatement(connection); + } + + @Test + public void testQueryTimeout() throws SQLException { + final String select = "SELECT 1"; + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + Connection spanner = mock(Connection.class); + when(connection.getSpannerConnection()).thenReturn(spanner); + StatementResult result = mock(StatementResult.class); + when(result.getResultType()).thenReturn(ResultType.RESULT_SET); + when(result.getResultSet()).thenReturn(mock(com.google.cloud.spanner.ResultSet.class)); + when(spanner.execute(com.google.cloud.spanner.Statement.of(select))).thenReturn(result); + try (Statement statement = new JdbcStatement(connection)) { + assertThat(statement.getQueryTimeout()).isEqualTo(0); + statement.setQueryTimeout(1); + assertThat(statement.getQueryTimeout()).isEqualTo(1); + statement.setQueryTimeout(99); + assertThat(statement.getQueryTimeout()).isEqualTo(99); + statement.setQueryTimeout(0); + assertThat(statement.getQueryTimeout()).isEqualTo(0); + } + + when(spanner.getStatementTimeout(TimeUnit.SECONDS)).thenReturn(1L); + when(spanner.getStatementTimeout(TimeUnit.MILLISECONDS)).thenReturn(1000L); + when(spanner.getStatementTimeout(TimeUnit.MICROSECONDS)).thenReturn(1000000L); + when(spanner.getStatementTimeout(TimeUnit.NANOSECONDS)).thenReturn(1000000000L); + when(spanner.hasStatementTimeout()).thenReturn(true); + try (Statement statement = new JdbcStatement(connection)) { + assertThat(statement.getQueryTimeout()).isEqualTo(0); + statement.execute(select); + // statement has no timeout, so it should also not be set on the connection + verify(spanner, never()).setStatementTimeout(1L, TimeUnit.SECONDS); + } + try (Statement statement = new JdbcStatement(connection)) { + // now set a query timeout that should temporarily applied 
to the connection + statement.setQueryTimeout(2); + statement.execute(select); + // assert that it is temporarily set to 2 seconds, and then back to the original 1 second + // value + verify(spanner).setStatementTimeout(2L, TimeUnit.SECONDS); + verify(spanner).setStatementTimeout(1L, TimeUnit.SECONDS); + } + } + + @Test + public void testExecuteWithSelectStatement() throws SQLException { + Statement statement = createStatement(); + boolean res = statement.execute(SELECT); + assertThat(res).isTrue(); + assertThat(statement.getUpdateCount()).isEqualTo(JdbcConstants.STATEMENT_RESULT_SET); + try (ResultSet rs = statement.getResultSet()) { + assertThat(rs).isNotNull(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(1)).isEqualTo(1L); + } + } + + @Test + public void testExecuteWithUpdateStatement() throws SQLException { + Statement statement = createStatement(); + boolean res = statement.execute(UPDATE); + assertThat(res).isFalse(); + assertThat(statement.getResultSet()).isNull(); + assertThat(statement.getUpdateCount()).isEqualTo(1); + assertThat(statement.execute(LARGE_UPDATE)).isFalse(); + assertThat(statement.getResultSet()).isNull(); + try { + statement.getUpdateCount(); + fail("missing expected exception"); + } catch (JdbcSqlExceptionImpl e) { + assertThat(e.getCode()).isEqualTo(Code.OUT_OF_RANGE); + } + assertThat(statement.getLargeUpdateCount()).isEqualTo(Integer.MAX_VALUE + 1L); + } + + @Test + public void testExecuteWithDdlStatement() throws SQLException { + Statement statement = createStatement(); + boolean res = statement.execute(DDL); + assertThat(res).isFalse(); + assertThat(statement.getResultSet()).isNull(); + assertThat(statement.getUpdateCount()).isEqualTo(JdbcConstants.STATEMENT_NO_RESULT); + } + + @Test + public void testExecuteWithDmlReturningStatement() throws SQLException { + Statement statement = createStatement(); + boolean res = statement.execute(getDmlReturningSql()); + assertTrue(res); + assertEquals(statement.getUpdateCount(), 
JdbcConstants.STATEMENT_RESULT_SET); + try (ResultSet rs = statement.getResultSet()) { + assertNotNull(rs); + assertTrue(rs.next()); + assertEquals(rs.getLong(1), 1L); + assertFalse(rs.next()); + } + } + + @Test + public void testExecuteWithGeneratedKeys() throws SQLException { + try (Statement statement = createStatement()) { + assertFalse(statement.execute(UPDATE, Statement.NO_GENERATED_KEYS)); + ResultSet keys = statement.getGeneratedKeys(); + assertFalse(keys.next()); + + statement.execute(UPDATE, Statement.RETURN_GENERATED_KEYS); + keys = statement.getGeneratedKeys(); + assertFalse(keys.next()); + } + } + + @Test + public void testExecuteQuery() throws SQLException { + Statement statement = createStatement(); + try (ResultSet rs = statement.executeQuery(SELECT)) { + assertThat(rs).isNotNull(); + assertThat(rs.next()).isTrue(); + assertThat(rs.getLong(1)).isEqualTo(1L); + } + } + + @Test + public void testExecuteQueryWithUpdateStatement() throws SQLException { + try (Statement statement = createStatement()) { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> statement.executeQuery(UPDATE)); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue(sqlException.getMessage(), sqlException.getMessage().contains("not a query")); + } + } + + @Test + public void testExecuteQueryWithDmlReturningStatement() throws SQLException { + Statement statement = createStatement(); + try (ResultSet rs = statement.executeQuery(getDmlReturningSql())) { + assertNotNull(rs); + assertTrue(rs.next()); + assertEquals(rs.getLong(1), 1L); + assertFalse(rs.next()); + } + } + + @Test + public void testExecuteQueryWithDdlStatement() throws SQLException { + try (Statement statement = createStatement()) { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> statement.executeQuery(DDL)); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue(sqlException.getMessage(), 
sqlException.getMessage().contains("not a query")); + } + } + + @Test + public void testExecuteUpdate() throws SQLException { + try (Statement statement = createStatement()) { + assertEquals(1, statement.executeUpdate(UPDATE)); + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> statement.executeUpdate(LARGE_UPDATE)); + assertEquals(Code.OUT_OF_RANGE, sqlException.getCode()); + } + } + + @Test + public void testInternalExecuteUpdate() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + Connection spannerConnection = mock(Connection.class); + when(connection.getSpannerConnection()).thenReturn(spannerConnection); + com.google.cloud.spanner.Statement updateStatement = + com.google.cloud.spanner.Statement.of(UPDATE); + com.google.cloud.spanner.Statement largeUpdateStatement = + com.google.cloud.spanner.Statement.of(LARGE_UPDATE); + StatementResult updateResult = mock(StatementResult.class); + when(updateResult.getUpdateCount()).thenReturn(1L); + when(updateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT); + when(spannerConnection.execute(updateStatement)).thenReturn(updateResult); + StatementResult largeUpdateResult = mock(StatementResult.class); + when(largeUpdateResult.getUpdateCount()).thenReturn(Integer.MAX_VALUE + 1L); + when(largeUpdateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT); + when(spannerConnection.execute(largeUpdateStatement)).thenReturn(largeUpdateResult); + try (JdbcStatement statement = new JdbcStatement(connection)) { + assertThat(statement.executeUpdate(UPDATE)).isEqualTo(1); + try { + statement.executeUpdate(LARGE_UPDATE); + fail("missing expected exception"); + } catch (JdbcSqlExceptionImpl e) { + assertThat(e.getCode()).isEqualTo(Code.OUT_OF_RANGE); + } + } + } + + @Test + public void testInternalExecuteLargeUpdate() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + 
when(connection.getDialect()).thenReturn(dialect); + Connection spannerConnection = mock(Connection.class); + when(connection.getSpannerConnection()).thenReturn(spannerConnection); + com.google.cloud.spanner.Statement updateStatement = + com.google.cloud.spanner.Statement.of(UPDATE); + com.google.cloud.spanner.Statement largeUpdateStatement = + com.google.cloud.spanner.Statement.of(LARGE_UPDATE); + StatementResult updateResult = mock(StatementResult.class); + when(updateResult.getUpdateCount()).thenReturn(1L); + when(updateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT); + when(spannerConnection.execute(updateStatement)).thenReturn(updateResult); + StatementResult largeUpdateResult = mock(StatementResult.class); + when(largeUpdateResult.getUpdateCount()).thenReturn(Integer.MAX_VALUE + 1L); + when(largeUpdateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT); + when(spannerConnection.execute(largeUpdateStatement)).thenReturn(largeUpdateResult); + try (JdbcStatement statement = new JdbcStatement(connection)) { + assertThat(statement.executeLargeUpdate(UPDATE)).isEqualTo(1); + assertThat(statement.executeLargeUpdate(LARGE_UPDATE)).isEqualTo(Integer.MAX_VALUE + 1L); + } + } + + @Test + public void testExecuteLargeUpdate() throws SQLException { + Statement statement = createStatement(); + assertThat(statement.executeLargeUpdate(UPDATE)).isEqualTo(1L); + assertThat(statement.executeLargeUpdate(LARGE_UPDATE)).isEqualTo(Integer.MAX_VALUE + 1L); + + assertThat(statement.executeLargeUpdate(UPDATE, Statement.NO_GENERATED_KEYS)).isEqualTo(1L); + assertThat(statement.executeLargeUpdate(LARGE_UPDATE, Statement.NO_GENERATED_KEYS)) + .isEqualTo(Integer.MAX_VALUE + 1L); + + assertThat(statement.executeLargeUpdate(UPDATE, new int[0])).isEqualTo(1L); + assertThat(statement.executeLargeUpdate(LARGE_UPDATE, new int[0])) + .isEqualTo(Integer.MAX_VALUE + 1L); + + assertThat(statement.executeLargeUpdate(UPDATE, new String[0])).isEqualTo(1L); + 
assertThat(statement.executeLargeUpdate(LARGE_UPDATE, new String[0])) + .isEqualTo(Integer.MAX_VALUE + 1L); + } + + @Test + public void testExecuteUpdateWithSelectStatement() throws SQLException { + try (Statement statement = createStatement()) { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> statement.executeUpdate(SELECT)); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException + .getMessage() + .contains("The statement is not a non-returning DML or DDL statement")); + } + } + + @Test + public void testExecuteUpdateWithDmlReturningStatement() throws SQLException { + try (Statement statement = createStatement()) { + JdbcSqlExceptionImpl sqlException = + assertThrows( + JdbcSqlExceptionImpl.class, () -> statement.executeUpdate(getDmlReturningSql())); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException + .getMessage() + .contains("The statement is not a non-returning DML or DDL statement")); + } + } + + @Test + public void testExecuteUpdateWithDdlStatement() throws SQLException { + Statement statement = createStatement(); + assertThat(statement.executeUpdate(DDL)).isEqualTo(0); + } + + @Test + public void testExecuteUpdateWithGeneratedKeys() throws SQLException { + try (Statement statement = createStatement()) { + assertEquals(1, statement.executeUpdate(UPDATE, Statement.NO_GENERATED_KEYS)); + ResultSet keys = statement.getGeneratedKeys(); + assertFalse(keys.next()); + + assertEquals(1, statement.executeUpdate(UPDATE, Statement.RETURN_GENERATED_KEYS)); + keys = statement.getGeneratedKeys(); + assertFalse(keys.next()); + } + } + + @Test + public void testMoreResults() throws SQLException { + Statement statement = createStatement(); + assertThat(statement.execute(SELECT)).isTrue(); + ResultSet rs = statement.getResultSet(); + assertThat(statement.getMoreResults()).isFalse(); + 
assertThat(statement.getResultSet()).isNull(); + assertThat(rs.isClosed()).isTrue(); + + assertThat(statement.execute(SELECT)).isTrue(); + rs = statement.getResultSet(); + assertThat(statement.getMoreResults(Statement.KEEP_CURRENT_RESULT)).isFalse(); + assertThat(statement.getResultSet()).isNull(); + assertThat(rs.isClosed()).isFalse(); + } + + @Test + public void testNoBatchMixing() throws SQLException { + try (Statement statement = createStatement()) { + statement.addBatch("INSERT INTO FOO (ID, NAME) VALUES (1, 'FOO')"); + JdbcSqlExceptionImpl sqlException = + assertThrows( + JdbcSqlExceptionImpl.class, + () -> + statement.addBatch( + "CREATE TABLE FOO (ID INT64, NAME STRING(100)) PRIMARY KEY (ID)")); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException + .getMessage() + .contains("Mixing DML and DDL statements in a batch is not allowed.")); + } + } + + @Test + public void testNoBatchQuery() throws SQLException { + try (Statement statement = createStatement()) { + JdbcSqlExceptionImpl sqlException = + assertThrows(JdbcSqlExceptionImpl.class, () -> statement.addBatch("SELECT * FROM FOO")); + assertEquals(Code.INVALID_ARGUMENT, sqlException.getCode()); + assertTrue( + sqlException.getMessage(), + sqlException + .getMessage() + .contains( + "The statement is not suitable for batching. Only DML and DDL statements are allowed for batching.")); + } + } + + @Test + public void testDmlBatch() throws SQLException { + try (Statement statement = createStatement()) { + // Verify that multiple batches can be executed on the same statement. 
+ for (int i = 0; i < 2; i++) { + statement.addBatch("INSERT INTO FOO (ID, NAME) VALUES (1, 'TEST')"); + statement.addBatch("INSERT INTO FOO (ID, NAME) VALUES (2, 'TEST')"); + statement.addBatch("INSERT INTO FOO (ID, NAME) VALUES (3, 'TEST')"); + assertThat(statement.executeBatch()).asList().containsExactly(1, 1, 1); + } + } + } + + @Test + public void testDmlBatchWithDmlReturning() throws SQLException { + try (Statement statement = createStatement()) { + // Verify that multiple batches can be executed on the same statement. + for (int i = 0; i < 2; i++) { + statement.addBatch(getDmlReturningSql()); + statement.addBatch(getDmlReturningSql()); + statement.addBatch(getDmlReturningSql()); + assertArrayEquals(statement.executeBatch(), new int[] {1, 1, 1}); + } + } + } + + @Test + public void testLargeDmlBatch() throws SQLException { + try (Statement statement = createStatement()) { + // Verify that multiple batches can be executed on the same statement. + for (int i = 0; i < 2; i++) { + statement.addBatch("INSERT INTO FOO (ID, NAME) VALUES (1, 'TEST')"); + statement.addBatch("INSERT INTO FOO (ID, NAME) VALUES (2, 'TEST')"); + statement.addBatch("INSERT INTO FOO (ID, NAME) VALUES (3, 'TEST')"); + assertThat(statement.executeLargeBatch()).asList().containsExactly(1L, 1L, 1L); + } + } + } + + @Test + public void testConvertUpdateCounts() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + try (JdbcStatement statement = new JdbcStatement(connection)) { + int[] updateCounts = statement.convertUpdateCounts(new long[] {1L, 2L, 3L}); + assertThat(updateCounts).asList().containsExactly(1, 2, 3); + updateCounts = statement.convertUpdateCounts(new long[] {0L, 0L, 0L}); + assertThat(updateCounts).asList().containsExactly(0, 0, 0); + + JdbcSqlExceptionImpl sqlException = + assertThrows( + JdbcSqlExceptionImpl.class, + () -> statement.convertUpdateCounts(new long[] {1L, Integer.MAX_VALUE + 1L})); + 
assertEquals(Code.OUT_OF_RANGE, sqlException.getCode()); + } + } + + @Test + public void testConvertUpdateCountsToSuccessNoInfo() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + try (JdbcStatement statement = new JdbcStatement(connection)) { + long[] updateCounts = new long[3]; + statement.convertUpdateCountsToSuccessNoInfo(new long[] {1L, 2L, 3L}, updateCounts); + assertThat(updateCounts) + .asList() + .containsExactly( + (long) Statement.SUCCESS_NO_INFO, + (long) Statement.SUCCESS_NO_INFO, + (long) Statement.SUCCESS_NO_INFO); + + statement.convertUpdateCountsToSuccessNoInfo(new long[] {0L, 0L, 0L}, updateCounts); + assertThat(updateCounts) + .asList() + .containsExactly( + (long) Statement.EXECUTE_FAILED, + (long) Statement.EXECUTE_FAILED, + (long) Statement.EXECUTE_FAILED); + + statement.convertUpdateCountsToSuccessNoInfo(new long[] {1L, 0L, 2L}, updateCounts); + assertThat(updateCounts) + .asList() + .containsExactly( + (long) Statement.SUCCESS_NO_INFO, + (long) Statement.EXECUTE_FAILED, + (long) Statement.SUCCESS_NO_INFO); + + statement.convertUpdateCountsToSuccessNoInfo( + new long[] {1L, Integer.MAX_VALUE + 1L, 2L}, updateCounts); + assertThat(updateCounts) + .asList() + .containsExactly( + (long) Statement.SUCCESS_NO_INFO, + (long) Statement.SUCCESS_NO_INFO, + (long) Statement.SUCCESS_NO_INFO); + } + } + + @Test + public void testAddReturningToStatement() throws SQLException { + JdbcConnection connection = mock(JdbcConnection.class); + when(connection.getDialect()).thenReturn(dialect); + when(connection.getParser()).thenReturn(AbstractStatementParser.getInstance(dialect)); + try (JdbcStatement statement = new JdbcStatement(connection)) { + assertAddReturningSame( + statement, "insert into test (id, value) values (1, 'One')", NO_GENERATED_KEY_COLUMNS); + assertAddReturningSame( + statement, "insert into test (id, value) values (1, 'One')", ImmutableList.of()); + 
assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? "insert into test (id, value) values (1, 'One')\nRETURNING \"id\"" + : "insert into test (id, value) values (1, 'One')\nTHEN RETURN `id`", + "insert into test (id, value) values (1, 'One')", + ImmutableList.of("id")); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? "insert into test (id, value) values (1, 'One')\nRETURNING \"id\", \"value\"" + : "insert into test (id, value) values (1, 'One')\nTHEN RETURN `id`, `value`", + "insert into test (id, value) values (1, 'One')", + ImmutableList.of("id", "value")); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? "insert into test (id, value) values (1, 'One')\nRETURNING *" + : "insert into test (id, value) values (1, 'One')\nTHEN RETURN *", + "insert into test (id, value) values (1, 'One')", + ImmutableList.of("*")); + // Requesting generated keys for a DML statement that already contains a returning clause is a + // no-op. + assertAddReturningSame( + statement, + "insert into test (id, value) values (1, 'One') " + + statement.getReturningClause() + + " value", + ImmutableList.of("id")); + // Requesting generated keys for a query is a no-op. + for (ImmutableList keys : + ImmutableList.of( + ImmutableList.of("id"), ImmutableList.of("id", "value"), ImmutableList.of("*"))) { + assertAddReturningSame(statement, "select id, value from test", keys); + } + + // Update statements may also request generated keys. + assertAddReturningSame( + statement, "update test set value='Two' where id=1", NO_GENERATED_KEY_COLUMNS); + assertAddReturningSame( + statement, "update test set value='Two' where id=1", ImmutableList.of()); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? 
"update test set value='Two' where id=1\nRETURNING \"value\"" + : "update test set value='Two' where id=1\nTHEN RETURN `value`", + "update test set value='Two' where id=1", + ImmutableList.of("value")); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? "update test set value='Two' where id=1\nRETURNING \"value\", \"id\"" + : "update test set value='Two' where id=1\nTHEN RETURN `value`, `id`", + "update test set value='Two' where id=1", + ImmutableList.of("value", "id")); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? "update test set value='Two' where id=1\nRETURNING *" + : "update test set value='Two' where id=1\nTHEN RETURN *", + "update test set value='Two' where id=1", + ImmutableList.of("*")); + // Requesting generated keys for a DML statement that already contains a returning clause is a + // no-op. + assertAddReturningSame( + statement, + "update test set value='Two' where id=1 " + statement.getReturningClause() + " value", + ImmutableList.of("value")); + + // Delete statements may also request generated keys. + assertAddReturningSame(statement, "delete test where id=1", NO_GENERATED_KEY_COLUMNS); + assertAddReturningSame(statement, "delete test where id=1", ImmutableList.of()); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? "delete test where id=1\nRETURNING \"value\"" + : "delete test where id=1\nTHEN RETURN `value`", + "delete test where id=1", + ImmutableList.of("value")); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? "delete test where id=1\nRETURNING \"id\", \"value\"" + : "delete test where id=1\nTHEN RETURN `id`, `value`", + "delete test where id=1", + ImmutableList.of("id", "value")); + assertAddReturningEquals( + statement, + dialect == Dialect.POSTGRESQL + ? 
"delete test where id=1\nRETURNING *" + : "delete test where id=1\nTHEN RETURN *", + "delete test where id=1", + ImmutableList.of("*")); + // Requesting generated keys for a DML statement that already contains a returning clause is a + // no-op. + for (ImmutableList keys : + ImmutableList.of( + ImmutableList.of("id"), ImmutableList.of("id", "value"), ImmutableList.of("*"))) { + assertAddReturningSame( + statement, + "delete test where id=1 " + + (dialect == Dialect.POSTGRESQL + ? "delete test where id=1\nRETURNING" + : "delete test where id=1\nTHEN RETURN") + + " value", + keys); + } + + // Requesting generated keys for DDL is a no-op. + for (ImmutableList keys : + ImmutableList.of( + ImmutableList.of("id"), ImmutableList.of("id", "value"), ImmutableList.of("*"))) { + assertAddReturningSame( + statement, + dialect == Dialect.POSTGRESQL + ? "create table test (id bigint primary key, value varchar)" + : "create table test (id int64, value string(max)) primary key (id)", + keys); + } + } + } + + private void assertAddReturningSame( + JdbcStatement statement, String sql, @Nullable ImmutableList generatedKeysColumns) + throws SQLException { + com.google.cloud.spanner.Statement spannerStatement = + com.google.cloud.spanner.Statement.of(sql); + assertSame( + spannerStatement, + statement.addReturningToStatement(spannerStatement, generatedKeysColumns)); + } + + private void assertAddReturningEquals( + JdbcStatement statement, + String expectedSql, + String sql, + @Nullable ImmutableList generatedKeysColumns) + throws SQLException { + com.google.cloud.spanner.Statement spannerStatement = + com.google.cloud.spanner.Statement.of(sql); + assertEquals( + com.google.cloud.spanner.Statement.of(expectedSql), + statement.addReturningToStatement(spannerStatement, generatedKeysColumns)); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcStatementTimeoutTest.java 
b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcStatementTimeoutTest.java new file mode 100644 index 000000000000..7ce51d362dd2 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcStatementTimeoutTest.java @@ -0,0 +1,176 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; + +import com.google.cloud.spanner.MockSpannerServiceImpl.SimulatedExecutionTime; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlTimeoutException; +import com.google.rpc.Code; +import com.google.spanner.v1.ExecuteSqlRequest; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.time.Duration; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import 
org.junit.runners.Parameterized.Parameters; + +/** Tests setting a statement timeout. */ +@RunWith(Parameterized.class) +public class JdbcStatementTimeoutTest extends AbstractMockServerTest { + + @Parameter public boolean useVirtualThreads; + + @Parameters(name = "useVirtualThreads = {0}") + public static Object[] data() { + return new Boolean[] {false, true}; + } + + @Override + protected String getBaseUrl() { + return super.getBaseUrl() + ";useVirtualThreads=" + this.useVirtualThreads; + } + + @After + public void resetExecutionTimes() { + mockSpanner.removeAllExecutionTimes(); + } + + @Test + public void testExecuteTimeout() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + try (Statement statement = connection.createStatement()) { + // First verify that execute does not time out by default. + assertFalse(statement.execute(INSERT_STATEMENT.getSql())); + int result = statement.getUpdateCount(); + assertEquals(1, result); + + // Simulate that executeSql takes 2 seconds and set a statement timeout of 1 second. + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(2000, 0)); + ((JdbcStatement) statement).setQueryTimeout(Duration.ofMillis(5L)); + assertThrows( + JdbcSqlTimeoutException.class, () -> statement.execute(INSERT_STATEMENT.getSql())); + } + } + } + + @Test + public void testExecuteQueryTimeout() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + try (Statement statement = connection.createStatement()) { + // First verify that executeQuery does not time out by default. + try (ResultSet resultSet = statement.executeQuery(SELECT_RANDOM_STATEMENT.getSql())) { + int count = 0; + while (resultSet.next()) { + count++; + } + assertEquals(RANDOM_RESULT_SET_ROW_COUNT, count); + } + + // Simulate that executeStreamingSql takes 2 seconds and set a statement timeout of 1 + // second. 
+ mockSpanner.setExecuteStreamingSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(2000, 0)); + ((JdbcStatement) statement).setQueryTimeout(Duration.ofMillis(5L)); + assertThrows( + JdbcSqlTimeoutException.class, + () -> statement.executeQuery(SELECT_RANDOM_STATEMENT.getSql())); + } + } + } + + @Test + public void testExecuteUpdateTimeout() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + try (Statement statement = connection.createStatement()) { + // First verify that executeUpdate does not time out by default. + assertEquals(1, statement.executeUpdate(INSERT_STATEMENT.getSql())); + + // Simulate that executeSql takes 2 seconds and set a statement timeout of 1 second. + mockSpanner.setExecuteSqlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(2000, 0)); + ((JdbcStatement) statement).setQueryTimeout(Duration.ofMillis(5L)); + assertThrows( + JdbcSqlTimeoutException.class, + () -> statement.executeUpdate(INSERT_STATEMENT.getSql())); + } + } + } + + @Test + public void testExecuteBatchTimeout() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + try (Statement statement = connection.createStatement()) { + // First verify that batch dml does not time out by default. + statement.addBatch(INSERT_STATEMENT.getSql()); + int[] result = statement.executeBatch(); + assertArrayEquals(new int[] {1}, result); + + // Simulate that executeBatchDml takes 2 seconds and set a statement timeout of 1 second. 
+ mockSpanner.setExecuteBatchDmlExecutionTime( + SimulatedExecutionTime.ofMinimumAndRandomTime(2000, 0)); + ((JdbcStatement) statement).setQueryTimeout(Duration.ofMillis(5L)); + statement.addBatch(INSERT_STATEMENT.getSql()); + assertThrows(JdbcSqlTimeoutException.class, statement::executeBatch); + } + } + } + + @Test + public void testCancel() throws Exception { + ExecutorService service = Executors.newSingleThreadExecutor(); + String sql = INSERT_STATEMENT.getSql(); + + try (java.sql.Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + mockSpanner.freeze(); + Future future = + service.submit( + () -> { + // Wait until the request has landed on the server and then cancel the statement. + mockSpanner.waitForRequestsToContain( + message -> + message instanceof ExecuteSqlRequest + && ((ExecuteSqlRequest) message).getSql().equals(sql), + 5000L); + statement.cancel(); + return null; + }); + JdbcSqlExceptionImpl exception = + assertThrows(JdbcSqlExceptionImpl.class, () -> statement.execute(sql)); + assertEquals(Code.CANCELLED, exception.getCode()); + assertNull(future.get()); + } finally { + mockSpanner.unfreeze(); + service.shutdown(); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTimeoutSqlTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTimeoutSqlTest.java new file mode 100644 index 000000000000..0ecb01f87e0d --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTimeoutSqlTest.java @@ -0,0 +1,52 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.jdbc.JdbcConnectionGeneratedSqlScriptTest.TestConnectionProvider; +import java.sql.Connection; +import java.sql.Statement; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +/** + * As JDBC connections store the statement timeout on {@link Statement} objects instead of on the + * {@link Connection}, the JDBC driver needs to set and reset the connection timeout on the + * underlying connection after each statement execution. JDBC also uses seconds as the time unit for + * timeouts, while the underlying {@link com.google.cloud.spanner.connection.Connection}s use + * milliseconds. This test script tests a number of special cases regarding this. + */ +@RunWith(Parameterized.class) +public class JdbcTimeoutSqlTest { + + @Parameter public Dialect dialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Test + public void testTimeoutScript() throws Exception { + JdbcSqlScriptVerifier verifier = new JdbcSqlScriptVerifier(new TestConnectionProvider(dialect)); + String prefix = dialect == Dialect.POSTGRESQL ? 
"PostgreSQL/" : ""; + verifier.verifyStatementsInFile(prefix + "TimeoutSqlScriptTest.sql", getClass(), false); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTransactionOptionsTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTransactionOptionsTest.java new file mode 100644 index 000000000000..8a5af27c0277 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTransactionOptionsTest.java @@ -0,0 +1,205 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.CommitResponse; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.TransactionOptions.IsolationLevel; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.time.Duration; +import java.util.Arrays; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcTransactionOptionsTest extends AbstractMockServerTest { + + @After + public void closeSpannerPool() { + SpannerPool.closeSpannerPool(); + } + + @Test + public void testDefaultReturnCommitStats() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + try (java.sql.ResultSet rs = + connection.createStatement().executeQuery("SHOW VARIABLE RETURN_COMMIT_STATS")) { + assertTrue(rs.next()); + assertFalse(rs.getBoolean("RETURN_COMMIT_STATS")); + assertFalse(rs.next()); + } + } + } + + @Test + public void testReturnCommitStatsInConnectionUrl() throws SQLException { + try (java.sql.Connection connection = + DriverManager.getConnection( + String.format("jdbc:%s;returnCommitStats=true", getBaseUrl()))) { + try (java.sql.ResultSet rs = + connection.createStatement().executeQuery("SHOW VARIABLE RETURN_COMMIT_STATS")) { + assertTrue(rs.next()); + assertTrue(rs.getBoolean("RETURN_COMMIT_STATS")); + assertFalse(rs.next()); + } + 
} + } + + @Test + public void testSetReturnCommitStats() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + connection.createStatement().execute("SET RETURN_COMMIT_STATS=true"); + try (java.sql.ResultSet rs = + connection.createStatement().executeQuery("SHOW VARIABLE RETURN_COMMIT_STATS")) { + assertTrue(rs.next()); + assertTrue(rs.getBoolean("RETURN_COMMIT_STATS")); + assertFalse(rs.next()); + } + connection.createStatement().execute("SET RETURN_COMMIT_STATS=false"); + try (java.sql.ResultSet rs = + connection.createStatement().executeQuery("SHOW VARIABLE RETURN_COMMIT_STATS")) { + assertTrue(rs.next()); + assertFalse(rs.getBoolean("RETURN_COMMIT_STATS")); + assertFalse(rs.next()); + } + } + } + + @Test + public void testSetAndUseReturnCommitStats() throws SQLException { + try (CloudSpannerJdbcConnection connection = + createJdbcConnection().unwrap(CloudSpannerJdbcConnection.class)) { + connection.setReturnCommitStats(true); + connection.bufferedWrite(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + connection.commit(); + CommitResponse response = connection.getCommitResponse(); + assertNotNull(response); + assertNotNull(response.getCommitStats()); + assertThat(response.getCommitStats().getMutationCount()).isAtLeast(1); + } + } + + @Test + public void testSetAndUseReturnCommitStatsUsingSql() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + connection.createStatement().execute("SET RETURN_COMMIT_STATS=true"); + // Use a Mutation as the mock server only returns a non-zero mutation count for mutations, and + // not for DML statements. 
+ connection + .unwrap(CloudSpannerJdbcConnection.class) + .bufferedWrite(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + connection.commit(); + try (ResultSet rs = + connection.createStatement().executeQuery("SHOW VARIABLE COMMIT_RESPONSE")) { + assertTrue(rs.next()); + assertNotNull(rs.getTimestamp("COMMIT_TIMESTAMP")); + assertThat(rs.getLong("MUTATION_COUNT")).isAtLeast(1L); + assertFalse(rs.next()); + } + } + } + + @Test + public void testMaxCommitDelay() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + connection.createStatement().execute("SET max_commit_delay='50ms'"); + connection + .unwrap(CloudSpannerJdbcConnection.class) + .bufferedWrite(Mutation.newInsertBuilder("FOO").set("ID").to(1L).build()); + connection.commit(); + + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + CommitRequest request = mockSpanner.getRequestsOfType(CommitRequest.class).get(0); + assertEquals(Duration.ofMillis(50).toNanos(), request.getMaxCommitDelay().getNanos()); + } + } + + @Test + public void testDefaultIsolationLevel() throws SQLException { + for (IsolationLevel isolationLevel : + Arrays.stream(IsolationLevel.values()) + .filter(level -> !level.equals(IsolationLevel.UNRECOGNIZED)) + .collect(Collectors.toList())) { + try (java.sql.Connection connection = + DriverManager.getConnection( + "jdbc:" + getBaseUrl() + ";default_isolation_level=" + isolationLevel.name())) { + connection.setAutoCommit(false); + try (ResultSet resultSet = + connection.createStatement().executeQuery(SELECT1_STATEMENT.getSql())) { + while (resultSet.next()) { + // ignore + } + } + connection.commit(); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + 
assertEquals(isolationLevel, request.getTransaction().getBegin().getIsolationLevel()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + + mockSpanner.clearRequests(); + } + } + } + + @Test + public void testSetIsolationLevel() throws SQLException { + try (java.sql.Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + for (int isolationLevel : + new int[] {Connection.TRANSACTION_REPEATABLE_READ, Connection.TRANSACTION_SERIALIZABLE}) { + connection.setTransactionIsolation(isolationLevel); + try (ResultSet resultSet = + connection.createStatement().executeQuery(SELECT1_STATEMENT.getSql())) { + while (resultSet.next()) { + // ignore + } + } + connection.commit(); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + assertEquals( + IsolationLevelConverter.convertToSpanner(isolationLevel), + request.getTransaction().getBegin().getIsolationLevel()); + assertEquals(1, mockSpanner.countRequestsOfType(CommitRequest.class)); + + mockSpanner.clearRequests(); + } + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTypeConverterTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTypeConverterTest.java new file mode 100644 index 000000000000..2d3727970561 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/JdbcTypeConverterTest.java @@ -0,0 +1,1047 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.convert; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.getAsSqlTimestamp; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.setTimestampInCalendar; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toGoogleBytes; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toGoogleDate; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toGoogleDates; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toGoogleTimestamp; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toJavaByteArrays; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toSqlDate; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toSqlDates; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toSqlTime; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toSqlTimestamp; +import static com.google.cloud.spanner.jdbc.JdbcTypeConverter.toSqlTimestamps; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +import com.google.cloud.ByteArray; +import com.google.cloud.spanner.Type; +import com.google.cloud.spanner.connection.ReadOnlyStalenessUtil; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.cloud.spanner.jdbc.it.SingerProto.Genre; +import 
com.google.cloud.spanner.jdbc.it.SingerProto.SingerInfo; +import com.google.rpc.Code; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.sql.Array; +import java.sql.Date; +import java.sql.SQLException; +import java.sql.Time; +import java.sql.Timestamp; +import java.text.DecimalFormat; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.List; +import java.util.TimeZone; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class JdbcTypeConverterTest { + private static final Charset UTF8 = StandardCharsets.UTF_8; + + @Test + public void testConvertArray() throws SQLException { + Array testValue = JdbcArray.createArray("INT64", new Long[] {1L, 2L, 3L}); + for (Type t : + new Type[] { + Type.bool(), + Type.bytes(), + Type.date(), + Type.float64(), + Type.int64(), + Type.string(), + Type.json(), + Type.timestamp(), + Type.numeric(), + }) { + assertConvertThrows(testValue, Type.array(t), Boolean.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), Float.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), Double.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.array(t), BigDecimal.class, Code.INVALID_ARGUMENT); + + assertThat(convert(testValue, Type.array(t), Array.class)).isEqualTo(testValue); + 
assertThat(convert(testValue, Type.array(t), String.class)).isEqualTo("{1,2,3}"); + } + } + + @Test + public void testConvertBool() throws SQLException { + Boolean[] testValues = new Boolean[] {Boolean.TRUE, Boolean.FALSE}; + for (Boolean b : testValues) { + assertThat(convert(b, Type.bool(), Boolean.class)).isEqualTo(b); + assertThat(convert(b, Type.bool(), Byte.class)).isEqualTo(b ? (byte) 1 : (byte) 0); + assertThat(convert(b, Type.bool(), Short.class)).isEqualTo(b ? (short) 1 : (short) 0); + assertThat(convert(b, Type.bool(), Integer.class)).isEqualTo(b ? 1 : 0); + assertThat(convert(b, Type.bool(), Long.class)).isEqualTo(b ? 1L : 0L); + assertThat(convert(b, Type.bool(), Float.class)).isEqualTo(b ? 1F : 0F); + assertThat(convert(b, Type.bool(), Double.class)).isEqualTo(b ? 1D : 0D); + assertThat(convert(b, Type.bool(), BigInteger.class)) + .isEqualTo(b ? BigInteger.ONE : BigInteger.ZERO); + assertThat(convert(b, Type.bool(), BigDecimal.class)) + .isEqualTo(b ? BigDecimal.ONE : BigDecimal.ZERO); + assertThat(convert(b, Type.bool(), String.class)).isEqualTo(String.valueOf(b)); + } + } + + @Test + public void testConvertBytes() throws SQLException { + byte[] testValues = "test".getBytes(UTF8); + assertConvertThrows(testValues, Type.bytes(), Boolean.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), Float.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), Double.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.bytes(), BigDecimal.class, Code.INVALID_ARGUMENT); + + 
assertThat(convert(testValues, Type.bytes(), byte[].class)).isEqualTo(testValues); + assertThat(convert(testValues, Type.bytes(), String.class)).isEqualTo("test"); + } + + @Test + public void testConvertProtoMessage() throws SQLException { + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + byte[] testValues = singerInfo.toByteArray(); + String protoTypeFqn = SingerInfo.getDescriptor().getFullName(); + assertConvertThrows(testValues, Type.proto(protoTypeFqn), Boolean.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.proto(protoTypeFqn), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.proto(protoTypeFqn), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.proto(protoTypeFqn), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.proto(protoTypeFqn), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.proto(protoTypeFqn), Float.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValues, Type.proto(protoTypeFqn), Double.class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.proto(protoTypeFqn), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.proto(protoTypeFqn), BigDecimal.class, Code.INVALID_ARGUMENT); + + assertEquals(testValues, convert(testValues, Type.proto(protoTypeFqn), byte[].class)); + assertEquals(singerInfo, convert(testValues, Type.proto(protoTypeFqn), SingerInfo.class)); + } + + @Test + public void testConvertProtoMessageArray() throws SQLException { + SingerInfo singerInfo = + SingerInfo.newBuilder().setSingerId(1).setNationality("Country1").build(); + Array testValues = + JdbcArray.createArray("PROTO", new byte[][] {singerInfo.toByteArray(), null}); + String protoTypeFqn = SingerInfo.getDescriptor().getFullName(); + assertConvertThrows( + testValues, Type.array(Type.proto(protoTypeFqn)), Boolean[].class, 
Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.proto(protoTypeFqn)), Byte[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.proto(protoTypeFqn)), Short[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.proto(protoTypeFqn)), Integer[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.proto(protoTypeFqn)), Long[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.proto(protoTypeFqn)), Float[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.proto(protoTypeFqn)), Double[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, + Type.array(Type.proto(protoTypeFqn)), + BigInteger[].class, + Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, + Type.array(Type.proto(protoTypeFqn)), + BigDecimal[].class, + Code.INVALID_ARGUMENT); + + assertEquals( + testValues, convert(testValues, Type.array(Type.proto(protoTypeFqn)), Array.class)); + assertArrayEquals( + new SingerInfo[] {singerInfo, null}, + (Object[]) convert(testValues, Type.array(Type.proto(protoTypeFqn)), SingerInfo[].class)); + } + + @Test + public void testConvertProtoEnumArray() throws SQLException { + Array testValues = + JdbcArray.createArray("ENUM", new Long[] {(long) Genre.ROCK.getNumber(), null}); + String protoTypeFqn = Genre.getDescriptor().getFullName(); + assertConvertThrows( + testValues, + Type.array(Type.protoEnum(protoTypeFqn)), + Boolean[].class, + Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.protoEnum(protoTypeFqn)), Byte[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.protoEnum(protoTypeFqn)), Short[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, + Type.array(Type.protoEnum(protoTypeFqn)), + Integer[].class, + Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, 
Type.array(Type.protoEnum(protoTypeFqn)), Long[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, Type.array(Type.protoEnum(protoTypeFqn)), Float[].class, Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, + Type.array(Type.protoEnum(protoTypeFqn)), + Double[].class, + Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, + Type.array(Type.protoEnum(protoTypeFqn)), + BigInteger[].class, + Code.INVALID_ARGUMENT); + assertConvertThrows( + testValues, + Type.array(Type.protoEnum(protoTypeFqn)), + BigDecimal[].class, + Code.INVALID_ARGUMENT); + + assertEquals( + testValues, convert(testValues, Type.array(Type.protoEnum(protoTypeFqn)), Array.class)); + assertArrayEquals( + new Genre[] {Genre.ROCK, null}, + (Object[]) convert(testValues, Type.array(Type.protoEnum(protoTypeFqn)), Genre[].class)); + } + + @Test + public void testConvertProtoEnum() throws SQLException { + long testValue = Genre.ROCK.getNumber(); + String protoTypeFqn = Genre.getDescriptor().getFullName(); + + assertEquals((int) testValue, convert(testValue, Type.protoEnum(protoTypeFqn), Integer.class)); + assertEquals(testValue, convert(testValue, Type.protoEnum(protoTypeFqn), Long.class)); + assertEquals(Genre.ROCK, convert(testValue, Type.protoEnum(protoTypeFqn), Genre.class)); + } + + @Test + public void testConvertJson() throws SQLException { + String testValue = "{\"test\": foo}"; + assertConvertThrows(testValue, Type.json(), Boolean.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.json(), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.json(), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.json(), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.json(), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.json(), Float.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.json(), Double.class, Code.INVALID_ARGUMENT); + 
assertConvertThrows(testValue, Type.json(), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.json(), BigDecimal.class, Code.INVALID_ARGUMENT); + + assertThat(convert(testValue, Type.json(), byte[].class)).isEqualTo(testValue.getBytes(UTF8)); + assertThat(convert(testValue, Type.json(), String.class)).isEqualTo(testValue); + } + + private TimeZone[] getTestTimeZones() { + return new TimeZone[] { + TimeZone.getTimeZone("GMT-12:00"), + TimeZone.getTimeZone("GMT-9:00"), + TimeZone.getTimeZone("GMT-1:00"), + TimeZone.getTimeZone("GMT"), + TimeZone.getTimeZone("GMT+1:00"), + TimeZone.getTimeZone("GMT+12:00") + }; + } + + @Test + public void testConvertDate() throws SQLException { + TimeZone initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + TimeZone.setDefault(zone); + @SuppressWarnings("deprecation") + Date testValue = new Date(2019 - 1900, 7, 24); + assertConvertThrows(testValue, Type.date(), Boolean.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), Float.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), Double.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.date(), BigDecimal.class, Code.INVALID_ARGUMENT); + + assertThat(convert(testValue, Type.date(), Date.class)).isEqualTo(testValue); + assertThat(convert(testValue, Type.date(), String.class)).isEqualTo("2019-08-24"); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @Test + public void testConvertTimestamp() throws SQLException { + 
TimeZone initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + TimeZone.setDefault(zone); + @SuppressWarnings("deprecation") + Timestamp testValue = new Timestamp(2019 - 1900, 7, 24, 7, 20, 19, 123456789); + assertConvertThrows(testValue, Type.timestamp(), Boolean.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), Float.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), Double.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.timestamp(), BigDecimal.class, Code.INVALID_ARGUMENT); + + assertThat(convert(testValue, Type.timestamp(), Timestamp.class)).isEqualTo(testValue); + int offset = zone.getOffset(testValue.getTime()); + int offsetHours = offset / (60_000 * 60); + DecimalFormat fmt = new DecimalFormat("+##00;-#"); + String offsetString = offset == 0 ? 
"Z" : fmt.format(offsetHours) + ":00"; + assertThat(convert(testValue, Type.timestamp(), String.class)) + .isEqualTo("2019-08-24T07:20:19.123456789" + offsetString); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @Test + public void testConvertString() throws SQLException { + String testValue = "test"; + assertConvertThrows(testValue, Type.string(), Boolean.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), Float.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), Double.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows(testValue, Type.string(), BigDecimal.class, Code.INVALID_ARGUMENT); + + assertThat(convert(testValue, Type.string(), String.class)).isEqualTo(testValue); + assertThat(convert(testValue, Type.string(), byte[].class)).isEqualTo(testValue.getBytes(UTF8)); + } + + @Test + public void testConvertInt64() throws SQLException { + Long[] testValues = + new Long[] { + 0L, + -1L, + 1L, + Long.MIN_VALUE, + Long.MAX_VALUE, + (long) Integer.MIN_VALUE, + (long) Integer.MAX_VALUE, + ((long) Integer.MIN_VALUE - 1), + ((long) Integer.MAX_VALUE + 1), + (long) Short.MIN_VALUE, + (long) Short.MAX_VALUE, + (long) (Short.MIN_VALUE - 1), + (long) (Short.MAX_VALUE + 1), + (long) Byte.MIN_VALUE, + (long) Byte.MAX_VALUE, + (long) (Byte.MIN_VALUE - 1), + (long) (Byte.MAX_VALUE + 1) + }; + testConvertInt64ToNumber(testValues, Long.class, Long.MIN_VALUE, Long.MAX_VALUE); + testConvertInt64ToNumber(testValues, Integer.class, Integer.MIN_VALUE, Integer.MAX_VALUE); + 
testConvertInt64ToNumber(testValues, Short.class, Short.MIN_VALUE, Short.MAX_VALUE); + testConvertInt64ToNumber(testValues, Byte.class, Byte.MIN_VALUE, Byte.MAX_VALUE); + testConvertInt64ToNumber(testValues, BigInteger.class, Long.MIN_VALUE, Long.MAX_VALUE); + testConvertInt64ToNumber(testValues, BigDecimal.class, Long.MIN_VALUE, Long.MAX_VALUE); + + for (Long l : testValues) { + assertThat(convert(l, Type.int64(), String.class)).isEqualTo(String.valueOf(l)); + assertThat(convert(l, Type.int64(), Boolean.class)).isEqualTo(l != 0L); + assertConvertThrows(l, Type.int64(), Double.class, Code.INVALID_ARGUMENT); + assertConvertThrows(l, Type.int64(), Float.class, Code.INVALID_ARGUMENT); + } + } + + private void testConvertInt64ToNumber( + Long[] testValues, Class targetType, Number minValue, Number maxValue) + throws SQLException { + for (Long t : testValues) { + if (t < minValue.longValue() || t > maxValue.longValue()) { + assertConvertThrows(t, Type.int64(), targetType, Code.OUT_OF_RANGE); + } else { + assertThat(((Number) convert(t, Type.int64(), targetType)).longValue()).isEqualTo(t); + assertThat(convert(t, Type.int64(), targetType)).isInstanceOf(targetType); + } + } + } + + @Test + public void testConvertFloat64() throws SQLException { + Double[] testValues = + new Double[] { + 0D, + -1D, + 1D, + Double.MIN_VALUE, + Double.MAX_VALUE, + (double) Float.MIN_VALUE, + (double) Float.MAX_VALUE, + Float.MAX_VALUE + 1D + }; + for (Double d : testValues) { + assertThat(convert(d, Type.float64(), Double.class)).isEqualTo(d); + if (Math.abs(d) > Math.abs(Float.MAX_VALUE)) { + assertConvertThrows(d, Type.float64(), Float.class, Code.OUT_OF_RANGE); + } else { + assertThat(convert(d, Type.float64(), Float.class)).isEqualTo(d.floatValue()); + } + assertThat(convert(d, Type.float64(), String.class)).isEqualTo(String.valueOf(d)); + assertThat(convert(d, Type.float64(), Boolean.class)).isEqualTo(d != 0D); + assertConvertThrows(d, Type.float64(), Long.class, Code.INVALID_ARGUMENT); 
+ assertConvertThrows(d, Type.float64(), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows(d, Type.float64(), Short.class, Code.INVALID_ARGUMENT); + assertConvertThrows(d, Type.float64(), Byte.class, Code.INVALID_ARGUMENT); + assertConvertThrows(d, Type.float64(), BigInteger.class, Code.INVALID_ARGUMENT); + assertConvertThrows(d, Type.float64(), BigDecimal.class, Code.INVALID_ARGUMENT); + } + } + + @Test + public void testConvertNumeric() throws SQLException { + BigDecimal[] testValues = + new BigDecimal[] { + BigDecimal.ZERO, + BigDecimal.ONE.negate(), + BigDecimal.ONE, + BigDecimal.valueOf(Double.MIN_VALUE), + BigDecimal.valueOf(Double.MAX_VALUE), + BigDecimal.valueOf(Float.MIN_VALUE), + BigDecimal.valueOf(Float.MAX_VALUE), + BigDecimal.valueOf(Float.MAX_VALUE + 1D) + }; + for (BigDecimal d : testValues) { + assertThat(convert(d, Type.numeric(), BigDecimal.class)).isEqualTo(d); + assertThat(convert(d, Type.numeric(), Double.class)).isEqualTo(d.doubleValue()); + assertThat(convert(d, Type.numeric(), Float.class)).isEqualTo(d.floatValue()); + assertThat(convert(d, Type.numeric(), String.class)).isEqualTo(String.valueOf(d)); + assertThat(convert(d, Type.numeric(), Boolean.class)).isEqualTo(!d.equals(BigDecimal.ZERO)); + if (d.compareTo(BigDecimal.valueOf(Long.MAX_VALUE)) > 0 + || d.compareTo(BigDecimal.valueOf(Long.MIN_VALUE)) < 0) { + assertConvertThrows(d, Type.numeric(), Long.class, Code.OUT_OF_RANGE); + } else { + assertThat(convert(d, Type.numeric(), Long.class)).isEqualTo(d.longValue()); + } + if (d.compareTo(BigDecimal.valueOf(Integer.MAX_VALUE)) > 0 + || d.compareTo(BigDecimal.valueOf(Integer.MIN_VALUE)) < 0) { + assertConvertThrows(d, Type.numeric(), Integer.class, Code.OUT_OF_RANGE); + } else { + assertThat(convert(d, Type.numeric(), Integer.class)).isEqualTo(d.intValue()); + } + } + } + + @Test + public void testConvertPgNumeric() throws SQLException { + BigDecimal[] testValues = + new BigDecimal[] { + BigDecimal.ZERO, + 
BigDecimal.ONE.negate(), + BigDecimal.ONE, + BigDecimal.valueOf(Double.MIN_VALUE), + BigDecimal.valueOf(Double.MAX_VALUE), + BigDecimal.valueOf(Float.MIN_VALUE), + BigDecimal.valueOf(Float.MAX_VALUE), + BigDecimal.valueOf(Float.MAX_VALUE + 1D) + }; + for (BigDecimal d : testValues) { + String strVal = String.valueOf(d); + assertThat(convert(strVal, Type.pgNumeric(), BigDecimal.class)).isEqualTo(d); + assertThat(convert(strVal, Type.pgNumeric(), Double.class)).isEqualTo(d.doubleValue()); + assertThat(convert(strVal, Type.pgNumeric(), Float.class)).isEqualTo(d.floatValue()); + assertThat(convert(strVal, Type.pgNumeric(), String.class)).isEqualTo(strVal); + assertThat(convert(strVal, Type.pgNumeric(), Boolean.class)) + .isEqualTo(!d.equals(BigDecimal.ZERO)); + if (d.compareTo(BigDecimal.valueOf(Long.MAX_VALUE)) > 0 + || d.compareTo(BigDecimal.valueOf(Long.MIN_VALUE)) < 0) { + assertConvertThrows(strVal, Type.pgNumeric(), Long.class, Code.OUT_OF_RANGE); + } else { + assertThat(convert(strVal, Type.pgNumeric(), Long.class)).isEqualTo(d.longValue()); + } + if (d.compareTo(BigDecimal.valueOf(Integer.MAX_VALUE)) > 0 + || d.compareTo(BigDecimal.valueOf(Integer.MIN_VALUE)) < 0) { + assertConvertThrows(strVal, Type.pgNumeric(), Integer.class, Code.OUT_OF_RANGE); + } else { + assertThat(convert(strVal, Type.pgNumeric(), Integer.class)).isEqualTo(d.intValue()); + } + } + + assertThat(convert("NaN", Type.pgNumeric(), Float.class)).isEqualTo(Float.NaN); + assertThat(convert("NaN", Type.pgNumeric(), Double.class)).isEqualTo(Double.NaN); + assertThat(convert("NaN", Type.pgNumeric(), String.class)).isEqualTo("NaN"); + assertConvertThrows("NaN", Type.pgNumeric(), Long.class, Code.INVALID_ARGUMENT); + assertConvertThrows("NaN", Type.pgNumeric(), Integer.class, Code.INVALID_ARGUMENT); + assertConvertThrows("NaN", Type.pgNumeric(), BigDecimal.class, Code.INVALID_ARGUMENT); + } + + private void assertConvertThrows(Object t, Type type, Class destinationType, Code code) + throws 
SQLException { + try { + convert(t, type, destinationType); + fail("missing conversion exception for " + t); + } catch (JdbcSqlExceptionImpl e) { + assertThat(e.getCode()).isEqualTo(code); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testToGoogleDate() { + TimeZone initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + TimeZone.setDefault(zone); + assertThat(toGoogleDate(new Date(2019 - 1900, 7, 24))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2019, 8, 24)); + assertThat(toGoogleDate(new Date(2019 - 1900, 0, 1))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2019, 1, 1)); + assertThat(toGoogleDate(new Date(2019 - 1900, 11, 31))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2019, 12, 31)); + assertThat(toGoogleDate(new Date(2016 - 1900, 1, 29))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2016, 2, 29)); + assertThat(toGoogleDate(new Date(2000 - 1900, 1, 29))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2000, 2, 29)); + + assertThat(toGoogleDate(new Time(12, 0, 0))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1)); + assertThat(toGoogleDate(new Time(0, 0, 0))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1)); + assertThat(toGoogleDate(new Time(23, 59, 59))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(1970, 1, 1)); + + assertThat(toGoogleDate(new Timestamp(2019 - 1900, 7, 24, 8, 51, 21, 987))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2019, 8, 24)); + assertThat(toGoogleDate(new Timestamp(2019 - 1900, 0, 1, 0, 0, 0, 0))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2019, 1, 1)); + assertThat(toGoogleDate(new Timestamp(2019 - 1900, 11, 31, 23, 59, 59, 100))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2019, 12, 31)); + assertThat(toGoogleDate(new Timestamp(2016 - 1900, 1, 29, 23, 59, 59, 0))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2016, 2, 29)); + assertThat(toGoogleDate(new Timestamp(2000 - 
1900, 1, 29, 0, 0, 0, 0))) + .isEqualTo(com.google.cloud.Date.fromYearMonthDay(2000, 2, 29)); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @Test + public void testToGoogleDates() { + @SuppressWarnings("deprecation") + Date[] dates = + new Date[] { + new Date(2019 - 1900, 7, 24), + new Date(2019 - 1900, 0, 1), + new Date(2019 - 1900, 11, 31), + new Date(2016 - 1900, 1, 29), + new Date(2000 - 1900, 1, 29) + }; + List expected = + Arrays.asList( + com.google.cloud.Date.fromYearMonthDay(2019, 8, 24), + com.google.cloud.Date.fromYearMonthDay(2019, 1, 1), + com.google.cloud.Date.fromYearMonthDay(2019, 12, 31), + com.google.cloud.Date.fromYearMonthDay(2016, 2, 29), + com.google.cloud.Date.fromYearMonthDay(2000, 2, 29)); + assertThat(toGoogleDates(dates)).isEqualTo(expected); + } + + @SuppressWarnings("deprecation") + @Test + public void testToSqlDate() { + TimeZone initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + TimeZone.setDefault(zone); + assertThat(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2019, 8, 24))) + .isEqualTo(new Date(2019 - 1900, 7, 24)); + assertThat(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2019, 1, 1))) + .isEqualTo(new Date(2019 - 1900, 0, 1)); + assertThat(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2019, 12, 31))) + .isEqualTo(new Date(2019 - 1900, 11, 31)); + assertThat(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2016, 2, 29))) + .isEqualTo(new Date(2016 - 1900, 1, 29)); + assertThat(toSqlDate(com.google.cloud.Date.fromYearMonthDay(2000, 2, 29))) + .isEqualTo(new Date(2000 - 1900, 1, 29)); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @Test + public void testToSqlDateWithCalendar() { + for (TimeZone zone : getTestTimeZones()) { + Calendar cal = Calendar.getInstance(zone); + cal.set(2019, Calendar.AUGUST, 24, 0, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + assertThat( + toSqlDate( + com.google.cloud.Date.fromYearMonthDay(2019, 8, 24), 
Calendar.getInstance(zone))) + .isEqualTo(new Date(cal.getTimeInMillis())); + + cal.set(2019, Calendar.JANUARY, 1, 0, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + assertThat( + toSqlDate( + com.google.cloud.Date.fromYearMonthDay(2019, 1, 1), Calendar.getInstance(zone))) + .isEqualTo(new Date(cal.getTimeInMillis())); + + cal.set(2019, Calendar.DECEMBER, 31, 0, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + assertThat( + toSqlDate( + com.google.cloud.Date.fromYearMonthDay(2019, 12, 31), Calendar.getInstance(zone))) + .isEqualTo(new Date(cal.getTimeInMillis())); + + cal.set(2016, Calendar.FEBRUARY, 29, 0, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + assertThat( + toSqlDate( + com.google.cloud.Date.fromYearMonthDay(2016, 2, 29), Calendar.getInstance(zone))) + .isEqualTo(new Date(cal.getTimeInMillis())); + + cal.set(2000, Calendar.FEBRUARY, 29, 0, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + assertThat( + toSqlDate( + com.google.cloud.Date.fromYearMonthDay(2000, 2, 29), Calendar.getInstance(zone))) + .isEqualTo(new Date(cal.getTimeInMillis())); + } + } + + @Test + public void testToSqlDates() { + TimeZone initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + TimeZone.setDefault(zone); + List input = + Arrays.asList( + com.google.cloud.Date.fromYearMonthDay(2019, 8, 24), + com.google.cloud.Date.fromYearMonthDay(2019, 1, 1), + com.google.cloud.Date.fromYearMonthDay(2019, 12, 31), + com.google.cloud.Date.fromYearMonthDay(2016, 2, 29), + com.google.cloud.Date.fromYearMonthDay(2000, 2, 29)); + @SuppressWarnings("deprecation") + List expected = + Arrays.asList( + new Date(2019 - 1900, 7, 24), + new Date(2019 - 1900, 0, 1), + new Date(2019 - 1900, 11, 31), + new Date(2016 - 1900, 1, 29), + new Date(2000 - 1900, 1, 29)); + assertThat(toSqlDates(input)).isEqualTo(expected); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testToSqlTimestamp() { + TimeZone 
initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + List sqlTimestamps = new ArrayList<>(); + List gTimestamps = new ArrayList<>(); + TimeZone.setDefault(zone); + + // Create a timestamp in the current default timezone, but do not set the nanosecond value + // yet, as it would be lost by the ts.getTime() call on the next line. + Timestamp ts = new Timestamp(2019 - 1900, 7, 24, 11 - 2, 23, 1, 0); + ts.setTime(ts.getTime() + zone.getRawOffset()); + ts.setNanos(199800000); + com.google.cloud.Timestamp gts = + ReadOnlyStalenessUtil.parseRfc3339("2019-08-24T11:23:01.1998+02:00"); + assertThat(toSqlTimestamp(gts)).isEqualTo(ts); + sqlTimestamps.add(ts); + gTimestamps.add(gts); + + ts = new Timestamp(2019 - 1900, 11, 31, 23, 59, 59, 0); + ts.setTime(ts.getTime() + zone.getRawOffset()); + ts.setNanos(999999999); + gts = ReadOnlyStalenessUtil.parseRfc3339("2019-12-31T23:59:59.999999999Z"); + assertThat(toSqlTimestamp(gts)).isEqualTo(ts); + sqlTimestamps.add(ts); + gTimestamps.add(gts); + + ts = new Timestamp(2016 - 1900, 1, 29, 12 + 2, 0, 1, 0); + ts.setTime(ts.getTime() + zone.getRawOffset()); + ts.setNanos(1000); + gts = ReadOnlyStalenessUtil.parseRfc3339("2016-02-29T12:00:01.000001000-02:00"); + assertThat(toSqlTimestamp(gts)).isEqualTo(ts); + sqlTimestamps.add(ts); + gTimestamps.add(gts); + + ts = new Timestamp(2000 - 1900, 1, 29, 0, 0, 0, 0); + ts.setTime(ts.getTime() + zone.getRawOffset()); + ts.setNanos(100000000); + gts = ReadOnlyStalenessUtil.parseRfc3339("2000-02-29T00:00:00.100000000Z"); + assertThat(toSqlTimestamp(gts)).isEqualTo(ts); + sqlTimestamps.add(ts); + gTimestamps.add(gts); + + assertThat(toSqlTimestamps(gTimestamps)).isEqualTo(sqlTimestamps); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @Test + public void testGetAsSqlTimestamp() { + for (TimeZone zone : getTestTimeZones()) { + com.google.cloud.Timestamp gts = + ReadOnlyStalenessUtil.parseRfc3339("2019-08-24T11:23:01.1998+03:00"); + 
Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("GMT+03:00")); + cal.set(2019, Calendar.AUGUST, 24, 11, 23, 1); + cal.set(Calendar.MILLISECOND, 0); + Timestamp ts = new Timestamp(cal.getTimeInMillis() + zone.getRawOffset()); + ts.setNanos(199800000); + assertThat(getAsSqlTimestamp(gts, Calendar.getInstance(zone))).isEqualTo(ts); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2019-12-31T23:59:59.999999999-03:00"); + cal = Calendar.getInstance(TimeZone.getTimeZone("GMT-03:00")); + cal.set(2019, Calendar.DECEMBER, 31, 23, 59, 59); + cal.set(Calendar.MILLISECOND, 0); + ts = new Timestamp(cal.getTimeInMillis() + zone.getRawOffset()); + ts.setNanos(999999999); + assertThat(getAsSqlTimestamp(gts, Calendar.getInstance(zone))).isEqualTo(ts); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2016-02-29T12:00:00Z"); + cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + cal.set(2016, Calendar.FEBRUARY, 29, 12, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + ts = new Timestamp(cal.getTimeInMillis() + zone.getRawOffset()); + assertThat(getAsSqlTimestamp(gts, Calendar.getInstance(zone))).isEqualTo(ts); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2000-02-29T00:00:00.000000000-10:00"); + cal = Calendar.getInstance(TimeZone.getTimeZone("GMT-10:00")); + cal.set(2000, Calendar.FEBRUARY, 29, 0, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + ts = new Timestamp(cal.getTimeInMillis() + zone.getRawOffset()); + assertThat(getAsSqlTimestamp(gts, Calendar.getInstance(zone))).isEqualTo(ts); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testSetTimestampInCalendar() { + for (TimeZone zone : getTestTimeZones()) { + Calendar cal = Calendar.getInstance(zone); + cal.set(2019, Calendar.AUGUST, 24, 11, 23, 1); + cal.set(Calendar.MILLISECOND, 0); + Timestamp ts = new Timestamp(2019 - 1900, 7, 24, 11, 23, 1, 0); + Timestamp tsInCal = setTimestampInCalendar(ts, Calendar.getInstance(zone)); + assertThat(tsInCal.getTime()) + .isEqualTo(cal.getTimeInMillis() - 
TimeZone.getDefault().getOffset(ts.getTime())); + + cal = Calendar.getInstance(zone); + cal.set(2019, Calendar.DECEMBER, 31, 23, 59, 59); + cal.set(Calendar.MILLISECOND, 999); + ts = new Timestamp(2019 - 1900, 11, 31, 23, 59, 59, 999000000); + tsInCal = setTimestampInCalendar(ts, Calendar.getInstance(zone)); + assertThat(tsInCal.getTime()) + .isEqualTo(cal.getTimeInMillis() - TimeZone.getDefault().getOffset(ts.getTime())); + + cal = Calendar.getInstance(zone); + cal.set(2016, Calendar.FEBRUARY, 29, 12, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + ts = new Timestamp(2016 - 1900, 1, 29, 12, 0, 0, 0); + tsInCal = setTimestampInCalendar(ts, Calendar.getInstance(zone)); + assertThat(tsInCal.getTime()) + .isEqualTo(cal.getTimeInMillis() - TimeZone.getDefault().getOffset(ts.getTime())); + + cal = Calendar.getInstance(zone); + cal.set(2000, Calendar.FEBRUARY, 29, 0, 0, 0); + cal.set(Calendar.MILLISECOND, 0); + ts = new Timestamp(2000 - 1900, 1, 29, 0, 0, 0, 0); + tsInCal = setTimestampInCalendar(ts, Calendar.getInstance(zone)); + assertThat(tsInCal.getTime()) + .isEqualTo(cal.getTimeInMillis() - TimeZone.getDefault().getOffset(ts.getTime())); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testToGoogleTimestamp() { + TimeZone initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + TimeZone.setDefault(zone); + assertThat(toGoogleTimestamp(new Date(2019 - 1900, 7, 24))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2019 - 1900, 7, 24, 0, 0, 0, 0))); + assertThat(toGoogleTimestamp(new Date(2019 - 1900, 0, 1))) + .isEqualTo(com.google.cloud.Timestamp.of(new Timestamp(2019 - 1900, 0, 1, 0, 0, 0, 0))); + assertThat(toGoogleTimestamp(new Date(2019 - 1900, 11, 31))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2019 - 1900, 11, 31, 0, 0, 0, 0))); + assertThat(toGoogleTimestamp(new Date(2016 - 1900, 1, 29))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2016 - 1900, 1, 29, 0, 0, 0, 
0))); + assertThat(toGoogleTimestamp(new Date(2000 - 1900, 1, 29))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2000 - 1900, 1, 29, 0, 0, 0, 0))); + + assertThat(toGoogleTimestamp(new Time(12, 0, 0))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(1970 - 1900, 0, 1, 12, 0, 0, 0))); + assertThat(toGoogleTimestamp(new Time(0, 0, 0))) + .isEqualTo(com.google.cloud.Timestamp.of(new Timestamp(1970 - 1900, 0, 1, 0, 0, 0, 0))); + assertThat(toGoogleTimestamp(new Time(23, 59, 59))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(1970 - 1900, 0, 1, 23, 59, 59, 0))); + + assertThat(toGoogleTimestamp(new Timestamp(2019 - 1900, 7, 24, 8, 51, 21, 987))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2019 - 1900, 7, 24, 8, 51, 21, 987))); + assertThat(toGoogleTimestamp(new Timestamp(2019 - 1900, 0, 1, 0, 0, 0, 0))) + .isEqualTo(com.google.cloud.Timestamp.of(new Timestamp(2019 - 1900, 0, 1, 0, 0, 0, 0))); + assertThat(toGoogleTimestamp(new Timestamp(2019 - 1900, 11, 31, 23, 59, 59, 100))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2019 - 1900, 11, 31, 23, 59, 59, 100))); + assertThat(toGoogleTimestamp(new Timestamp(2016 - 1900, 1, 29, 23, 59, 59, 0))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2016 - 1900, 1, 29, 23, 59, 59, 0))); + assertThat(toGoogleTimestamp(new Timestamp(2000 - 1900, 1, 29, 0, 0, 0, 0))) + .isEqualTo( + com.google.cloud.Timestamp.of(new Timestamp(2000 - 1900, 1, 29, 0, 0, 0, 0))); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testToSqlTime() { + TimeZone initialDefault = TimeZone.getDefault(); + try { + for (TimeZone zone : getTestTimeZones()) { + TimeZone.setDefault(zone); + + com.google.cloud.Timestamp gts = + ReadOnlyStalenessUtil.parseRfc3339("2019-08-24T11:23:01.1998+03:00"); + // Subtract 3 hours to compensate for +03 timezone. 
+ Time time = new Time(11 - 3, 23, 1); + time.setTime(time.getTime() + zone.getRawOffset()); + Time convertedTime = toSqlTime(gts); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2019-12-31T23:59:59.999999999Z"); + time = new Time(23, 59, 59); + time.setTime(time.getTime() + zone.getRawOffset()); + convertedTime = toSqlTime(gts); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2016-02-29T12:00:01.000001000-02:00"); + time = new Time(12 + 2, 0, 1); + time.setTime(time.getTime() + zone.getRawOffset()); + convertedTime = toSqlTime(gts); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2000-02-29T00:00:00.100000000Z"); + time = new Time(0, 0, 0); + time.setTime(time.getTime() + zone.getRawOffset()); + convertedTime = toSqlTime(gts); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + } + } finally { + TimeZone.setDefault(initialDefault); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testToSqlTimeWithCalendar() { + for (TimeZone zone : getTestTimeZones()) { + com.google.cloud.Timestamp gts = + ReadOnlyStalenessUtil.parseRfc3339("2019-08-24T11:23:01.1998+03:00"); + // Compensate for +03 + Time time = new Time(11 - 3, 23, 1); + // Compensate for the test timezone. 
+ time.setHours(time.getHours() + ((int) (zone.getRawOffset() / 60_000L / 60))); + // Compensate for the timezone of the environment on the parsed date. + time.setHours( + time.getHours() + + ((int) + (TimeZone.getDefault().getOffset(gts.toSqlTimestamp().getTime()) + / 60_000L + / 60))); + Time convertedTime = toSqlTime(gts, Calendar.getInstance(zone)); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2019-12-31T23:59:59.999999999Z"); + time = new Time(23, 59, 59); + time.setHours(time.getHours() + ((int) (zone.getRawOffset() / 60_000L / 60))); + time.setHours( + time.getHours() + + ((int) + (TimeZone.getDefault().getOffset(gts.toSqlTimestamp().getTime()) + / 60_000L + / 60))); + convertedTime = toSqlTime(gts, Calendar.getInstance(zone)); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2016-02-29T12:00:01.000001000-02:00"); + time = new Time(12 + 2, 0, 1); + time.setHours(time.getHours() + ((int) (zone.getRawOffset() / 60_000L / 60))); + time.setHours( + time.getHours() + + ((int) + (TimeZone.getDefault().getOffset(gts.toSqlTimestamp().getTime()) + / 60_000L + / 60))); + convertedTime = toSqlTime(gts, Calendar.getInstance(zone)); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + + gts = ReadOnlyStalenessUtil.parseRfc3339("2000-02-29T00:00:00.100000000Z"); + time = new Time(0, 0, 0); + time.setHours(time.getHours() + ((int) (zone.getRawOffset() / 60_000L / 60))); + time.setHours( + time.getHours() + + 
((int) + (TimeZone.getDefault().getOffset(gts.toSqlTimestamp().getTime()) + / 60_000L + / 60))); + convertedTime = toSqlTime(gts, Calendar.getInstance(zone)); + assertThat(convertedTime.getHours()).isEqualTo(time.getHours()); + assertThat(convertedTime.getMinutes()).isEqualTo(time.getMinutes()); + assertThat(convertedTime.getSeconds()).isEqualTo(time.getSeconds()); + } + } + + @Test + public void testToGoogleBytes() { + assertThat(toGoogleBytes(new byte[][] {"test1".getBytes(UTF8), "test2".getBytes(UTF8)})) + .isEqualTo(Arrays.asList(ByteArray.copyFrom("test1"), ByteArray.copyFrom("test2"))); + } + + @Test + public void testToJavaByteArrays() { + List input = Arrays.asList(ByteArray.copyFrom("test3"), ByteArray.copyFrom("test4")); + List expected = Arrays.asList("test3".getBytes(UTF8), "test4".getBytes(UTF8)); + List output = toJavaByteArrays(input); + assertThat(Arrays.deepEquals(expected.toArray(), output.toArray())).isTrue(); + } + + @SuppressWarnings("deprecation") + @Test + public void testDateToSqlTimestamp() { + assertThat(JdbcTypeConverter.toSqlTimestamp(com.google.cloud.Date.fromYearMonthDay(2020, 6, 1))) + .isEqualTo(new Timestamp(2020 - 1900, 5, 1, 0, 0, 0, 0)); + } + + @Test + public void testDateToSqlTimestampWithCalendar() { + assertThat( + JdbcTypeConverter.toSqlTimestamp( + com.google.cloud.Date.fromYearMonthDay(2020, 6, 1), + Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles")))) + .isEqualTo( + com.google.cloud.Timestamp.parseTimestamp("2020-06-01T00:00:00-07:00") + .toSqlTimestamp()); + assertThat( + JdbcTypeConverter.toSqlTimestamp( + com.google.cloud.Date.fromYearMonthDay(2020, 6, 1), + Calendar.getInstance(TimeZone.getTimeZone("Europe/Amsterdam")))) + .isEqualTo( + com.google.cloud.Timestamp.parseTimestamp("2020-06-01T00:00:00+02:00") + .toSqlTimestamp()); + } + + @Test + public void testDateToSqlTimestampWithCalendarWithStartOfDST() { + TimeZone timeZone = TimeZone.getTimeZone("Europe/Oslo"); + + List zonedDateTimes = + 
Arrays.asList( + ZonedDateTime.of(2018, 3, 25, 2, 0, 0, 0, ZoneId.of("+01:00")), + ZonedDateTime.of(2018, 10, 28, 2, 0, 0, 0, ZoneId.of("+01:00"))); + + zonedDateTimes.forEach( + expected -> { + Timestamp expectedTimestamp = Timestamp.from(expected.toInstant()); + Calendar cal = Calendar.getInstance(timeZone); + Timestamp storeTimestamp = + JdbcTypeConverter.setTimestampInCalendar(expectedTimestamp, cal); + + Timestamp resultTimestamp = JdbcTypeConverter.getTimestampInCalendar(storeTimestamp, cal); + ZonedDateTime actual = resultTimestamp.toInstant().atZone(timeZone.toZoneId()); + + assertThat(actual).isEqualTo(expected.withZoneSameInstant(timeZone.toZoneId())); + }); + } + + @Test + public void testParseSqlTimeWithCalendar() { + assertThat( + JdbcTypeConverter.parseSqlTime( + "10:31:15", Calendar.getInstance(TimeZone.getTimeZone("America/Los_Angeles")))) + .isEqualTo( + new Time( + com.google.cloud.Timestamp.parseTimestamp("1970-01-01T10:31:15-08:00") + .toSqlTimestamp() + .getTime())); + assertThat( + JdbcTypeConverter.parseSqlTime( + "10:31:15", Calendar.getInstance(TimeZone.getTimeZone("Europe/Amsterdam")))) + .isEqualTo( + new Time( + com.google.cloud.Timestamp.parseTimestamp("1970-01-01T10:31:15+01:00") + .toSqlTimestamp() + .getTime())); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/MultiplexedSessionsMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/MultiplexedSessionsMockServerTest.java new file mode 100644 index 000000000000..57b79a026e96 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/MultiplexedSessionsMockServerTest.java @@ -0,0 +1,210 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockServerHelper; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.spanner.v1.BatchCreateSessionsRequest; +import com.google.spanner.v1.CreateSessionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.Session; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class MultiplexedSessionsMockServerTest extends AbstractMockServerTest { + private static final String SELECT_RANDOM_SQL = SELECT_RANDOM_STATEMENT.getSql(); + + private static final String INSERT_SQL = INSERT_STATEMENT.getSql(); + + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Before + public void 
setupDialect() { + if (this.dialect != currentDialect) { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(this.dialect)); + this.currentDialect = dialect; + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + SpannerPool.closeSpannerPool(); + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true", + getPort(), "proj", "inst", "db" + (dialect == Dialect.POSTGRESQL ? "pg" : "")); + } + + @Override + protected Connection createJdbcConnection() throws SQLException { + return DriverManager.getConnection(createUrl()); + } + + @Test + public void testUsesMultiplexedSessionForQueryInAutoCommit() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertTrue(connection.getAutoCommit()); + try (ResultSet resultSet = connection.createStatement().executeQuery(SELECT_RANDOM_SQL)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // Just consume the results + } + } + } + // Verify that one multiplexed session was created and used. 
+ assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + CreateSessionRequest request = mockSpanner.getRequestsOfType(CreateSessionRequest.class).get(0); + assertTrue(request.getSession().getMultiplexed()); + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + String sessionId = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getSession(); + Session session = MockServerHelper.getSession(mockSpanner, sessionId); + assertNotNull(session); + assertTrue(session.getMultiplexed()); + } + + @Test + public void testUsesMultiplexedSessionForQueryInReadOnlyTransaction() throws SQLException { + int numQueries = 2; + try (Connection connection = createJdbcConnection()) { + connection.setReadOnly(true); + connection.setAutoCommit(false); + + for (int ignore = 0; ignore < numQueries; ignore++) { + try (ResultSet resultSet = connection.createStatement().executeQuery(SELECT_RANDOM_SQL)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // Just consume the results + } + } + } + } + // Verify that one multiplexed session was created and used. + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + CreateSessionRequest request = mockSpanner.getRequestsOfType(CreateSessionRequest.class).get(0); + assertTrue(request.getSession().getMultiplexed()); + + // Verify that both queries used the multiplexed session. 
+ assertEquals(numQueries, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + for (int index = 0; index < numQueries; index++) { + String sessionId = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(index).getSession(); + Session session = MockServerHelper.getSession(mockSpanner, sessionId); + assertNotNull(session); + assertTrue(session.getMultiplexed()); + } + } + + @Test + public void testUsesMultiplexedSessionForDmlInAutoCommit() throws SQLException { + try (Connection connection = createJdbcConnection()) { + assertTrue(connection.getAutoCommit()); + assertEquals(1, connection.createStatement().executeUpdate(INSERT_SQL)); + } + // The JDBC connection creates a multiplexed session by default, because it executes a query to + // check what dialect the database uses. This query is executed using a multiplexed session. + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + CreateSessionRequest request = mockSpanner.getRequestsOfType(CreateSessionRequest.class).get(0); + assertTrue(request.getSession().getMultiplexed()); + // Verify that a multiplexed session was used for the insert statement. 
+ assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + INSERT_SQL, mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getSql()); + String sessionId = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getSession(); + Session session = MockServerHelper.getSession(mockSpanner, sessionId); + assertNotNull(session); + assertTrue(session.getMultiplexed()); + } + + @Test + public void testUsesMultiplexedSessionForQueryInTransaction() throws SQLException { + try (Connection connection = createJdbcConnection()) { + connection.setAutoCommit(false); + assertFalse(connection.getAutoCommit()); + + try (ResultSet resultSet = connection.createStatement().executeQuery(SELECT_RANDOM_SQL)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // Just consume the results + } + } + connection.commit(); + } + // The JDBC connection creates a multiplexed session by default, because it executes a query to + // check what dialect the database uses. This query is executed using a multiplexed session. + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + CreateSessionRequest request = mockSpanner.getRequestsOfType(CreateSessionRequest.class).get(0); + assertTrue(request.getSession().getMultiplexed()); + // Verify that a multiplexed session was used for the select statement. + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + assertEquals( + SELECT_RANDOM_SQL, mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getSql()); + String sessionId = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0).getSession(); + Session session = MockServerHelper.getSession(mockSpanner, sessionId); + assertNotNull(session); + assertTrue(session.getMultiplexed()); + } + + @Test + public void testUsesMultiplexedSessionInCombinationWithSessionPoolOptions() throws SQLException { + // Create a connection that uses a session pool with MinSessions=0. 
+ // This should stop any regular sessions from being created. + try (Connection connection = DriverManager.getConnection(createUrl() + ";minSessions=0")) { + assertTrue(connection.getAutoCommit()); + try (ResultSet resultSet = connection.createStatement().executeQuery(SELECT_RANDOM_SQL)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) { + // Just consume the results + } + } + } + + // Verify that one multiplexed session was created and used. + assertEquals(1, mockSpanner.countRequestsOfType(CreateSessionRequest.class)); + CreateSessionRequest request = mockSpanner.getRequestsOfType(CreateSessionRequest.class).get(0); + assertTrue(request.getSession().getMultiplexed()); + // There should be no regular sessions in use. + assertEquals(0, mockSpanner.countRequestsOfType(BatchCreateSessionsRequest.class)); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/MultiplexedSessionsTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/MultiplexedSessionsTest.java new file mode 100644 index 000000000000..17183241479b --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/MultiplexedSessionsTest.java @@ -0,0 +1,236 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +import com.google.cloud.spanner.MockServerHelper; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.common.base.Strings; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeCode; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.Map; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class MultiplexedSessionsTest extends AbstractMockServerTest { + private static final String PROCESS_ENVIRONMENT = "java.lang.ProcessEnvironment"; + private static final String ENVIRONMENT = "theUnmodifiableEnvironment"; + private static final String SOURCE_MAP = "m"; + private static final Object STATIC_METHOD = null; + private static final Class UMODIFIABLE_MAP_CLASS = + Collections.unmodifiableMap(Collections.emptyMap()).getClass(); + private static final Class MAP_CLASS = Map.class; + + private static boolean setEnvVar = false; + + private String query; + private String dml; + private String dmlReturning; + + @SuppressWarnings("unchecked") + private static Map 
getModifiableEnvironment() throws Exception { + Class environmentClass = Class.forName(PROCESS_ENVIRONMENT); + java.lang.reflect.Field environmentField = environmentClass.getDeclaredField(ENVIRONMENT); + assertNotNull(environmentField); + environmentField.setAccessible(true); + + Object unmodifiableEnvironmentMap = environmentField.get(STATIC_METHOD); + assertNotNull(unmodifiableEnvironmentMap); + assertTrue(UMODIFIABLE_MAP_CLASS.isAssignableFrom(unmodifiableEnvironmentMap.getClass())); + + java.lang.reflect.Field underlyingMapField = + unmodifiableEnvironmentMap.getClass().getDeclaredField(SOURCE_MAP); + underlyingMapField.setAccessible(true); + Object underlyingMap = underlyingMapField.get(unmodifiableEnvironmentMap); + assertNotNull(underlyingMap); + assertTrue(MAP_CLASS.isAssignableFrom(underlyingMap.getClass())); + + return (Map) underlyingMap; + } + + @BeforeClass + public static void setEnvVars() throws Exception { + // Java versions 8 and lower start with 1. (1.8, 1.7 etc.). + // Higher versions start with the major version number. + // So this effectively verifies that the test is running on Java 8. 
+ assumeTrue(System.getProperty("java.version", "undefined").startsWith("1.")); + assumeFalse(System.getProperty("os.name", "").toLowerCase().startsWith("windows")); + + if (Strings.isNullOrEmpty(System.getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW"))) { + Map env = getModifiableEnvironment(); + env.put("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW", "true"); + setEnvVar = true; + } + } + + @AfterClass + public static void clearEnvVars() throws Exception { + if (setEnvVar) { + Map env = getModifiableEnvironment(); + env.remove("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS_FOR_RW"); + } + } + + @Before + public void setupResults() { + query = "select * from my_table"; + dml = "insert into my_table (id, value) values (1, 'One')"; + String DML_THEN_RETURN_ID = dml + "\nTHEN RETURN `id`"; + dmlReturning = "insert into my_table (id, value) values (1, 'One') THEN RETURN *"; + + super.setupResults(); + + com.google.spanner.v1.ResultSet resultSet = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .setName("id") + .build()) + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.STRING).build()) + .setName("value") + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + .addValues(Value.newBuilder().setStringValue("1").build()) + .addValues(Value.newBuilder().setStringValue("One").build()) + .build()) + .build(); + com.google.spanner.v1.ResultSet returnIdResultSet = + com.google.spanner.v1.ResultSet.newBuilder() + .setMetadata( + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setType(Type.newBuilder().setCode(TypeCode.INT64).build()) + .setName("id") + .build()) + .build()) + .build()) + .addRows( + ListValue.newBuilder() + 
.addValues(Value.newBuilder().setStringValue("1").build()) + .build()) + .build(); + mockSpanner.putStatementResult( + StatementResult.query(com.google.cloud.spanner.Statement.of(query), resultSet)); + mockSpanner.putStatementResult( + StatementResult.update(com.google.cloud.spanner.Statement.of(dml), 1L)); + mockSpanner.putStatementResult( + StatementResult.query( + com.google.cloud.spanner.Statement.of(dmlReturning), + resultSet.toBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + mockSpanner.putStatementResult( + StatementResult.query( + com.google.cloud.spanner.Statement.of(DML_THEN_RETURN_ID), + returnIdResultSet.toBuilder() + .setStats(ResultSetStats.newBuilder().setRowCountExact(1L).build()) + .build())); + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true", + getPort(), "proj", "inst", "db"); + } + + @Override + protected Connection createJdbcConnection() throws SQLException { + return DriverManager.getConnection(createUrl()); + } + + @Test + public void testStatementExecuteQuery() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(query)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(MockServerHelper.getSession(mockSpanner, request.getSession()).getMultiplexed()); + } + + @Test + public void testStatementExecuteUpdate() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + assertEquals(1, statement.executeUpdate(dml)); + } + assertEquals(1, 
mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(MockServerHelper.getSession(mockSpanner, request.getSession()).getMultiplexed()); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + } + + @Test + public void testStatementExecuteQueryDmlReturning() throws SQLException { + try (Connection connection = createJdbcConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(dmlReturning)) { + //noinspection StatementWithEmptyBody + while (resultSet.next()) {} + } + } + assertEquals(1, mockSpanner.countRequestsOfType(ExecuteSqlRequest.class)); + ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + assertTrue(MockServerHelper.getSession(mockSpanner, request.getSession()).getMultiplexed()); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasBegin()); + assertTrue(request.getTransaction().getBegin().hasReadWrite()); + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PartitionedQueryMockServerTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PartitionedQueryMockServerTest.java new file mode 100644 index 000000000000..42c59b4c3c47 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PartitionedQueryMockServerTest.java @@ -0,0 +1,514 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.PartitionOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.AbstractMockServerTest; +import com.google.cloud.spanner.connection.RandomResultSetGenerator; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.PartitionQueryRequest; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Parameterized.class) +public class PartitionedQueryMockServerTest extends AbstractMockServerTest { + @Parameter public Dialect dialect; + + private Dialect currentDialect; + + @Parameters(name = "dialect = {0}") + public static Object[] data() { + return Dialect.values(); + } + + @Before + public void setupDialect() { 
+ if (currentDialect != dialect) { + mockSpanner.putStatementResult(StatementResult.detectDialectResult(this.dialect)); + currentDialect = dialect; + SpannerPool.closeSpannerPool(); + } + } + + @After + public void clearRequests() { + mockSpanner.clearRequests(); + } + + private int getExpectedColumnCount(Dialect dialect) { + // GoogleSQL also adds 4 PROTO columns. + // PostgreSQL adds 2 OID columns. + return dialect == Dialect.GOOGLE_STANDARD_SQL ? 26 : 24; + } + + private String createUrl() { + return String.format( + "jdbc:cloudspanner://localhost:%d/projects/%s/instances/%s/databases/%s?usePlainText=true", + getPort(), "proj", "inst", "db"); + } + + @Override + protected Connection createJdbcConnection() throws SQLException { + return DriverManager.getConnection(createUrl()); + } + + @Test + public void testPartitionedQueryUsingSql() throws SQLException { + int numRows = 5; + int maxPartitions = 4; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows, dialect); + Statement statement = + Statement.newBuilder( + String.format( + "select * from my_table where active=%s", + dialect == Dialect.POSTGRESQL ? "$1" : "@p1")) + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + String prefix = dialect == Dialect.POSTGRESQL ? "spanner." : ""; + + try (Connection connection = createJdbcConnection()) { + // This will automatically enable Data Boost for any partitioned query that is executed on + // this connection. The property is ignored for any query that is not a partitioned query. + connection.createStatement().execute(String.format("set %sdata_boost_enabled=true", prefix)); + // Sets the maximum number of partitions that should be used by Cloud Spanner. + // This is just a hint that can be ignored by Cloud Spanner, but the mock server that is used + // for testing respects this hint. 
+ connection + .createStatement() + .execute(String.format("set %smax_partitions=%d", prefix, maxPartitions)); + + List partitionIds = new ArrayList<>(); + try (PreparedStatement partitionStatement = + connection.prepareStatement("partition select * from my_table where active=?")) { + partitionStatement.setBoolean(1, true); + try (ResultSet partitions = partitionStatement.executeQuery()) { + assertNotNull(partitions.getMetaData()); + while (partitions.next()) { + partitionIds.add(partitions.getString(1)); + } + } + } + + for (String partitionId : partitionIds) { + // Execute `run partition ''` to get the results of a single partition. + // This can be done from any JDBC connection, including from other hosts. + try (ResultSet resultSet = + connection + .createStatement() + .executeQuery(String.format("run partition '%s'", partitionId))) { + assertNotNull(resultSet.getMetaData()); + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + assertEquals(numRows, rowCount); + } + } + + for (String partitionId : partitionIds) { + // The partition ID may also be set as a query parameter. + try (PreparedStatement runStatement = connection.prepareStatement("run partition")) { + runStatement.setString(1, partitionId); + int rowCount = 0; + try (ResultSet resultSet = runStatement.executeQuery()) { + assertNotNull(resultSet.getMetaData()); + while (resultSet.next()) { + rowCount++; + } + assertEquals(numRows, rowCount); + } + } + } + + // You can also run a query directly as a partitioned query on a single JDBC connection. This + // will not give you the full benefits of a partitioned query, as the entire query is handled + // by a single host and is returned as a single result set. The result set uses multiple + // threads internally to execute the separate partitions. + // This gives users a simple way to access the Data Boost feature that should be accessible + // from most generic frameworks and tools that work with JDBC. 
+ try (PreparedStatement partitionStatement = + connection.prepareStatement( + "run partitioned query select * from my_table where active=?")) { + partitionStatement.setBoolean(1, true); + try (ResultSet results = partitionStatement.executeQuery()) { + assertNotNull(results.getMetaData()); + assertEquals(getExpectedColumnCount(dialect), results.getMetaData().getColumnCount()); + int rowCount = 0; + while (results.next()) { + rowCount++; + } + // The mock server is not smart enough to actually partition the query and only return + // a fraction of the rows per partition. The total row count will therefore be equal to + // the number of partitions multiplied by the number of rows. + assertEquals(numRows * maxPartitions, rowCount); + + // Partitioned queries return a result set with some additional metadata that can be + // inspected to determine the number of partitions and the degree of parallelism that the + // query used. + CloudSpannerJdbcPartitionedQueryResultSet partitionedQueryResultSet = + results.unwrap(CloudSpannerJdbcPartitionedQueryResultSet.class); + assertEquals(maxPartitions, partitionedQueryResultSet.getNumPartitions()); + assertEquals(1, partitionedQueryResultSet.getParallelism()); + } + } + } + } + + @Test + public void testPartitionedQueryStatement() throws SQLException { + int numRows = 5; + int maxPartitions = 4; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows, dialect); + Statement statement = Statement.of("select * from my_table where active=true"); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + try (Connection connection = createJdbcConnection()) { + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + // This will automatically enable Data Boost for any partitioned query that is executed on + // this connection. The property is ignored for any query that is not a partitioned query. 
+ cloudSpannerJdbcConnection.setDataBoostEnabled(true); + // Sets the maximum number of partitions that should be used by Cloud Spanner. + // This is just a hint that can be ignored by Cloud Spanner, but the mock server that is used + // for testing respects this hint. + cloudSpannerJdbcConnection.setMaxPartitions(maxPartitions); + + List partitionIds = new ArrayList<>(); + try (ResultSet partitions = + connection + .createStatement() + .unwrap(CloudSpannerJdbcStatement.class) + .partitionQuery( + "select * from my_table where active=true", + PartitionOptions.getDefaultInstance())) { + assertNotNull(partitions.getMetaData()); + while (partitions.next()) { + partitionIds.add(partitions.getString(1)); + } + } + + for (String partitionId : partitionIds) { + // Execute `run partition ''` to get the results of a single partition. + // This can be done from any JDBC connection, including from other hosts. + try (ResultSet resultSet = + connection + .createStatement() + .unwrap(CloudSpannerJdbcStatement.class) + .runPartition(partitionId)) { + assertNotNull(resultSet.getMetaData()); + int rowCount = 0; + while (resultSet.next()) { + rowCount++; + } + assertEquals(numRows, rowCount); + } + } + + // You can also run a query directly as a partitioned query on a single JDBC connection. This + // will not give you the full benefits of a partitioned query, as the entire query is handled + // by a single host and is returned as a single result set. The result set uses multiple + // threads internally to execute the separate partitions. + // This gives users a simple way to access the Data Boost feature that should be accessible + // from most generic frameworks and tools that work with JDBC. 
+ try (CloudSpannerJdbcPartitionedQueryResultSet results = + connection + .createStatement() + .unwrap(CloudSpannerJdbcStatement.class) + .runPartitionedQuery( + "select * from my_table where active=true", + PartitionOptions.getDefaultInstance())) { + assertNotNull(results.getMetaData()); + int rowCount = 0; + while (results.next()) { + rowCount++; + } + // The mock server is not smart enough to actually partition the query and only return + // a fraction of the rows per partition. The total row count will therefore be equal to + // the number of partitions multiplied by the number of rows. + assertEquals(numRows * maxPartitions, rowCount); + + // Partitioned queries return a result set with some additional metadata that can be + // inspected to determine the number of partitions and the degree of parallelism that the + // query used. + assertEquals(maxPartitions, results.getNumPartitions()); + assertEquals(1, results.getParallelism()); + } + } + } + + @Test + public void testPartitionedQueryPreparedStatement() throws SQLException { + int numRows = 5; + int maxPartitions = 4; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows, dialect); + Statement statement = + Statement.newBuilder( + String.format( + "select * from my_table where active=%s", + dialect == Dialect.POSTGRESQL ? "$1" : "@p1")) + .bind("p1") + .to(true) + .build(); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + try (Connection connection = createJdbcConnection()) { + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + // This will automatically enable Data Boost for any partitioned query that is executed on + // this connection. The property is ignored for any query that is not a partitioned query. + cloudSpannerJdbcConnection.setDataBoostEnabled(true); + // Sets the maximum number of partitions that should be used by Cloud Spanner. 
+ // This is just a hint that can be ignored by Cloud Spanner, but the mock server that is used + // for testing respects this hint. + cloudSpannerJdbcConnection.setMaxPartitions(maxPartitions); + + List partitionIds = new ArrayList<>(); + try (PreparedStatement partitionStatement = + connection.prepareStatement("select * from my_table where active=?")) { + partitionStatement.setBoolean(1, true); + try (ResultSet partitions = + partitionStatement + .unwrap(CloudSpannerJdbcPreparedStatement.class) + .partitionQuery(PartitionOptions.getDefaultInstance())) { + assertNotNull(partitions.getMetaData()); + while (partitions.next()) { + partitionIds.add(partitions.getString(1)); + } + } + } + + for (String partitionId : partitionIds) { + // The partition ID can also be set as a query parameter. + // The statement text in this case is ignored and can be an empty string. + try (PreparedStatement runStatement = connection.prepareStatement("")) { + runStatement.setString(1, partitionId); + int rowCount = 0; + try (ResultSet resultSet = + runStatement.unwrap(CloudSpannerJdbcPreparedStatement.class).runPartition()) { + assertNotNull(resultSet.getMetaData()); + while (resultSet.next()) { + rowCount++; + } + assertEquals(numRows, rowCount); + } + } + } + + // You can also run a query directly as a partitioned query on a single JDBC connection. This + // will not give you the full benefits of a partitioned query, as the entire query is handled + // by a single host and is returned as a single result set. The result set uses multiple + // threads internally to execute the separate partitions. + // This gives users a simple way to access the Data Boost feature that should be accessible + // from most generic frameworks and tools that work with JDBC. 
+ try (PreparedStatement preparedStatement = + connection.prepareStatement("select * from my_table where active=?")) { + preparedStatement.setBoolean(1, true); + try (CloudSpannerJdbcPartitionedQueryResultSet results = + preparedStatement + .unwrap(CloudSpannerJdbcPreparedStatement.class) + .runPartitionedQuery(PartitionOptions.getDefaultInstance())) { + assertNotNull(results.getMetaData()); + int rowCount = 0; + while (results.next()) { + rowCount++; + } + // The mock server is not smart enough to actually partition the query and only return + // a fraction of the rows per partition. The total row count will therefore be equal to + // the number of partitions multiplied by the number of rows. + assertEquals(numRows * maxPartitions, rowCount); + + // Partitioned queries return a result set with some additional metadata that can be + // inspected to determine the number of partitions and the degree of parallelism that the + // query used. + assertEquals(maxPartitions, results.getNumPartitions()); + assertEquals(1, results.getParallelism()); + } + } + } + } + + @Test + public void testAutoPartitionMode() throws SQLException { + int numRows = 5; + int maxPartitions = 4; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows, dialect); + Statement statement = Statement.of("select * from my_table where active=true"); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + try (Connection connection = createJdbcConnection()) { + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + // This will automatically enable Data Boost for any partitioned query that is executed on + // this connection. The property is ignored for any query that is not a partitioned query. + cloudSpannerJdbcConnection.setDataBoostEnabled(true); + // Sets the maximum number of partitions that should be used by Cloud Spanner. 
+ // This is just a hint that can be ignored by Cloud Spanner, but the mock server that is used + // for testing respects this hint. + cloudSpannerJdbcConnection.setMaxPartitions(maxPartitions); + cloudSpannerJdbcConnection.setAutoPartitionMode(true); + + try (ResultSet results = + connection.createStatement().executeQuery("select * from my_table where active=true")) { + assertNotNull(results.getMetaData()); + assertEquals(getExpectedColumnCount(dialect), results.getMetaData().getColumnCount()); + int rowCount = 0; + while (results.next()) { + rowCount++; + } + // The mock server is not smart enough to actually partition the query and only return + // a fraction of the rows per partition. The total row count will therefore be equal to + // the number of partitions multiplied by the number of rows. + assertEquals(numRows * maxPartitions, rowCount); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + + // Partitioned queries return a result set with some additional metadata that can be + // inspected to determine the number of partitions and the degree of parallelism that the + // query used. + assertEquals( + maxPartitions, results.unwrap(JdbcPartitionedQueryResultSet.class).getNumPartitions()); + assertEquals(1, results.unwrap(JdbcPartitionedQueryResultSet.class).getParallelism()); + + // Verify that we can run metadata queries in auto_partition_mode. + // Just add a random result for the table metadata query. We don't care about the result, + // only about the fact that it should be allowed, and that it is executed in normal mode. 
+ if (dialect == Dialect.GOOGLE_STANDARD_SQL) { + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "SELECT TABLE_CATALOG AS TABLE_CAT, TABLE_SCHEMA AS TABLE_SCHEM, TABLE_NAME,\n" + + " CASE WHEN TABLE_TYPE = 'BASE TABLE' THEN 'TABLE' ELSE TABLE_TYPE END AS TABLE_TYPE,\n" + + " NULL AS REMARKS, NULL AS TYPE_CAT, NULL AS TYPE_SCHEM, NULL AS TYPE_NAME,\n" + + " NULL AS SELF_REFERENCING_COL_NAME, NULL AS REF_GENERATION\n" + + "FROM INFORMATION_SCHEMA.TABLES AS T"), + SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + } else { + mockSpanner.putPartialStatementResult( + StatementResult.query( + Statement.of( + "/*\n" + + " * Copyright 2022 Google LLC\n" + + " *\n" + + " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" + + " * you may not use this file except in compliance with the License.\n" + + " * You may obtain a copy of the License at\n" + + " *\n" + + " * http://www.apache.org/licenses/LICENSE-2.0\n" + + " *\n" + + " * Unless required by applicable law or agreed to in writing, software\n" + + " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" + + " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + + " * See the License for the specific language governing permissions and\n" + + " * limitations under the License.\n" + + " */\n" + + "\n" + + "SELECT TABLE_CATALOG AS \"TABLE_CAT\", TABLE_SCHEMA AS \"TABLE_SCHEM\", TABLE_NAME AS \"TABLE_NAME\",\n" + + " CASE WHEN TABLE_TYPE = 'BASE TABLE' THEN 'TABLE' ELSE TABLE_TYPE END AS \"TABLE_TYPE\",\n" + + " NULL AS \"REMARKS\", NULL AS \"TYPE_CAT\", NULL AS \"TYPE_SCHEM\", NULL AS \"TYPE_NAME\",\n" + + " NULL AS \"SELF_REFERENCING_COL_NAME\", NULL AS \"REF_GENERATION\"\n" + + "FROM INFORMATION_SCHEMA.TABLES AS T"), + SELECT_COUNT_RESULTSET_BEFORE_INSERT)); + } + try (ResultSet tables = connection.getMetaData().getTables(null, null, null, null)) { + assertTrue(tables.next()); + assertEquals(0, tables.getInt(1)); + 
assertFalse(tables.next()); + } + assertEquals( + 1, + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(req -> req.getSql().contains("FROM INFORMATION_SCHEMA.TABLES AS T")) + .count()); + ExecuteSqlRequest request = + mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).stream() + .filter(req -> req.getSql().contains("FROM INFORMATION_SCHEMA.TABLES AS T")) + .findFirst() + .orElse(ExecuteSqlRequest.getDefaultInstance()); + assertTrue(request.hasTransaction()); + assertTrue(request.getTransaction().hasSingleUse()); + assertTrue(request.getTransaction().getSingleUse().hasReadOnly()); + assertTrue(request.getTransaction().getSingleUse().getReadOnly().hasStrong()); + assertEquals(ByteString.EMPTY, request.getPartitionToken()); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + } + } + } + + @Test + public void testAutoPartitionModeEmptyResult() throws SQLException { + int numRows = 0; + int maxPartitions = 1; + RandomResultSetGenerator generator = new RandomResultSetGenerator(numRows, dialect); + Statement statement = Statement.of("select * from my_table where active=true"); + mockSpanner.putStatementResult(StatementResult.query(statement, generator.generate())); + + try (Connection connection = createJdbcConnection()) { + CloudSpannerJdbcConnection cloudSpannerJdbcConnection = + connection.unwrap(CloudSpannerJdbcConnection.class); + // This will automatically enable Data Boost for any partitioned query that is executed on + // this connection. The property is ignored for any query that is not a partitioned query. + cloudSpannerJdbcConnection.setDataBoostEnabled(true); + // Sets the maximum number of partitions that should be used by Cloud Spanner. + // This is just a hint that can be ignored by Cloud Spanner, but the mock server that is used + // for testing respects this hint. 
+ cloudSpannerJdbcConnection.setMaxPartitions(maxPartitions); + cloudSpannerJdbcConnection.setAutoPartitionMode(true); + + try (ResultSet results = + connection.createStatement().executeQuery("select * from my_table where active=true")) { + assertNotNull(results.getMetaData()); + assertEquals(getExpectedColumnCount(dialect), results.getMetaData().getColumnCount()); + int rowCount = 0; + while (results.next()) { + rowCount++; + } + assertEquals(0, rowCount); + assertEquals(1, mockSpanner.countRequestsOfType(PartitionQueryRequest.class)); + + // Partitioned queries return a result set with some additional metadata that can be + // inspected to determine the number of partitions and the degree of parallelism that the + // query used. + assertEquals( + maxPartitions, results.unwrap(JdbcPartitionedQueryResultSet.class).getNumPartitions()); + assertEquals(1, results.unwrap(JdbcPartitionedQueryResultSet.class).getParallelism()); + } + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PgNumericPreparedStatementTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PgNumericPreparedStatementTest.java new file mode 100644 index 000000000000..922c2ec0e28d --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PgNumericPreparedStatementTest.java @@ -0,0 +1,338 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static org.junit.Assert.assertEquals; + +import com.google.cloud.spanner.Dialect; +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ListValue; +import com.google.protobuf.NullValue; +import com.google.protobuf.Struct; +import com.google.protobuf.Value; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.Type; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.math.BigDecimal; +import java.net.InetSocketAddress; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Types; +import java.util.Arrays; +import java.util.Map; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class PgNumericPreparedStatementTest { + + private static final String PROJECT = "my-project"; + private static final String INSTANCE = "my-instance"; + private static final String DATABASE = "my-database"; + private static final String QUERY = "INSERT INTO Table (col1) VALUES (?)"; + private static final String REWRITTEN_QUERY = "INSERT INTO Table (col1) VALUES ($1)"; + private static MockSpannerServiceImpl mockSpanner; + private static InetSocketAddress address; + private static Server server; + private Connection connection; + + @BeforeClass + public static void beforeClass() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); + 
mockSpanner.putStatementResult(StatementResult.detectDialectResult(Dialect.POSTGRESQL)); + + address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build().start(); + } + + @AfterClass + public static void afterClass() throws Exception { + SpannerPool.closeSpannerPool(); + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() throws Exception { + final String endpoint = address.getHostString() + ":" + server.getPort(); + final String url = + String.format( + "jdbc:cloudspanner://%s/projects/%s/instances/%s/databases/%s?usePlainText=true;dialect=POSTGRESQL", + endpoint, PROJECT, INSTANCE, DATABASE); + connection = DriverManager.getConnection(url); + mockSpanner.clearRequests(); + } + + @After + public void tearDown() throws Exception { + connection.close(); + } + + @Test + public void testSetByteAsObject() throws SQLException { + final Byte param = 1; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetShortAsObject() throws SQLException { + final Short param = 1; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetIntAsObject() throws SQLException { + final Integer param = 1; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + 
public void testSetLongAsObject() throws SQLException { + final Long param = 1L; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetFloatAsObject() throws SQLException { + final Float param = 1F; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetFloatNaNAsObject() throws SQLException { + final Float param = Float.NaN; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetDoubleAsObject() throws SQLException { + final Double param = 1D; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetDoubleNaNAsObject() throws SQLException { + final Double param = Double.NaN; + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetBigDecimalAsObject() throws SQLException { + final BigDecimal param = new 
BigDecimal("1.23"); + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetBigDecimalAsObjectWithoutExplicitType() throws SQLException { + final BigDecimal param = new BigDecimal("1.23"); + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setObject(1, param); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetBigDecimal() throws SQLException { + final BigDecimal param = new BigDecimal("1"); + + mockScalarUpdateWithParam(param.toString()); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setBigDecimal(1, param); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(param.toString()); + } + + @Test + public void testSetNull() throws SQLException { + mockScalarUpdateWithParam(null); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setNull(1, Types.NUMERIC); + preparedStatement.executeUpdate(); + } + assertRequestWithScalar(null); + } + + @Test + public void testSetNumericArray() throws SQLException { + final BigDecimal[] param = {BigDecimal.ONE, null, BigDecimal.TEN}; + + mockArrayUpdateWithParam(Arrays.asList("1", null, "10")); + try (PreparedStatement preparedStatement = connection.prepareStatement(QUERY)) { + preparedStatement.setArray(1, connection.createArrayOf("numeric", param)); + preparedStatement.executeUpdate(); + } + assertRequestWithArray(Arrays.asList("1", null, "10")); + } + + @Test + public void testSetNullArray() throws SQLException { + mockArrayUpdateWithParam(null); + try (PreparedStatement preparedStatement = 
connection.prepareStatement(QUERY)) { + preparedStatement.setArray(1, connection.createArrayOf("numeric", null)); + preparedStatement.executeUpdate(); + } + assertRequestWithArray(null); + } + + private void mockScalarUpdateWithParam(String value) { + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder(REWRITTEN_QUERY) + .bind("p1") + .to(com.google.cloud.spanner.Value.pgNumeric(value)) + .build(), + 1)); + } + + private void mockArrayUpdateWithParam(Iterable value) { + mockSpanner.putStatementResult( + StatementResult.update( + Statement.newBuilder(REWRITTEN_QUERY) + .bind("p1") + .to(com.google.cloud.spanner.Value.pgNumericArray(value)) + .build(), + 1)); + } + + private void assertRequestWithScalar(String value) { + final ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + final String actualSql = request.getSql(); + final Struct actualParams = request.getParams(); + final Map actualParamTypes = request.getParamTypesMap(); + + final Value parameterValue = protoValueFromString(value); + final Struct expectedParams = Struct.newBuilder().putFields("p1", parameterValue).build(); + final ImmutableMap expectedTypes = + ImmutableMap.of( + "p1", + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC) + .build()); + assertEquals(REWRITTEN_QUERY, actualSql); + assertEquals(expectedParams, actualParams); + assertEquals(expectedTypes, actualParamTypes); + } + + private void assertRequestWithArray(Iterable value) { + final ExecuteSqlRequest request = mockSpanner.getRequestsOfType(ExecuteSqlRequest.class).get(0); + final String actualSql = request.getSql(); + final Struct actualParams = request.getParams(); + final Map actualParamTypes = request.getParamTypesMap(); + + Value parameterValue; + if (value != null) { + final ListValue.Builder builder = ListValue.newBuilder(); + value.forEach(v -> builder.addValues(protoValueFromString(v))); + parameterValue = 
Value.newBuilder().setListValue(builder.build()).build(); + } else { + parameterValue = Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(); + } + final Struct expectedParams = Struct.newBuilder().putFields("p1", parameterValue).build(); + final ImmutableMap expectedTypes = + ImmutableMap.of( + "p1", + Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC)) + .build()); + assertEquals(REWRITTEN_QUERY, actualSql); + assertEquals(expectedParams, actualParams); + assertEquals(expectedTypes, actualParamTypes); + } + + private Value protoValueFromString(String value) { + if (value == null) { + return Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build(); + } else { + return Value.newBuilder().setStringValue(value).build(); + } + } +} diff --git a/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PgNumericResultSetTest.java b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PgNumericResultSetTest.java new file mode 100644 index 000000000000..db245402c475 --- /dev/null +++ b/java-spanner-jdbc/src/test/java/com/google/cloud/spanner/jdbc/PgNumericResultSetTest.java @@ -0,0 +1,803 @@ +/* + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner.jdbc; + +import static com.google.protobuf.NullValue.NULL_VALUE; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import com.google.cloud.spanner.MockSpannerServiceImpl; +import com.google.cloud.spanner.MockSpannerServiceImpl.StatementResult; +import com.google.cloud.spanner.connection.SpannerPool; +import com.google.cloud.spanner.jdbc.JdbcSqlExceptionFactory.JdbcSqlExceptionImpl; +import com.google.common.io.ByteSource; +import com.google.protobuf.ListValue; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.StructType; +import com.google.spanner.v1.StructType.Field; +import com.google.spanner.v1.TypeAnnotationCode; +import com.google.spanner.v1.TypeCode; +import io.grpc.Server; +import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder; +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.Date; +import java.sql.DriverManager; +import java.sql.NClob; +import java.sql.ResultSet; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class PgNumericResultSetTest { + + private static final String PROJECT = "my-project"; + private static final String INSTANCE = "my-instance"; + private static final String DATABASE = "my-database"; + private 
static final String COLUMN_NAME = "PgNumeric"; + private static final ResultSetMetadata SCALAR_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName(COLUMN_NAME) + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC)))) + .build(); + private static final ResultSetMetadata ARRAY_METADATA = + ResultSetMetadata.newBuilder() + .setRowType( + StructType.newBuilder() + .addFields( + Field.newBuilder() + .setName(COLUMN_NAME) + .setType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.ARRAY) + .setArrayElementType( + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.NUMERIC) + .setTypeAnnotation(TypeAnnotationCode.PG_NUMERIC))))) + .build(); + private static final String QUERY = "SELECT " + COLUMN_NAME + " FROM Table WHERE Id = 0"; + private static final int MAX_PG_NUMERIC_SCALE = 131_072; + private static final int MAX_PG_NUMERIC_PRECISION = 16_383; + + private static MockSpannerServiceImpl mockSpanner; + private static InetSocketAddress address; + private static Server server; + private Connection connection; + + @BeforeClass + public static void beforeClass() throws Exception { + mockSpanner = new MockSpannerServiceImpl(); + mockSpanner.setAbortProbability(0.0D); + + address = new InetSocketAddress("localhost", 0); + server = NettyServerBuilder.forAddress(address).addService(mockSpanner).build().start(); + } + + @AfterClass + public static void afterClass() throws Exception { + SpannerPool.closeSpannerPool(); + server.shutdown(); + server.awaitTermination(); + } + + @Before + public void setUp() throws Exception { + final String endpoint = address.getHostString() + ":" + server.getPort(); + final String url = + String.format( + "jdbc:cloudspanner://%s/projects/%s/instances/%s/databases/%s?usePlainText=true", + endpoint, PROJECT, INSTANCE, DATABASE); + connection = 
DriverManager.getConnection(url); + } + + @After + public void tearDown() throws Exception { + connection.close(); + } + + @Test + public void testGetString() throws Exception { + final String maxScale = String.join("", Collections.nCopies(MAX_PG_NUMERIC_SCALE, "1")); + final String maxPrecision = + "0." + String.join("", Collections.nCopies(MAX_PG_NUMERIC_PRECISION, "2")); + + mockScalarResults("0", "1", "1.23", maxScale, maxPrecision, "NaN", null); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getString, ResultSet::getString); + + matcher.nextAndAssertEquals("0"); + matcher.nextAndAssertEquals("1"); + matcher.nextAndAssertEquals("1.23"); + matcher.nextAndAssertEquals(maxScale); + matcher.nextAndAssertEquals(maxPrecision); + matcher.nextAndAssertEquals("NaN"); + matcher.nextAndAssertEquals(null); + } + } + + @Test + public void testGetNString() throws Exception { + final String maxScale = String.join("", Collections.nCopies(MAX_PG_NUMERIC_SCALE, "1")); + final String maxPrecision = + "0." 
+ String.join("", Collections.nCopies(MAX_PG_NUMERIC_PRECISION, "2")); + + mockScalarResults("0", "1", "1.23", maxScale, maxPrecision, "NaN", null); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getNString, ResultSet::getNString); + + matcher.nextAndAssertEquals("0"); + matcher.nextAndAssertEquals("1"); + matcher.nextAndAssertEquals("1.23"); + matcher.nextAndAssertEquals(maxScale); + matcher.nextAndAssertEquals(maxPrecision); + matcher.nextAndAssertEquals("NaN"); + matcher.nextAndAssertEquals(null); + } + } + + @Test + public void testGetBoolean() throws Exception { + mockScalarResults("0", null, "1", "NaN", "1.00"); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getBoolean, ResultSet::getBoolean); + + // 0 == false + matcher.nextAndAssertEquals(false); + // NULL == false + matcher.nextAndAssertEquals(false); + // anything else == true + matcher.nextAndAssertEquals(true); // "1" == true + matcher.nextAndAssertEquals(true); // "Nan" == true + matcher.nextAndAssertEquals(true); // "1.00" == true + } + } + + @Test + public void testGetByte() throws Exception { + final String minValue = Byte.MIN_VALUE + ""; + final String underflow = String.valueOf((int) Byte.MIN_VALUE - 1); + final String maxValue = Byte.MAX_VALUE + ""; + final String overflow = String.valueOf((int) Short.MAX_VALUE + 1); + + mockScalarResults(minValue, maxValue, "1.23", null, "NaN", underflow, overflow); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getByte, ResultSet::getByte); + + matcher.nextAndAssertEquals(Byte.MIN_VALUE); + 
matcher.nextAndAssertEquals(Byte.MAX_VALUE); + matcher.nextAndAssertEquals((byte) 1); + // NULL == 0 + matcher.nextAndAssertEquals((byte) 0); + matcher.nextAndAssertError(JdbcSqlExceptionImpl.class, "NaN is not a valid number"); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for byte: " + underflow); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for byte: " + overflow); + } + } + + @Test + public void testGetShort() throws Exception { + final String minValue = Short.MIN_VALUE + ""; + final String underflow = String.valueOf((int) Short.MIN_VALUE - 1); + final String maxValue = Short.MAX_VALUE + ""; + final String overflow = String.valueOf((int) Short.MAX_VALUE + 1); + + mockScalarResults(minValue, maxValue, "1.23", null, "NaN", underflow, overflow); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getShort, ResultSet::getShort); + + matcher.nextAndAssertEquals(Short.MIN_VALUE); + matcher.nextAndAssertEquals(Short.MAX_VALUE); + matcher.nextAndAssertEquals((short) 1); + // NULL == 0 + matcher.nextAndAssertEquals((short) 0); + matcher.nextAndAssertError(JdbcSqlExceptionImpl.class, "NaN is not a valid number"); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for short: " + underflow); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for short: " + overflow); + } + } + + @Test + public void testGetInt() throws Exception { + final String minValue = Integer.MIN_VALUE + ""; + final String underflow = String.valueOf((long) Integer.MIN_VALUE - 1L); + final String maxValue = Integer.MAX_VALUE + ""; + final String overflow = String.valueOf((long) Integer.MAX_VALUE + 1L); + + mockScalarResults(minValue, maxValue, "1.23", null, "NaN", underflow, overflow); + + try (Statement statement = 
connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getInt, ResultSet::getInt); + + matcher.nextAndAssertEquals(Integer.MIN_VALUE); + matcher.nextAndAssertEquals(Integer.MAX_VALUE); + matcher.nextAndAssertEquals(1); + // NULL == 0 + matcher.nextAndAssertEquals(0); + matcher.nextAndAssertError(JdbcSqlExceptionImpl.class, "NaN is not a valid number"); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for int: " + underflow); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for int: " + overflow); + } + } + + @Test + public void testGetLong() throws Exception { + final String minValue = Long.MIN_VALUE + ""; + final String underflow = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE).toString(); + final String maxValue = Long.MAX_VALUE + ""; + final String overflow = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE).toString(); + + mockScalarResults(minValue, maxValue, "1.23", null, "NaN", underflow, overflow); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getLong, ResultSet::getLong); + + matcher.nextAndAssertEquals(Long.MIN_VALUE); + matcher.nextAndAssertEquals(Long.MAX_VALUE); + matcher.nextAndAssertEquals(1L); + // NULL == 0 + matcher.nextAndAssertEquals((long) 0); + matcher.nextAndAssertError(JdbcSqlExceptionImpl.class, "NaN is not a valid number"); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for long: " + underflow); + matcher.nextAndAssertError( + JdbcSqlExceptionImpl.class, "Value out of range for long: " + overflow); + } + } + + // TODO(thiagotnunes): Confirm that it is ok to wrap around in under / over flows (like pg) + @Test + public void testGetFloat() throws Exception { + final 
String minValue = Float.MIN_VALUE + ""; + final String underflow = + BigDecimal.valueOf(Float.MIN_VALUE).subtract(BigDecimal.ONE).toString(); + final String maxValue = (Float.MAX_VALUE - 1) + ""; + final String overflow = BigDecimal.valueOf(Float.MAX_VALUE).add(BigDecimal.ONE).toString(); + + mockScalarResults( + minValue, maxValue, "1.23", null, "NaN", "-Infinity", "+Infinity", underflow, overflow); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getFloat, ResultSet::getFloat); + + matcher.nextAndAssertEquals(Float.MIN_VALUE); + matcher.nextAndAssertEquals(Float.MAX_VALUE); + matcher.nextAndAssertEquals(1.23F); + // NULL == 0 + matcher.nextAndAssertEquals(0F); + matcher.nextAndAssertEquals(Float.NaN); + matcher.nextAndAssertEquals(Float.NEGATIVE_INFINITY); + matcher.nextAndAssertEquals(Float.POSITIVE_INFINITY); + // Value rolls back to 0 + (underflow value) + matcher.nextAndAssertEquals(-1F); + // Value is capped at Float.MAX_VALUE + matcher.nextAndAssertEquals(Float.MAX_VALUE); + } + } + + @Test + public void testGetDouble() throws Exception { + final String minValue = Double.MIN_VALUE + ""; + final String underflow = + BigDecimal.valueOf(Double.MIN_VALUE).subtract(BigDecimal.ONE).toString(); + final String maxValue = (Double.MAX_VALUE - 1) + ""; + final String overflow = BigDecimal.valueOf(Double.MAX_VALUE).add(BigDecimal.ONE).toString(); + + mockScalarResults( + minValue, maxValue, "1.23", null, "NaN", "-Infinity", "+Infinity", underflow, overflow); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getDouble, ResultSet::getDouble); + + matcher.nextAndAssertEquals(Double.MIN_VALUE); + matcher.nextAndAssertEquals(Double.MAX_VALUE); + 
matcher.nextAndAssertEquals(1.23D); + // NULL == 0 + matcher.nextAndAssertEquals(0D); + matcher.nextAndAssertEquals(Double.NaN); + matcher.nextAndAssertEquals(Double.NEGATIVE_INFINITY); + matcher.nextAndAssertEquals(Double.POSITIVE_INFINITY); + // Value rolls back to 0 + (underflow value) + matcher.nextAndAssertEquals(-1D); + // Value is capped at Double.MAX_VALUE + matcher.nextAndAssertEquals(Double.MAX_VALUE); + } + } + + @Test + public void testGetBigDecimal() throws Exception { + final String maxScale = String.join("", Collections.nCopies(MAX_PG_NUMERIC_SCALE, "1")); + final String maxPrecision = + "0." + String.join("", Collections.nCopies(MAX_PG_NUMERIC_PRECISION, "2")); + + mockScalarResults(maxScale, maxPrecision, "0", "1.23", null, "NaN"); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getBigDecimal, ResultSet::getBigDecimal); + + // Default representation is BigDecimal + matcher.nextAndAssertEquals(new BigDecimal(maxScale)); + matcher.nextAndAssertEquals(new BigDecimal(maxPrecision)); + matcher.nextAndAssertEquals(BigDecimal.ZERO); + matcher.nextAndAssertEquals(new BigDecimal("1.23")); + matcher.nextAndAssertEquals(null); + matcher.nextAndAssertError(JdbcSqlExceptionImpl.class, "NaN is not a valid number"); + } + } + + @Test + public void testGetObject() throws Exception { + final String maxScale = String.join("", Collections.nCopies(MAX_PG_NUMERIC_SCALE, "1")); + final String maxPrecision = + "0." 
+ String.join("", Collections.nCopies(MAX_PG_NUMERIC_PRECISION, "2")); + + mockScalarResults(maxScale, maxPrecision, null, "NaN", "-Infinity", "+Infinity"); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getObject, ResultSet::getObject); + + // Default representation is BigDecimal + matcher.nextAndAssertEquals(new BigDecimal(maxScale)); + matcher.nextAndAssertEquals(new BigDecimal(maxPrecision)); + matcher.nextAndAssertEquals(null); + // Nan is represented as Double + matcher.nextAndAssertEquals(Double.NaN); + // -Infinity is represented as Double + matcher.nextAndAssertEquals(Double.NEGATIVE_INFINITY); + // +Infinity is represented as Double + matcher.nextAndAssertEquals(Double.POSITIVE_INFINITY); + } + } + + @Test + public void testGetDate() throws Exception { + mockScalarResults("1.23", null); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher matcher = + resultSetMatcherFrom(resultSet, ResultSet::getDate, ResultSet::getDate); + + matcher.nextAndAssertError(JdbcSqlExceptionImpl.class, "Invalid column type to get as date"); + matcher.nextAndAssertError(JdbcSqlExceptionImpl.class, "Invalid column type to get as date"); + } + } + + @Test + public void testGetTime() throws Exception { + mockScalarResults("1.23", null); + + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery(QUERY)) { + + final ResultSetMatcher