iface) throws SQLException {
+ if (isWrapperFor(iface)) {
+ return iface.cast(this);
+ }
+ throw JdbcSqlExceptionFactory.of(
+ "Cannot unwrap to " + iface.getName(), com.google.rpc.Code.INVALID_ARGUMENT);
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection.java
new file mode 100644
index 000000000000..50f1f5328f89
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcConnection.java
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.spanner.AbortedDueToConcurrentModificationException;
+import com.google.cloud.spanner.AbortedException;
+import com.google.cloud.spanner.CommitResponse;
+import com.google.cloud.spanner.CommitStats;
+import com.google.cloud.spanner.DatabaseClient;
+import com.google.cloud.spanner.DatabaseId;
+import com.google.cloud.spanner.Dialect;
+import com.google.cloud.spanner.Mutation;
+import com.google.cloud.spanner.Options.QueryOption;
+import com.google.cloud.spanner.PartitionOptions;
+import com.google.cloud.spanner.Spanner;
+import com.google.cloud.spanner.TimestampBound;
+import com.google.cloud.spanner.connection.AutocommitDmlMode;
+import com.google.cloud.spanner.connection.SavepointSupport;
+import com.google.cloud.spanner.connection.TransactionMode;
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.util.Iterator;
+import javax.annotation.Nonnull;
+
+/**
+ * JDBC connection with a number of additional Cloud Spanner specific methods. JDBC connections that
+ * are returned by the Cloud Spanner {@link JdbcDriver} will implement this interface.
+ *
+ * <p>Calling {@link Connection#unwrap(Class)} with {@code CloudSpannerJdbcConnection} class as
+ * input on a {@link Connection} returned by the Cloud Spanner JDBC Driver will return a {@link
+ * CloudSpannerJdbcConnection} instance.
+ */
+public interface CloudSpannerJdbcConnection extends Connection {
+
+ /**
+ * Returns the {@link DatabaseId} of the database that this {@link Connection} is connected to.
+ */
+ default DatabaseId getDatabaseId() {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Returns the underlying {@link DatabaseClient} that is used by this connection. Operations that
+ * are executed on the {@link DatabaseClient} that is returned has no impact on this {@link
+ * Connection}, e.g. starting a read/write transaction on the {@link DatabaseClient} will not
+ * start a transaction on this connection.
+ */
+ default DatabaseClient getDatabaseClient() {
+ throw new UnsupportedOperationException();
+ }
+
+ /** Returns the underlying {@link Spanner} instance that is used by this connection. */
+ default Spanner getSpanner() {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets the transaction tag to use for the current transaction. This method may only be called
+ * when in a transaction, and before the transaction is actually started, i.e. before any
+ * statements have been executed in the transaction.
+ *
+ *
+ * <p>The tag will be set as the transaction tag of all statements during the transaction, and as
+ * the transaction tag of the commit.
+ *
+ *
+ * <p>The transaction tag will automatically be cleared after the transaction has ended.
+ *
+ * @param tag The tag to use.
+ */
+ default void setTransactionTag(String tag) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * @return The transaction tag of the current transaction.
+ */
+ default String getTransactionTag() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets the statement tag to use for the next statement that will be executed. The tag is
+ * automatically cleared after the statement is executed. Statement tags can be used both with
+ * autocommit=true and autocommit=false, and can be used for partitioned DML.
+ *
+ * @param tag The statement tag to use with the next statement that will be executed on this
+ * connection.
+ */
+ default void setStatementTag(String tag) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * @return The statement tag that will be used with the next statement that is executed on this
+ * connection.
+ */
+ default String getStatementTag() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets the transaction mode to use for current transaction. This method may only be called when
+ * in a transaction, and before the transaction is actually started, i.e. before any statements
+ * have been executed in the transaction.
+ *
+ * @param transactionMode The transaction mode to use for the current transaction.
+ *     <ul>
+ *       <li>{@link TransactionMode#READ_ONLY_TRANSACTION} will create a read-only transaction and
+ *           prevent any changes to written to the database through this transaction. The read
+ *           timestamp to be used will be determined based on the current readOnlyStaleness
+ *           setting of this connection. It is recommended to use {@link
+ *           TransactionMode#READ_ONLY_TRANSACTION} instead of {@link
+ *           TransactionMode#READ_WRITE_TRANSACTION} when possible, as read-only transactions do
+ *           not acquire locks on Cloud Spanner, and read-only transactions never abort.
+ *       <li>{@link TransactionMode#READ_WRITE_TRANSACTION} this value is only allowed when the
+ *           connection is not in read-only mode and will create a read-write transaction. If
+ *           {@link CloudSpannerJdbcConnection#isRetryAbortsInternally()} is <code>true</code>,
+ *           each read/write transaction will keep track of a running SHA256 checksum for each
+ *           {@link ResultSet} that is returned in order to be able to retry the transaction in
+ *           case the transaction is aborted by Spanner.
+ *     </ul>
+ */
+ void setTransactionMode(TransactionMode transactionMode) throws SQLException;
+
+ /**
+ * @return the transaction mode of the current transaction. This method may only be called when
+ * the connection is in a transaction.
+ */
+ TransactionMode getTransactionMode() throws SQLException;
+
+ /**
+ * Sets the mode for executing DML statements in autocommit mode for this connection. This setting
+ * is only used when the connection is in autocommit mode, and may only be set while the
+ * transaction is in autocommit mode and not in a temporary transaction. The autocommit
+ * transaction mode is reset to its default value of {@link AutocommitDmlMode#TRANSACTIONAL} when
+ * autocommit mode is changed on the connection.
+ *
+ * @param mode The DML autocommit mode to use
+ *     <ul>
+ *       <li>{@link AutocommitDmlMode#TRANSACTIONAL} DML statements are executed as single
+ *           read-write transaction. After successful execution, the DML statement is guaranteed
+ *           to have been applied exactly once to the database
+ *       <li>{@link AutocommitDmlMode#PARTITIONED_NON_ATOMIC} DML statements are executed as
+ *           partitioned DML transactions. If an error occurs during the execution of the DML
+ *           statement, it is possible that the statement has been applied to some but not all of
+ *           the rows specified in the statement.
+ *     </ul>
+ */
+ void setAutocommitDmlMode(AutocommitDmlMode mode) throws SQLException;
+
+ /**
+ * @return the current {@link AutocommitDmlMode} setting for this connection. This method may only
+ * be called on a connection that is in autocommit mode and not while in a temporary
+ * transaction.
+ */
+ AutocommitDmlMode getAutocommitDmlMode() throws SQLException;
+
+ /**
+ * Sets the staleness to use for the current read-only transaction. This method may only be called
+ * when the transaction mode of the current transaction is {@link
+ * TransactionMode#READ_ONLY_TRANSACTION} and there is no transaction that has started, or when
+ * the connection is in read-only and autocommit mode.
+ *
+ * @param staleness The staleness to use for the current but not yet started read-only transaction
+ */
+ void setReadOnlyStaleness(TimestampBound staleness) throws SQLException;
+
+ /**
+ * @return the read-only staleness setting for the current read-only transaction. This method may
+ * only be called when the current transaction is a read-only transaction, or when the
+ * connection is in read-only and autocommit mode.
+ */
+ TimestampBound getReadOnlyStaleness() throws SQLException;
+
+ /**
+ * Sets the query optimizer version to use for this connection.
+ *
+ * @param optimizerVersion The query optimizer version to use. Must be a valid optimizer version
+ * number, the string LATEST or an empty string. The empty string will instruct
+ * the connection to use the optimizer version that is defined in the environment variable
+ * SPANNER_OPTIMIZER_VERSION. If no value is specified in the environment
+ * variable, the default query optimizer of Cloud Spanner is used.
+ */
+ void setOptimizerVersion(String optimizerVersion) throws SQLException;
+
+ /**
+ * Gets the current query optimizer version of this connection.
+ *
+ * @return The query optimizer version that is currently used by this connection.
+ */
+ String getOptimizerVersion() throws SQLException;
+
+ /**
+ * @return true if this connection has a transaction (that has not necessarily
+ * started). This method will only return false when the {@link Connection} is in autocommit
+ * mode and no explicit transaction has been started by executing `BEGIN TRANSACTION`. If the
+ * {@link Connection} is not in autocommit mode, there will always be a transaction.
+ */
+ boolean isInTransaction() throws SQLException;
+
+ /**
+ * @return true if this connection has a transaction that has started. A transaction
+ * is automatically started by the first statement that is executed in the transaction.
+ */
+ boolean isTransactionStarted() throws SQLException;
+
+ /**
+ * @return the commit {@link Timestamp} of the last read/write transaction. If the last
+ * transaction was not a read/write transaction, or a read/write transaction that did not
+ * return a commit timestamp because the transaction was not committed, the method will throw
+ * a {@link SQLException}.
+ */
+ Timestamp getCommitTimestamp() throws SQLException;
+
+ /**
+ * @return the {@link CommitResponse} of the last read/write transaction. If the last transaction
+ * was not a read/write transaction, or a read/write transaction that did not return a {@link
+ * CommitResponse} because the transaction was not committed, the method will throw a {@link
+ * SQLException}. The {@link CommitResponse} will include {@link CommitStats} if {@link
+ * #isReturnCommitStats()} returns true.
+ */
+ CommitResponse getCommitResponse() throws SQLException;
+
+ /**
+ * Sets whether this connection should request commit statistics from Cloud Spanner for read/write
+ * transactions and for DML statements in autocommit mode.
+ */
+ void setReturnCommitStats(boolean returnCommitStats) throws SQLException;
+
+ /**
+ * @return true if this connection requests commit statistics from Cloud Spanner.
+ */
+ boolean isReturnCommitStats() throws SQLException;
+
+ /**
+ * @return the read {@link Timestamp} of the last read-only transaction. If the last transaction
+ * was not a read-only transaction, or a read-only transaction that did not return a read
+ * timestamp because no data was read, the method will throw a {@link SQLException}.
+ */
+ Timestamp getReadTimestamp() throws SQLException;
+
+ /**
+ * @return true if this connection will automatically retry read/write transactions
+ * that abort. This method may only be called when the connection is in read/write
+ * transactional mode and no transaction has been started yet.
+ */
+ boolean isRetryAbortsInternally() throws SQLException;
+
+ /**
+ * Sets whether this connection will internally retry read/write transactions that abort. The
+ * default is true. When internal retry is enabled, the {@link Connection} will keep
+ * track of a running SHA256 checksum of all {@link ResultSet}s that have been returned from Cloud
+ * Spanner. If the checksum that is calculated during an internal retry differs from the original
+ * checksum, the transaction will abort with an {@link
+ * AbortedDueToConcurrentModificationException}.
+ *
+ * <p>Note that retries of a read/write transaction that calls a non-deterministic function on
+ * Cloud Spanner, such as CURRENT_TIMESTAMP(), will never be successful, as the data returned
+ * during the retry will always be different from the original transaction.
+ *
+ * <p>It is also highly recommended that all queries in a read/write transaction have an ORDER BY
+ * clause that guarantees that the data is returned in the same order as in the original
+ * transaction if the transaction is internally retried. The most efficient way to achieve this is
+ * to always include the primary key columns at the end of the ORDER BY clause.
+ *
+ * <p>This method may only be called when the connection is in read/write transactional mode and
+ * no transaction has been started yet.
+ *
+ * @param retryAbortsInternally Set to true to internally retry transactions that are
+ * aborted by Spanner. When set to false, any database call on a transaction that
+ * has been aborted by Cloud Spanner will throw an {@link AbortedException} instead of being
+ * retried. Set this to false if your application already uses retry loops to handle {@link
+ * AbortedException}s.
+ */
+ void setRetryAbortsInternally(boolean retryAbortsInternally) throws SQLException;
+
+ /** Returns the current savepoint support for this connection. */
+ SavepointSupport getSavepointSupport() throws SQLException;
+
+ /** Sets how savepoints should be supported on this connection. */
+ void setSavepointSupport(SavepointSupport savepointSupport) throws SQLException;
+
+ /**
+ * Writes the specified mutation directly to the database and commits the change. The value is
+ * readable after the successful completion of this method. Writing multiple mutations to a
+ * database by calling this method multiple times mode is inefficient, as each call will need a
+ * round trip to the database. Instead, you should consider writing the mutations together by
+ * calling {@link CloudSpannerJdbcConnection#write(Iterable)}.
+ *
+ * <p>Calling this method is only allowed in autocommit mode. See {@link
+ * CloudSpannerJdbcConnection#bufferedWrite(Iterable)} for writing mutations in transactions.
+ *
+ * @param mutation The {@link Mutation} to write to the database.
+ * @throws SQLException if the {@link Connection} is not in autocommit mode or if the {@link
+ * Connection} is closed.
+ */
+ void write(Mutation mutation) throws SQLException;
+
+ /**
+ * Writes the specified mutations directly to the database and commits the changes. The values are
+ * readable after the successful completion of this method.
+ *
+ * <p>Calling this method is only allowed in autocommit mode. See {@link
+ * CloudSpannerJdbcConnection#bufferedWrite(Iterable)} for writing mutations in transactions.
+ *
+ * @param mutations The {@link Mutation}s to write to the database.
+ * @throws SQLException if the {@link Connection} is not in autocommit mode or if the {@link
+ * Connection} is closed.
+ */
+ void write(Iterable<Mutation> mutations) throws SQLException;
+
+ /**
+ * Buffers the given mutation locally on the current transaction of this {@link Connection}. The
+ * mutation will be written to the database at the next call to {@link Connection#commit()}. The
+ * value will not be readable on this {@link Connection} before the transaction is committed.
+ *
+ * <p>Calling this method is only allowed when not in autocommit mode. See {@link
+ * CloudSpannerJdbcConnection#write(Mutation)} for writing mutations in autocommit mode.
+ *
+ * @param mutation the {@link Mutation} to buffer for writing to the database on the next commit.
+ * @throws SQLException if the {@link Connection} is in autocommit mode or the {@link Connection}
+ * is closed.
+ */
+ void bufferedWrite(Mutation mutation) throws SQLException;
+
+ /**
+ * Buffers the given mutations locally on the current transaction of this {@link Connection}. The
+ * mutations will be written to the database at the next call to {@link Connection#commit()}. The
+ * values will not be readable on this {@link Connection} before the transaction is committed.
+ *
+ * <p>Calling this method is only allowed when not in autocommit mode. See {@link
+ * CloudSpannerJdbcConnection#write(Iterable)} for writing mutations in autocommit mode.
+ *
+ * @param mutations the {@link Mutation}s to buffer for writing to the database on the next
+ * commit.
+ * @throws SQLException if the {@link Connection} is in autocommit mode or the {@link Connection}
+ * is closed.
+ */
+ void bufferedWrite(Iterable<Mutation> mutations) throws SQLException;
+
+ /**
+ * @return a connection URL that can be used to create a new {@link Connection} that is equal to
+ * the initial state of this connection. If this connection was initially opened in read-only
+ * mode, and later changed to read-write, this will not be reflected in the connection URL
+ * that is returned.
+ */
+ String getConnectionUrl();
+
+ /**
+ * @return The {@link Dialect} that is used by this connection.
+ */
+ default Dialect getDialect() {
+ return Dialect.GOOGLE_STANDARD_SQL;
+ }
+
+ /**
+ * Enable data boost for partitioned queries. See also {@link
+ * CloudSpannerJdbcStatement#partitionQuery(String, PartitionOptions, QueryOption...)} and {@link
+ * CloudSpannerJdbcPreparedStatement#partitionQuery(PartitionOptions, QueryOption...)}.
+ */
+ default void setDataBoostEnabled(boolean dataBoostEnabled) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Returns whether data boost is enabled for partitioned queries. See also {@link
+ * CloudSpannerJdbcStatement#partitionQuery(String, PartitionOptions, QueryOption...)} and {@link
+ * CloudSpannerJdbcPreparedStatement#partitionQuery(PartitionOptions, QueryOption...)}.
+ */
+ default boolean isDataBoostEnabled() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets whether this connection should always use partitioned queries when a query is executed on
+ * this connection. Setting this flag to true and then executing a query that cannot
+ * be partitioned, or executing a query in a read/write transaction, will cause an error. Use this
+ * flag in combination with {@link #setDataBoostEnabled(boolean)} to force all queries on this
+ * connection to use data boost.
+ */
+ default void setAutoPartitionMode(boolean alwaysUsePartitionedQueries) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /** Returns whether this connection will execute all queries as partitioned queries. */
+ default boolean isAutoPartitionMode() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets the maximum number of partitions that should be included as a hint to Cloud Spanner when
+ * partitioning a query on this connection. Note that this is only a hint and Cloud Spanner might
+ * choose to ignore the hint.
+ */
+ default void setMaxPartitions(int maxPartitions) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Gets the maximum number of partitions that should be included as a hint to Cloud Spanner when
+ * partitioning a query on this connection. Note that this is only a hint and Cloud Spanner might
+ * choose to ignore the hint.
+ */
+ default int getMaxPartitions() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets the maximum degree of parallelism that is used when executing a partitioned query. A
+ * partitioned query will use up to <code>maxThreads</code> to execute and retrieve the results
+ * from Cloud Spanner. Set this value to <code>0</code> to use the number of available processors
+ * as returned by {@link Runtime#availableProcessors()}.
+ */
+ default void setMaxPartitionedParallelism(int maxThreads) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /** Returns the maximum degree of parallelism that is used for partitioned queries. */
+ default int getMaxPartitionedParallelism() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Enables or disables automatic batching of DML statements. When enabled, DML statements that are
+ * executed on this connection will be buffered in memory instead of actually being executed. The
+ * buffered DML statements are flushed to Spanner when a statement that cannot be part of a DML
+ * batch is executed on the connection. This can be a query, a DDL statement with a THEN RETURN
+ * clause, or a Commit call. The update count that is returned for DML statements that are
+ * buffered is determined by the value that has been set with {@link
+ * #setAutoBatchDmlUpdateCount(long)}. The default is 1. The connection verifies that the update
+ * counts that were returned while buffering DML statements match the actual update counts that
+ * are returned by Spanner when the batch is executed. This verification can be disabled by
+ * calling {@link #setAutoBatchDmlUpdateCountVerification(boolean)}.
+ */
+ default void setAutoBatchDml(boolean autoBatchDml) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /** Returns whether automatic DML batching is enabled on this connection. */
+ default boolean isAutoBatchDml() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets the update count that is returned for DML statements that are buffered during an automatic
+ * DML batch. This value is only used if {@link #isAutoBatchDml()} is enabled.
+ */
+ default void setAutoBatchDmlUpdateCount(long updateCount) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Returns the update count that is returned for DML statements that are buffered during an
+ * automatic DML batch.
+ */
+ default long getAutoBatchDmlUpdateCount() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets whether the update count that is returned by Spanner after executing an automatic DML
+ * batch should be verified against the update counts that were returned during the buffering of
+ * those statements.
+ */
+ default void setAutoBatchDmlUpdateCountVerification(boolean verification) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /** Indicates whether the update counts of automatic DML batches should be verified. */
+ default boolean isAutoBatchDmlUpdateCountVerification() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * @see
+ * com.google.cloud.spanner.connection.Connection#addTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener)
+ * @throws SQLException if the {@link Connection} is closed.
+ */
+ void addTransactionRetryListener(
+ com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException;
+
+ /**
+ * Use {@link
+ * #addTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener)}
+ * instead.
+ */
+ @Deprecated
+ void addTransactionRetryListener(com.google.cloud.spanner.jdbc.TransactionRetryListener listener)
+ throws SQLException;
+
+ /**
+ * @see
+ * com.google.cloud.spanner.connection.Connection#removeTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener)
+ * @throws SQLException if the {@link Connection} is closed.
+ */
+ boolean removeTransactionRetryListener(
+ com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException;
+
+ /**
+ * Use {@link
+ * #removeTransactionRetryListener(com.google.cloud.spanner.connection.TransactionRetryListener)}
+ * instead.
+ */
+ @Deprecated
+ boolean removeTransactionRetryListener(
+ com.google.cloud.spanner.jdbc.TransactionRetryListener listener) throws SQLException;
+
+ /** Use {@link #getTransactionRetryListenersFromConnection()} instead. */
+ @Deprecated
+ Iterator<com.google.cloud.spanner.jdbc.TransactionRetryListener> getTransactionRetryListeners()
+ throws SQLException;
+
+ /**
+ * @see com.google.cloud.spanner.connection.Connection#getTransactionRetryListeners()
+ * @throws SQLException if the {@link Connection} is closed.
+ */
+ Iterator<com.google.cloud.spanner.connection.TransactionRetryListener>
+ getTransactionRetryListenersFromConnection() throws SQLException;
+
+ /**
+ * Sets the proto descriptors to use for the next DDL statement (single or batch) that will be
+ * executed. The proto descriptor is automatically cleared after the statement is executed.
+ *
+ * @param protoDescriptors The proto descriptors to use with the next DDL statement (single or
+ * batch) that will be executed on this connection.
+ */
+ default void setProtoDescriptors(@Nonnull byte[] protoDescriptors) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Sets the proto descriptors to use for the next DDL statement (single or batch) that will be
+ * executed. The proto descriptor is automatically cleared after the statement is executed.
+ *
+ * @param protoDescriptors The proto descriptors to use with the next DDL statement (single or
+ * batch) that will be executed on this connection.
+ */
+ default void setProtoDescriptors(@Nonnull InputStream protoDescriptors)
+ throws SQLException, IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * @return The proto descriptor that will be used with the next DDL statement (single or batch)
+ * that is executed on this connection.
+ */
+ default byte[] getProtoDescriptors() throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcPartitionedQueryResultSet.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcPartitionedQueryResultSet.java
new file mode 100644
index 000000000000..3b6118fba906
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcPartitionedQueryResultSet.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.spanner.Options.QueryOption;
+import com.google.cloud.spanner.PartitionOptions;
+import java.sql.ResultSet;
+
+/**
+ * Result set that is returned for partitioned queries, e.g. for 'run partitioned query select ...'
+ * or for {@link CloudSpannerJdbcPreparedStatement#runPartitionedQuery(PartitionOptions,
+ * QueryOption...)}.
+ */
+public interface CloudSpannerJdbcPartitionedQueryResultSet extends ResultSet {
+ /** Returns the number of partitions that this result set contains. */
+ int getNumPartitions();
+
+ /** Returns the degree of parallelism that this result set uses. */
+ int getParallelism();
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcPreparedStatement.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcPreparedStatement.java
new file mode 100644
index 000000000000..8e72d9556b0c
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcPreparedStatement.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.spanner.Options.QueryOption;
+import com.google.cloud.spanner.PartitionOptions;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+/**
+ * This interface is implemented by {@link PreparedStatement}s that are created on Cloud Spanner
+ * JDBC connections.
+ */
+public interface CloudSpannerJdbcPreparedStatement extends PreparedStatement {
+
+ /**
+ * Partitions this query, so it can be executed in parallel. This method returns a {@link
+ * ResultSet} with a string-representation of the partitions that were created. These strings can
+ * be used to execute a partition either on this connection or an any other connection (on this
+ * host or an any other host) by calling the method {@link #runPartition()}. This method will
+ * automatically enable data boost for the query if {@link
+ * CloudSpannerJdbcConnection#isDataBoostEnabled()} returns true.
+ */
+ ResultSet partitionQuery(PartitionOptions partitionOptions, QueryOption... options)
+ throws SQLException;
+
+ /**
+ * Executes the given partition of a query. The partition that should be executed must be set as a
+ * string parameter on this {@link PreparedStatement} using {@link #setString(int, String)}. The
+ * value should be a string that was returned by {@link #partitionQuery(PartitionOptions,
+ * QueryOption...)}.
+ */
+ ResultSet runPartition() throws SQLException;
+
+ /**
+ * Executes the given query as a partitioned query. The query will first be partitioned using the
+ * {@link #partitionQuery(PartitionOptions, QueryOption...)} method. Each of the partitions will
+ * then be executed in the background, and the results will be merged into a single result set.
+ *
+ * <p>This method will use {@link CloudSpannerJdbcConnection#getMaxPartitionedParallelism()}
+ * threads to execute the partitioned query. Set this variable to a higher/lower value to
+ * increase/decrease the degree of parallelism used for execution.
+ */
+ CloudSpannerJdbcPartitionedQueryResultSet runPartitionedQuery(
+ PartitionOptions partitionOptions, QueryOption... options) throws SQLException;
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcStatement.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcStatement.java
new file mode 100644
index 000000000000..6115c0b23812
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/CloudSpannerJdbcStatement.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.spanner.Options.QueryOption;
+import com.google.cloud.spanner.PartitionOptions;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+/**
+ * This interface is implemented by {@link Statement}s that are created on Cloud Spanner JDBC
+ * connections.
+ */
+public interface CloudSpannerJdbcStatement extends Statement {
+
+ /**
+ * Partitions the given query, so it can be executed in parallel. This method returns a {@link
+ * ResultSet} with a string-representation of the partitions that were created. These strings can
+ * be used to execute a partition either on this connection or any other connection (on this
+ * host or any other host) by calling the method {@link #runPartition(String)}. This method
+ * will automatically enable data boost for the query if {@link
+ * CloudSpannerJdbcConnection#isDataBoostEnabled()} returns true.
+ */
+ ResultSet partitionQuery(String query, PartitionOptions partitionOptions, QueryOption... options)
+ throws SQLException;
+
+ /**
+ * Executes the given partition of a query. The encodedPartitionId should be a string that was
+ * returned by {@link #partitionQuery(String, PartitionOptions, QueryOption...)}.
+ */
+ ResultSet runPartition(String encodedPartitionId) throws SQLException;
+
+ /**
+ * Executes the given query as a partitioned query. The query will first be partitioned using the
+ * {@link #partitionQuery(String, PartitionOptions, QueryOption...)} method. Each of the
+ * partitions will then be executed in the background, and the results will be merged into a
+ * single result set.
+ *
+ * <p>This method will use {@link CloudSpannerJdbcConnection#getMaxPartitionedParallelism()}
+ * threads to execute the partitioned query. Set this variable to a higher/lower value to
+ * increase/decrease the degree of parallelism used for execution.
+ */
+ CloudSpannerJdbcPartitionedQueryResultSet runPartitionedQuery(
+ String query, PartitionOptions partitionOptions, QueryOption... options) throws SQLException;
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ConnectionPropertiesHelper.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ConnectionPropertiesHelper.java
new file mode 100644
index 000000000000..4f8db86b0bfb
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/ConnectionPropertiesHelper.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.spanner.connection.ConnectionProperties;
+import com.google.cloud.spanner.connection.ConnectionProperty;
+import com.google.common.collect.ImmutableList;
+import java.sql.DriverPropertyInfo;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+class ConnectionPropertiesHelper {
+ static ImmutableList<ConnectionProperty<?>> VALID_CONNECTION_PROPERTIES =
+ ImmutableList.copyOf(
+ ConnectionProperties.VALID_CONNECTION_PROPERTIES.stream()
+ .sorted(Comparator.comparing(ConnectionProperty::getName))
+ .collect(Collectors.toList()));
+
+ static DriverPropertyInfo toDriverPropertyInfo(
+ String connectionUri, ConnectionProperty<?> connectionProperty) {
+ DriverPropertyInfo result =
+ new DriverPropertyInfo(
+ connectionProperty.getName(),
+ parseUriProperty(
+ connectionUri,
+ connectionProperty.getName(),
+ connectionProperty.getDefaultValue() == null
+ ? null
+ : connectionProperty.getDefaultValue().toString()));
+ result.description = connectionProperty.getDescription();
+ result.choices =
+ connectionProperty.getValidValues() == null
+ ? null
+ : Arrays.stream(connectionProperty.getValidValues())
+ .map(Objects::toString)
+ .toArray(String[]::new);
+ return result;
+ }
+
+ static String getConnectionPropertyName(ConnectionProperty<?> connectionProperty) {
+ return connectionProperty.getName();
+ }
+
+ private static String parseUriProperty(String uri, String property, String defaultValue) {
+ Pattern pattern = Pattern.compile(String.format("(?is)(?:;|\\?)%s=(.*?)(?:;|$)", property));
+ Matcher matcher = pattern.matcher(uri);
+ if (matcher.find() && matcher.groupCount() == 1) {
+ return matcher.group(1);
+ }
+ return defaultValue;
+ }
+
+ private ConnectionPropertiesHelper() {}
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/IsolationLevelConverter.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/IsolationLevelConverter.java
new file mode 100644
index 000000000000..cabf08057734
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/IsolationLevelConverter.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.spanner.v1.TransactionOptions.IsolationLevel;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+
+class IsolationLevelConverter {
+ static IsolationLevel convertToSpanner(int jdbcIsolationLevel) throws SQLException {
+ switch (jdbcIsolationLevel) {
+ case Connection.TRANSACTION_SERIALIZABLE:
+ return IsolationLevel.SERIALIZABLE;
+ case Connection.TRANSACTION_REPEATABLE_READ:
+ return IsolationLevel.REPEATABLE_READ;
+ case Connection.TRANSACTION_READ_COMMITTED:
+ case Connection.TRANSACTION_READ_UNCOMMITTED:
+ case Connection.TRANSACTION_NONE:
+ throw new SQLFeatureNotSupportedException(
+ "Unsupported JDBC isolation level: " + jdbcIsolationLevel);
+ default:
+ throw new IllegalArgumentException("Invalid JDBC isolation level: " + jdbcIsolationLevel);
+ }
+ }
+
+ static int convertToJdbc(IsolationLevel isolationLevel) {
+ switch (isolationLevel) {
+ // Translate UNSPECIFIED to SERIALIZABLE as that is the default isolation level.
+ case ISOLATION_LEVEL_UNSPECIFIED:
+ case SERIALIZABLE:
+ return Connection.TRANSACTION_SERIALIZABLE;
+ case REPEATABLE_READ:
+ return Connection.TRANSACTION_REPEATABLE_READ;
+ default:
+ throw new IllegalArgumentException(
+ "Unknown or unsupported isolation level: " + isolationLevel);
+ }
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcArray.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcArray.java
new file mode 100644
index 000000000000..3cb523186517
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcArray.java
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.ByteArray;
+import com.google.cloud.spanner.ResultSets;
+import com.google.cloud.spanner.Struct;
+import com.google.cloud.spanner.Type;
+import com.google.cloud.spanner.Type.StructField;
+import com.google.cloud.spanner.Value;
+import com.google.cloud.spanner.ValueBinder;
+import com.google.common.collect.ImmutableList;
+import com.google.protobuf.AbstractMessage;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Descriptors.Descriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.ProtocolMessageEnum;
+import com.google.rpc.Code;
+import java.math.BigDecimal;
+import java.sql.Array;
+import java.sql.Date;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.sql.Timestamp;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+/** Implementation of java.sql.Array for Google Cloud Spanner */
+class JdbcArray implements Array {
+ private static final String FREE_EXCEPTION =
+ "free() has been called, array is no longer available";
+
+ private final JdbcDataType type;
+ private Object data;
+ private boolean freed = false;
+
+ /**
+ * Create a JDBC {@link Array} from the given type name and array elements.
+ *
+ * @param typeName The Google Cloud Spanner type name to be used as the base type of the array.
+ * @param elements The elements to store in the array.
+ * @return the initialized {@link Array}.
+ * @throws SQLException if the type name is not a valid Cloud Spanner type or if the contents of
+ * the elements array is not compatible with the base type of the array.
+ */
+ static JdbcArray createArray(String typeName, Object[] elements) throws SQLException {
+ if (typeName != null) {
+ for (JdbcDataType type : JdbcDataType.values()) {
+ if (type.matches(typeName)) {
+ return new JdbcArray(type, elements);
+ }
+ }
+ }
+ throw JdbcSqlExceptionFactory.of(
+ "Data type " + typeName + " is unknown", Code.INVALID_ARGUMENT);
+ }
+
+ /**
+ * Create a JDBC {@link Array} from the given type name and list.
+ *
+ * @param type The Google Cloud Spanner type to be used as the base type of the array.
+ * @param elements The elements to store in the array.
+ * @return the initialized {@link Array}.
+ */
+ static JdbcArray createArray(JdbcDataType type, List<?> elements) {
+ return new JdbcArray(type, elements);
+ }
+
+ private JdbcArray(JdbcDataType type, Object[] elements) throws SQLException {
+ this.type = type;
+ if (elements != null) {
+ if ((type.getCode() == Type.Code.PROTO
+ && AbstractMessage[].class.isAssignableFrom(elements.getClass()))
+ || (type.getCode() == Type.Code.ENUM
+ && ProtocolMessageEnum[].class.isAssignableFrom(elements.getClass()))) {
+ this.data =
+ java.lang.reflect.Array.newInstance(
+ elements.getClass().getComponentType(), elements.length);
+ System.arraycopy(elements, 0, this.data, 0, elements.length);
+ } else if (type == JdbcDataType.INT64 && requiresWideningToLong(elements)) {
+ // Convert Byte[], Short[], and Integer[] to Long[] for INT64 type
+ // since Spanner only supports ARRAY<INT64>
+ this.data = convertToLongArray(elements);
+ } else {
+ this.data = java.lang.reflect.Array.newInstance(type.getJavaClass(), elements.length);
+ try {
+ System.arraycopy(elements, 0, this.data, 0, elements.length);
+ } catch (Exception e) {
+ throw JdbcSqlExceptionFactory.of(
+ "Could not copy array elements. Make sure the supplied array only contains elements of class "
+ + type.getJavaClass().getName(),
+ Code.UNKNOWN,
+ e);
+ }
+ }
+ }
+ }
+
+ private static boolean requiresWideningToLong(Object[] elements) {
+ Class<?> componentType = elements.getClass().getComponentType();
+ return componentType == Byte.class
+ || componentType == Short.class
+ || componentType == Integer.class;
+ }
+
+ private static Long[] convertToLongArray(Object[] elements) {
+ Long[] longElements = new Long[elements.length];
+ for (int i = 0; i < elements.length; i++) {
+ if (elements[i] != null) {
+ longElements[i] = ((Number) elements[i]).longValue();
+ }
+ }
+ return longElements;
+ }
+
+ private JdbcArray(JdbcDataType type, List<?> elements) {
+ this.type = type;
+ if (elements != null) {
+ this.data = java.lang.reflect.Array.newInstance(type.getJavaClass(), elements.size());
+ elements.toArray((Object[]) data);
+ }
+ }
+
+ private void checkFree() throws SQLException {
+ if (freed) {
+ throw JdbcSqlExceptionFactory.of(FREE_EXCEPTION, Code.FAILED_PRECONDITION);
+ }
+ }
+
+ @Override
+ public String getBaseTypeName() throws SQLException {
+ checkFree();
+ return type.getTypeName();
+ }
+
+ @Override
+ public int getBaseType() throws SQLException {
+ checkFree();
+ return type.getSqlType();
+ }
+
+ @Override
+ public Object getArray() throws SQLException {
+ checkFree();
+ return data;
+ }
+
+ @Override
+ public Object getArray(Map<String, Class<?>> map) throws SQLException {
+ checkFree();
+ return data;
+ }
+
+ @Override
+ public Object getArray(long index, int count) throws SQLException {
+ checkFree();
+ return getArray(index, count, null);
+ }
+
+ @Override
+ public Object getArray(long index, int count, Map<String, Class<?>> map) throws SQLException {
+ checkFree();
+ if (this.data != null) {
+ Object res;
+ if ((this.type.getCode() == Type.Code.PROTO
+ && AbstractMessage[].class.isAssignableFrom(this.data.getClass()))
+ || (this.type.getCode() == Type.Code.ENUM
+ && ProtocolMessageEnum[].class.isAssignableFrom(this.data.getClass()))) {
+ res = java.lang.reflect.Array.newInstance(this.data.getClass().getComponentType(), count);
+ } else {
+ res = java.lang.reflect.Array.newInstance(this.type.getJavaClass(), count);
+ }
+ System.arraycopy(this.data, (int) index - 1, res, 0, count);
+ return res;
+ }
+ return null;
+ }
+
+ private static final String RESULTSET_WITH_TYPE_MAPPING_NOT_SUPPORTED =
+ "Getting a ResultSet with a custom type mapping from an array is not supported";
+
+ @Override
+ public ResultSet getResultSet() throws SQLException {
+ return getResultSet(1L, Integer.MAX_VALUE);
+ }
+
+ @Override
+ public ResultSet getResultSet(Map<String, Class<?>> map) throws SQLException {
+ throw new SQLFeatureNotSupportedException(RESULTSET_WITH_TYPE_MAPPING_NOT_SUPPORTED);
+ }
+
+ @Override
+ public ResultSet getResultSet(long startIndex, int count) throws SQLException {
+ JdbcPreconditions.checkArgument(
+ startIndex + count - 1L <= Integer.MAX_VALUE,
+ String.format("End index cannot exceed %d", Integer.MAX_VALUE));
+ JdbcPreconditions.checkArgument(startIndex >= 1L, "Start index must be >= 1");
+ JdbcPreconditions.checkArgument(count >= 0, "Count must be >= 0");
+ checkFree();
+ Type spannerTypeForProto = getSpannerTypeForProto();
+ Type spannerType =
+ spannerTypeForProto == null ? this.type.getSpannerType() : spannerTypeForProto;
+
+ ImmutableList.Builder<Struct> rows = ImmutableList.builder();
+ int added = 0;
+ if (this.data != null) {
+ // Note that array index in JDBC is base-one.
+ for (int index = (int) startIndex;
+ added < count && index <= ((Object[]) this.data).length;
+ index++) {
+ Object value = ((Object[]) this.data)[index - 1];
+ ValueBinder<Struct.Builder> binder =
+ Struct.newBuilder().set("INDEX").to(index).set("VALUE");
+ Struct.Builder builder;
+ switch (this.type.getCode()) {
+ case BOOL:
+ builder = binder.to((Boolean) value);
+ break;
+ case BYTES:
+ builder = binder.to(ByteArray.copyFrom((byte[]) value));
+ break;
+ case PROTO:
+ if (value == null && AbstractMessage[].class.isAssignableFrom(this.data.getClass())) {
+ builder = binder.to((ByteArray) null, spannerType.getProtoTypeFqn());
+ } else if (value instanceof AbstractMessage) {
+ builder = binder.to((AbstractMessage) value);
+ } else {
+ builder = binder.to(value != null ? ByteArray.copyFrom((byte[]) value) : null);
+ }
+ break;
+ case DATE:
+ builder = binder.to(JdbcTypeConverter.toGoogleDate((Date) value));
+ break;
+ case FLOAT32:
+ builder = binder.to((Float) value);
+ break;
+ case FLOAT64:
+ builder = binder.to((Double) value);
+ break;
+ case INT64:
+ builder = binder.to((Long) value);
+ break;
+ case ENUM:
+ if (value == null
+ && ProtocolMessageEnum[].class.isAssignableFrom(this.data.getClass())) {
+ builder = binder.to((Long) null, spannerType.getProtoTypeFqn());
+ } else if (value instanceof ProtocolMessageEnum) {
+ builder = binder.to((ProtocolMessageEnum) value);
+ } else {
+ builder = binder.to((Long) value);
+ }
+ break;
+ case NUMERIC:
+ builder = binder.to((BigDecimal) value);
+ break;
+ case STRING:
+ builder = binder.to((String) value);
+ break;
+ case JSON:
+ builder = binder.to(Value.json((String) value));
+ break;
+ case PG_JSONB:
+ builder = binder.to(Value.pgJsonb((String) value));
+ break;
+ case TIMESTAMP:
+ builder = binder.to(JdbcTypeConverter.toGoogleTimestamp((Timestamp) value));
+ break;
+ case ARRAY:
+ case STRUCT:
+ default:
+ throw new SQLFeatureNotSupportedException(
+ String.format(
+ "Array of type %s cannot be converted to a ResultSet",
+ this.type.getCode().name()));
+ }
+ rows.add(builder.build());
+ added++;
+ if (added == count) {
+ break;
+ }
+ }
+ }
+
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("INDEX", Type.int64()), StructField.of("VALUE", spannerType)),
+ rows.build()));
+ }
+
+ // Returns null if the type is not a PROTO or ENUM
+ private Type getSpannerTypeForProto() throws SQLException {
+ Type spannerType = null;
+ if (this.data != null) {
+ if (this.type.getCode() == Type.Code.PROTO
+ && AbstractMessage[].class.isAssignableFrom(this.data.getClass())) {
+ spannerType = createSpannerProtoType();
+ } else if (this.type.getCode() == Type.Code.ENUM
+ && ProtocolMessageEnum[].class.isAssignableFrom(this.data.getClass())) {
+ spannerType = createSpannerProtoEnumType();
+ }
+ }
+ return spannerType;
+ }
+
+ private Type createSpannerProtoType() throws SQLException {
+ Class<?> componentType = this.data.getClass().getComponentType();
+ try {
+ Message.Builder builder =
+ (Message.Builder) componentType.getMethod("newBuilder").invoke(null);
+ Descriptor msgDescriptor = builder.getDescriptorForType();
+ return Type.proto(msgDescriptor.getFullName());
+ } catch (Exception e) {
+ throw JdbcSqlExceptionFactory.of(
+ "Error occurred when getting proto message descriptor from data", Code.UNKNOWN, e);
+ }
+ }
+
+ private Type createSpannerProtoEnumType() throws SQLException {
+ Class<?> componentType = this.data.getClass().getComponentType();
+ try {
+ Descriptors.EnumDescriptor enumDescriptor =
+ (Descriptors.EnumDescriptor) componentType.getMethod("getDescriptor").invoke(null);
+ return Type.protoEnum(enumDescriptor.getFullName());
+ } catch (Exception e) {
+ throw JdbcSqlExceptionFactory.of(
+ "Error occurred when getting proto enum descriptor from data", Code.UNKNOWN, e);
+ }
+ }
+
+ @Override
+ public ResultSet getResultSet(long index, int count, Map<String, Class<?>> map)
+ throws SQLException {
+ throw new SQLFeatureNotSupportedException(RESULTSET_WITH_TYPE_MAPPING_NOT_SUPPORTED);
+ }
+
+ @Override
+ public void free() {
+ this.freed = true;
+ this.data = null;
+ }
+
+ @Override
+ public String toString() {
+ if (data == null) {
+ return "null";
+ }
+ boolean first = true;
+ StringBuilder builder = new StringBuilder("{");
+ for (Object o : (Object[]) data) {
+ if (!first) {
+ builder.append(",");
+ }
+ first = false;
+ if (o == null) {
+ builder.append("null");
+ } else {
+ builder.append(o);
+ }
+ }
+ builder.append("}");
+ return builder.toString();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof JdbcArray)) return false;
+ JdbcArray array = (JdbcArray) other;
+ return this.type == array.type
+ && Arrays.deepEquals((Object[]) this.data, (Object[]) array.data);
+ }
+
+ @Override
+ public int hashCode() {
+ return this.type.hashCode() ^ Arrays.deepHashCode((Object[]) data);
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcBlob.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcBlob.java
new file mode 100644
index 000000000000..5a1566f7e4f9
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcBlob.java
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.common.base.Preconditions;
+import com.google.rpc.Code;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.sql.Blob;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Simple {@link Blob} implementation for Google Cloud Spanner. The value is mapped to a byte array
+ * in memory. The {@link Blob} data type can be used in combination with the BYTES Cloud Spanner
+ * data type.
+ */
+class JdbcBlob implements Blob {
+ private byte[] value = new byte[0];
+
+ /** Creates an empty blob. */
+ JdbcBlob() {}
+
+ /** Creates a blob with the specified bytes as its value. */
+ JdbcBlob(byte[] value) {
+ this.value = value;
+ }
+
+ private void checkPosition(long pos) {
+ Preconditions.checkArgument(
+ pos + 1 <= Integer.MAX_VALUE,
+ "position larger than " + Integer.MAX_VALUE + " is not supported");
+ }
+
+ private void checkLength(long length) {
+ Preconditions.checkArgument(
+ length <= Integer.MAX_VALUE,
+ "length larger than " + Integer.MAX_VALUE + " is not supported");
+ }
+
+ private void checkPositionPlusLength(long pos, long length) {
+ Preconditions.checkArgument(
+ pos + 1 + length <= Integer.MAX_VALUE,
+ "position+length larger than " + Integer.MAX_VALUE + " is not supported");
+ }
+
+ @Override
+ public long length() {
+ return value.length;
+ }
+
+ @Override
+ public byte[] getBytes(long pos, int length) throws SQLException {
+ JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1");
+ JdbcPreconditions.checkArgument(length >= 0, "length must be >= 0");
+ checkPosition(pos);
+ checkPositionPlusLength(pos, length);
+ int end = (int) pos + length - 1;
+ int blobLength = (int) length();
+ if (end > blobLength) {
+ length = blobLength - (int) pos + 1;
+ }
+ byte[] res = new byte[length];
+ System.arraycopy(value, (int) pos - 1, res, 0, length);
+ return res;
+ }
+
+ @Override
+ public InputStream getBinaryStream() {
+ return new ByteArrayInputStream(value);
+ }
+
+ @Override
+ public long position(byte[] pattern, long start) throws SQLException {
+ JdbcPreconditions.checkArgument(
+ pattern != null && pattern.length > 0, "pattern must not be empty");
+ JdbcPreconditions.checkArgument(start > 0L, "start must be >= 1");
+ checkPosition(start);
+ for (int outerIndex = (int) start - 1; outerIndex < value.length; outerIndex++) {
+ int innerIndex = 0;
+ int valueIndex = outerIndex;
+ while (valueIndex < value.length
+ && innerIndex < pattern.length
+ && value[valueIndex] == pattern[innerIndex]) {
+ innerIndex++;
+ valueIndex++;
+ }
+ if (innerIndex == pattern.length) {
+ return outerIndex + 1;
+ }
+ }
+ return -1;
+ }
+
+ @Override
+ public long position(Blob pattern, long start) throws SQLException {
+ JdbcPreconditions.checkArgument(pattern != null, "pattern must not be empty");
+ JdbcPreconditions.checkArgument(start > 0L, "start must be >= 1");
+ checkPosition(start);
+ byte[] buffer = new byte[1024];
+ int totalSize = 0;
+ List<byte[]> totalBytes = new ArrayList<>();
+ try (InputStream is = pattern.getBinaryStream()) {
+ int bytesRead;
+ while ((bytesRead = is.read(buffer)) > -1) {
+ if (bytesRead == buffer.length) {
+ totalBytes.add(buffer);
+ } else {
+ byte[] dest = new byte[bytesRead];
+ System.arraycopy(buffer, 0, dest, 0, bytesRead);
+ totalBytes.add(dest);
+ }
+ totalSize += bytesRead;
+ buffer = new byte[1024];
+ }
+ } catch (IOException e) {
+ throw JdbcSqlExceptionFactory.of("reading blob failed", Code.UNKNOWN, e);
+ }
+ byte[] bytePattern = new byte[totalSize];
+ int index = 0;
+ for (byte[] b : totalBytes) {
+ System.arraycopy(b, 0, bytePattern, index, b.length);
+ index += b.length;
+ }
+ return position(bytePattern, start);
+ }
+
+ private void setLength(int length) {
+ int prevLength = value.length;
+ byte[] newValue = new byte[length];
+ System.arraycopy(value, 0, newValue, 0, Math.min(prevLength, newValue.length));
+ value = newValue;
+ }
+
+ @Override
+ public int setBytes(long pos, byte[] bytes) throws SQLException {
+ JdbcPreconditions.checkArgument(bytes != null, "bytes must be non-null");
+ JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1");
+ checkPosition(pos);
+ int end = (int) pos + bytes.length - 1;
+ if (end >= value.length) {
+ setLength(end);
+ }
+ System.arraycopy(bytes, 0, value, (int) pos - 1, bytes.length);
+ return bytes.length;
+ }
+
+ @Override
+ public int setBytes(long pos, byte[] bytes, int offset, int len) throws SQLException {
+ JdbcPreconditions.checkArgument(bytes != null, "bytes must be non-null");
+ JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1");
+ JdbcPreconditions.checkArgument(offset >= 0, "offset must be >= 0");
+ JdbcPreconditions.checkArgument(len >= 0, "len must be >= 0");
+ checkPosition(pos);
+ if (offset > bytes.length) {
+ offset = 0;
+ len = 0;
+ } else {
+ if (offset + len > bytes.length) {
+ len = bytes.length - offset;
+ }
+ }
+ int end = (int) pos + len - 1;
+ if (end >= value.length) {
+ setLength(end);
+ }
+ System.arraycopy(bytes, offset, value, (int) pos - 1, len);
+ return len;
+ }
+
+ private final class BlobOutputStream extends OutputStream {
+ private byte[] buffer = new byte[1024];
+ private int currentPos = 0;
+ private int blobPosition;
+
+ private BlobOutputStream(int pos) {
+ blobPosition = pos;
+ }
+
+ @Override
+ public void write(int b) {
+ if (currentPos >= buffer.length) {
+ byte[] newBuffer = new byte[buffer.length * 2];
+ System.arraycopy(buffer, 0, newBuffer, 0, buffer.length);
+ buffer = newBuffer;
+ }
+ buffer[currentPos] = (byte) b;
+ currentPos++;
+ }
+
+ @Override
+ public void flush() throws IOException {
+ try {
+ setBytes(blobPosition, buffer, 0, currentPos);
+ blobPosition += currentPos;
+ currentPos = 0;
+ Arrays.fill(buffer, (byte) 0);
+ } catch (SQLException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ flush();
+ }
+ }
+
+ @Override
+ public OutputStream setBinaryStream(long pos) throws SQLException {
+ JdbcPreconditions.checkArgument(pos > 0L, "pos must be >= 1");
+ checkPosition(pos);
+ return new BlobOutputStream((int) pos);
+ }
+
+ @Override
+ public void truncate(long len) throws SQLException {
+ JdbcPreconditions.checkArgument(len >= 0, "len must be >= 0");
+ checkLength(len);
+ setLength((int) len);
+ }
+
+ @Override
+ public void free() {
+ setLength(0);
+ }
+
+ @Override
+ public InputStream getBinaryStream(long pos, long length) throws SQLException {
+ JdbcPreconditions.checkArgument(pos > 0, "pos must be >= 1");
+ JdbcPreconditions.checkArgument(length >= 0, "length must be >= 0");
+ checkPosition(pos);
+ checkPositionPlusLength(pos, length);
+ if (pos + length > value.length) {
+ length = value.length - pos + 1;
+ }
+ byte[] buffer = new byte[(int) length];
+ System.arraycopy(value, (int) pos - 1, buffer, 0, (int) length);
+ return new ByteArrayInputStream(buffer);
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof JdbcBlob)) return false;
+ JdbcBlob blob = (JdbcBlob) other;
+ return Arrays.equals(this.value, blob.value);
+ }
+
+ @Override
+ public int hashCode() {
+ return Arrays.hashCode(value);
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcClob.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcClob.java
new file mode 100644
index 000000000000..2939dee1afb9
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcClob.java
@@ -0,0 +1,211 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.io.Writer;
+import java.nio.charset.StandardCharsets;
+import java.sql.Clob;
+import java.sql.NClob;
+import java.sql.SQLException;
+
+/**
+ * Simple implementation of {@link Clob} and {@link NClob} for Google Cloud Spanner. The value is
+ * mapped to a {@link StringBuilder} in memory. {@link Clob} and {@link NClob} can be used with the
+ * STRING data type of Cloud Spanner.
+ */
+class JdbcClob implements NClob {
+ private StringBuilder value = new StringBuilder();
+
+ JdbcClob() {}
+
+ JdbcClob(String value) {
+ this.value.append(value);
+ }
+
+ private void checkPosition(long pos) {
+ Preconditions.checkArgument(
+ pos + 1 <= Integer.MAX_VALUE,
+ "position larger than " + Integer.MAX_VALUE + " is not supported");
+ }
+
+ private void checkLength(long length) {
+ Preconditions.checkArgument(
+ length <= Integer.MAX_VALUE,
+ "length larger than " + Integer.MAX_VALUE + " is not supported");
+ }
+
+ private void checkPositionPlusLength(long pos, long length) {
+ Preconditions.checkArgument(
+ pos + 1 + length <= Integer.MAX_VALUE,
+ "position+length larger than " + Integer.MAX_VALUE + " is not supported");
+ }
+
+ @Override
+ public long length() {
+ return value.length();
+ }
+
+ @Override
+ public String getSubString(long pos, int length) throws SQLException {
+ JdbcPreconditions.checkArgument(pos >= 1, "Start position must be >= 1");
+ JdbcPreconditions.checkArgument(length >= 0, "Length must be >= 0");
+ checkPosition(pos);
+ checkPositionPlusLength(pos, length);
+ if (pos > length()) {
+ return "";
+ }
+ int end = (int) pos + length - 1;
+ if (end >= value.length()) {
+ end = value.length();
+ }
+ return value.substring((int) pos - 1, end);
+ }
+
+ @Override
+ public Reader getCharacterStream() {
+ return new StringReader(value.toString());
+ }
+
+ @Override
+ public InputStream getAsciiStream() {
+ return new ByteArrayInputStream(StandardCharsets.US_ASCII.encode(value.toString()).array());
+ }
+
+ @Override
+ public long position(String searchStr, long start) throws SQLException {
+ JdbcPreconditions.checkArgument(start >= 1, "Start position must be >= 1");
+ JdbcPreconditions.checkArgument(searchStr != null, "searchStr may not be null");
+ checkPosition(start);
+ int res = value.indexOf(searchStr, (int) start - 1);
+ if (res == -1) {
+ return res;
+ }
+ return res + 1;
+ }
+
+ @Override
+ public long position(Clob searchStr, long start) throws SQLException {
+ JdbcPreconditions.checkArgument(start >= 1, "Start position must be >= 1");
+ JdbcPreconditions.checkArgument(searchStr != null, "searchStr may not be null");
+ checkPosition(start);
+ checkPositionPlusLength(start, searchStr.length());
+ int res = value.indexOf(searchStr.getSubString(1L, (int) searchStr.length()), (int) start - 1);
+ if (res == -1) {
+ return res;
+ }
+ return res + 1;
+ }
+
+ @Override
+ public int setString(long pos, String str) throws SQLException {
+ JdbcPreconditions.checkArgument(str != null, "str is null");
+ JdbcPreconditions.checkArgument(pos >= 1, "Position must be >= 1");
+ checkPosition(pos);
+ int ipos = (int) pos;
+ if ((ipos - 1) > value.length()) {
+ value.append(Strings.repeat(" ", ipos - value.length() - 1));
+ }
+ value.replace(ipos - 1, ipos + str.length() - 1, str);
+ return str.length();
+ }
+
+ @Override
+ public int setString(long pos, String str, int offset, int len) throws SQLException {
+ JdbcPreconditions.checkArgument(str != null, "str is null");
+ JdbcPreconditions.checkArgument(pos >= 1, "Position must be >= 1");
+ JdbcPreconditions.checkArgument(offset >= 1, "Offset must be >= 1");
+ JdbcPreconditions.checkArgument(
+ offset + len - 1 <= str.length(), "offset + len is greater than str.length()");
+ checkPosition(pos);
+ return setString(pos, str.substring(offset - 1, offset + len - 1));
+ }
+
+ private class ClobWriter extends StringWriter {
+ private final long startPos;
+
+ private ClobWriter(long startPos) {
+ this.startPos = startPos;
+ }
+
+ @Override
+ public void flush() {
+ try {
+ setString(startPos, getBuffer().toString());
+ } catch (SQLException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void close() {
+ flush();
+ }
+ }
+
+ @Override
+ public OutputStream setAsciiStream(long pos) throws SQLException {
+ throw JdbcSqlExceptionFactory.unsupported(
+ "setAsciiStream is not supported. Use setCharacterStream instead");
+ }
+
+ @Override
+ public Writer setCharacterStream(long pos) throws SQLException {
+ JdbcPreconditions.checkArgument(pos >= 1, "pos must be >= 1");
+ return new ClobWriter(pos);
+ }
+
+ @Override
+ public void truncate(long len) throws SQLException {
+ JdbcPreconditions.checkArgument(len >= 0, "len must be >= 0");
+ checkLength(len);
+ value.setLength((int) len);
+ }
+
+ @Override
+ public void free() {
+ value = new StringBuilder();
+ }
+
+ @Override
+ public Reader getCharacterStream(long pos, long length) throws SQLException {
+ JdbcPreconditions.checkArgument(pos >= 1, "pos must be >= 1");
+ JdbcPreconditions.checkArgument(length >= 0, "length must be >= 0");
+ checkPosition(pos);
+ checkPositionPlusLength(pos, length);
+ return new StringReader(value.substring((int) pos - 1, (int) pos + (int) length - 1));
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof JdbcClob)) return false;
+ JdbcClob blob = (JdbcClob) other;
+ return value.toString().equals(blob.value.toString());
+ }
+
+ @Override
+ public int hashCode() {
+ return value.toString().hashCode();
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConnection.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConnection.java
new file mode 100644
index 000000000000..8041e8d68e3a
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConnection.java
@@ -0,0 +1,933 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import static com.google.cloud.spanner.jdbc.JdbcStatement.ALL_COLUMNS;
+import static com.google.cloud.spanner.jdbc.JdbcStatement.isNullOrEmpty;
+
+import com.google.api.client.util.Preconditions;
+import com.google.cloud.ByteArray;
+import com.google.cloud.spanner.CommitResponse;
+import com.google.cloud.spanner.DatabaseId;
+import com.google.cloud.spanner.Mutation;
+import com.google.cloud.spanner.SpannerException;
+import com.google.cloud.spanner.TimestampBound;
+import com.google.cloud.spanner.connection.AutocommitDmlMode;
+import com.google.cloud.spanner.connection.Connection;
+import com.google.cloud.spanner.connection.ConnectionOptions;
+import com.google.cloud.spanner.connection.ConnectionProperties;
+import com.google.cloud.spanner.connection.SavepointSupport;
+import com.google.cloud.spanner.connection.TransactionMode;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterators;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.common.AttributesBuilder;
+import java.io.IOException;
+import java.io.InputStream;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.DatabaseMetaData;
+import java.sql.NClob;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Savepoint;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.UUID;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import javax.annotation.Nonnull;
+
+/** Jdbc Connection class for Google Cloud Spanner */
+class JdbcConnection extends AbstractJdbcConnection {
+ private static final String ONLY_RS_FORWARD_ONLY =
+ "Only result sets of type TYPE_FORWARD_ONLY are supported";
+ private static final String ONLY_CONCUR_READ_ONLY =
+ "Only result sets with concurrency CONCUR_READ_ONLY are supported";
+ private static final String ONLY_CLOSE_CURSORS_AT_COMMIT =
+ "Only result sets with holdability CLOSE_CURSORS_AT_COMMIT are supported";
+
+ /**
+ * This query is used to check the aliveness of the connection if legacy alive check has been
+ * enabled. As Cloud Spanner JDBC connections do not maintain a physical or logical connection to
+ * Cloud Spanner, there is also no point in repeatedly executing a simple query to check whether a
+ * connection is alive. Instead, we rely on the result from the initial query to Spanner that
+ * determines the dialect to determine whether the connection is alive or not. This result is
+ * cached for all JDBC connections using the same {@link com.google.cloud.spanner.Spanner}
+ * instance.
+ *
+ * <p>The legacy {@link #isValid(int)} check using a SELECT 1 statement can be enabled by setting
+ * the System property spanner.jdbc.use_legacy_is_valid_check to true or setting the environment
+ * variable SPANNER_JDBC_USE_LEGACY_IS_VALID_CHECK to true.
+ */
+ static final String LEGACY_IS_VALID_QUERY = "SELECT 1";
+
+ static final ImmutableList<String> NO_GENERATED_KEY_COLUMNS = ImmutableList.of();
+
+ private Map<String, Class<?>> typeMap = new HashMap<>();
+
+ private final boolean useLegacyIsValidCheck;
+
+ private final Metrics metrics;
+
+ private final Attributes openTelemetryMetricsAttributes;
+
+ JdbcConnection(String connectionUrl, ConnectionOptions options) throws SQLException {
+ super(connectionUrl, options);
+ this.useLegacyIsValidCheck = useLegacyValidCheck();
+ OpenTelemetry openTelemetry = getSpanner().getOptions().getOpenTelemetry();
+ this.openTelemetryMetricsAttributes =
+ createOpenTelemetryAttributes(getConnectionOptions().getDatabaseId(), false);
+ this.metrics = new Metrics(openTelemetry);
+ }
+
+ static boolean useLegacyValidCheck() {
+ String value = System.getProperty("spanner.jdbc.use_legacy_is_valid_check");
+ if (Strings.isNullOrEmpty(value)) {
+ value = System.getenv("SPANNER_JDBC_USE_LEGACY_IS_VALID_CHECK");
+ }
+ if (!Strings.isNullOrEmpty(value)) {
+ return Boolean.parseBoolean(value);
+ }
+ return false;
+ }
+
+ @VisibleForTesting
+ static Attributes createOpenTelemetryAttributes(
+ DatabaseId databaseId, boolean includeConnectionId) {
+ AttributesBuilder attributesBuilder = Attributes.builder();
+ // A unique connection ID should only be included for tracing and not for metrics.
+ if (includeConnectionId) {
+ attributesBuilder.put("connection_id", UUID.randomUUID().toString());
+ }
+ attributesBuilder.put("database", databaseId.getDatabase());
+ attributesBuilder.put("instance_id", databaseId.getInstanceId().getInstance());
+ attributesBuilder.put("project_id", databaseId.getInstanceId().getProject());
+ return attributesBuilder.build();
+ }
+
+ public void recordClientLibLatencyMetric(long value) {
+ metrics.recordClientLibLatency(value, openTelemetryMetricsAttributes);
+ }
+
+ @Override
+ public Statement createStatement() throws SQLException {
+ checkClosed();
+ return new JdbcStatement(this);
+ }
+
+ @Override
+ public JdbcPreparedStatement prepareStatement(String sql) throws SQLException {
+ return prepareStatement(sql, NO_GENERATED_KEY_COLUMNS);
+ }
+
+ private JdbcPreparedStatement prepareStatement(
+ String sql, ImmutableList<String> generatedKeyColumns) throws SQLException {
+ checkClosed();
+ return new JdbcPreparedStatement(this, sql, generatedKeyColumns);
+ }
+
+ @Override
+ public String nativeSQL(String sql) throws SQLException {
+ checkClosed();
+ return getParser()
+ .convertPositionalParametersToNamedParameters('?', getParser().removeCommentsAndTrim(sql))
+ .sqlWithNamedParameters;
+ }
+
+ @Override
+ public String getStatementTag() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getStatementTag();
+ }
+
+ @Override
+ public void setStatementTag(String tag) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().setStatementTag(tag);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public String getTransactionTag() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getTransactionTag();
+ }
+
+ @Override
+ public void setTransactionTag(String tag) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().setTransactionTag(tag);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void setTransactionMode(TransactionMode mode) throws SQLException {
+ checkClosed();
+ getSpannerConnection().setTransactionMode(mode);
+ }
+
+ @Override
+ public TransactionMode getTransactionMode() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getTransactionMode();
+ }
+
+ @Override
+ public void setAutocommitDmlMode(AutocommitDmlMode mode) throws SQLException {
+ checkClosed();
+ getSpannerConnection().setAutocommitDmlMode(mode);
+ }
+
+ @Override
+ public AutocommitDmlMode getAutocommitDmlMode() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getAutocommitDmlMode();
+ }
+
+ @Override
+ public void setReadOnlyStaleness(TimestampBound staleness) throws SQLException {
+ checkClosed();
+ getSpannerConnection().setReadOnlyStaleness(staleness);
+ }
+
+ @Override
+ public TimestampBound getReadOnlyStaleness() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getReadOnlyStaleness();
+ }
+
+ @Override
+ public void setOptimizerVersion(String optimizerVersion) throws SQLException {
+ checkClosed();
+ getSpannerConnection().setOptimizerVersion(optimizerVersion);
+ }
+
+ @Override
+ public String getOptimizerVersion() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getOptimizerVersion();
+ }
+
+ /** Returns the value that should be returned for column types with an unknown length. */
+ int getColumnTypeUnknownLength() {
+ return getSpannerConnection().getConnectionPropertyValue(ConnectionProperties.UNKNOWN_LENGTH);
+ }
+
+ @Override
+ public boolean isInTransaction() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().isInTransaction();
+ }
+
+ @Override
+ public boolean isTransactionStarted() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().isTransactionStarted();
+ }
+
+ @Override
+ public void setAutoCommit(boolean autoCommit) throws SQLException {
+ checkClosed();
+ try {
+ // According to the JDBC spec's we need to commit the current transaction when changing
+ // autocommit mode.
+ if (getSpannerConnection().isAutocommit() != autoCommit
+ && getSpannerConnection().isTransactionStarted()) {
+ commit();
+ }
+ getSpannerConnection().setAutocommit(autoCommit);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public boolean getAutoCommit() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().isAutocommit();
+ }
+
+ @Override
+ public void commit() throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().commit();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void rollback() throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().rollback();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void close() throws SQLException {
+ try {
+ getSpannerConnection().close();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public boolean isClosed() {
+ return getSpannerConnection().isClosed();
+ }
+
+ @Override
+ public DatabaseMetaData getMetaData() throws SQLException {
+ checkClosed();
+ return new JdbcDatabaseMetaData(this);
+ }
+
+ @Override
+ public void setReadOnly(boolean readOnly) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().setReadOnly(readOnly);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public boolean isReadOnly() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().isReadOnly();
+ }
+
+ @Override
+ public Statement createStatement(int resultSetType, int resultSetConcurrency)
+ throws SQLException {
+ checkClosed();
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY);
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY);
+ return createStatement();
+ }
+
+ @Override
+ public Statement createStatement(
+ int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
+ checkClosed();
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY);
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY);
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetHoldability == ResultSet.CLOSE_CURSORS_AT_COMMIT, ONLY_CLOSE_CURSORS_AT_COMMIT);
+ return createStatement();
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
+ throws SQLException {
+ checkClosed();
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY);
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY);
+ return prepareStatement(sql);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(
+ String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability)
+ throws SQLException {
+ checkClosed();
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetType == ResultSet.TYPE_FORWARD_ONLY, ONLY_RS_FORWARD_ONLY);
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetConcurrency == ResultSet.CONCUR_READ_ONLY, ONLY_CONCUR_READ_ONLY);
+ JdbcPreconditions.checkSqlFeatureSupported(
+ resultSetHoldability == ResultSet.CLOSE_CURSORS_AT_COMMIT, ONLY_CLOSE_CURSORS_AT_COMMIT);
+ return prepareStatement(sql);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
+ return prepareStatement(
+ sql,
+ autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS
+ ? ALL_COLUMNS
+ : NO_GENERATED_KEY_COLUMNS);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
+ // This should preferably have returned an error, but the initial version of the driver just
+ // accepted and ignored this. Starting to throw an error now would be a breaking change.
+ // TODO: Consider throwing an Unsupported error for the next major version bump.
+ return prepareStatement(sql, NO_GENERATED_KEY_COLUMNS);
+ }
+
+ @Override
+ public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
+ return prepareStatement(
+ sql,
+ isNullOrEmpty(columnNames) ? NO_GENERATED_KEY_COLUMNS : ImmutableList.copyOf(columnNames));
+ }
+
+ @Override
+ public Map<String, Class<?>> getTypeMap() throws SQLException {
+ checkClosed();
+ return new HashMap<>(typeMap);
+ }
+
+ @Override
+ public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
+ checkClosed();
+ this.typeMap = new HashMap<>(map);
+ }
+
+ boolean isUseLegacyIsValidCheck() {
+ return useLegacyIsValidCheck;
+ }
+
+ @Override
+ public boolean isValid(int timeout) throws SQLException {
+ JdbcPreconditions.checkArgument(timeout >= 0, "timeout must be >= 0");
+ if (!isClosed()) {
+ if (isUseLegacyIsValidCheck()) {
+ return legacyIsValid(timeout);
+ }
+ try {
+ return getDialect() != null;
+ } catch (Exception ignore) {
+ // ignore and fall through.
+ }
+ }
+ return false;
+ }
+
+ private boolean legacyIsValid(int timeout) throws SQLException {
+ try (Statement statement = createStatement()) {
+ statement.setQueryTimeout(timeout);
+ try (ResultSet rs = statement.executeQuery(LEGACY_IS_VALID_QUERY)) {
+ if (rs.next()) {
+ if (rs.getLong(1) == 1L) {
+ return true;
+ }
+ }
+ }
+ } catch (SQLException e) {
+ // ignore and fall through.
+ }
+ return false;
+ }
+
+ @Override
+ public Blob createBlob() throws SQLException {
+ checkClosed();
+ return new JdbcBlob();
+ }
+
+ @Override
+ public Clob createClob() throws SQLException {
+ checkClosed();
+ return new JdbcClob();
+ }
+
+ @Override
+ public NClob createNClob() throws SQLException {
+ checkClosed();
+ return new JdbcClob();
+ }
+
+ @Override
+ public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
+ checkClosed();
+ return JdbcArray.createArray(typeName, elements);
+ }
+
+ @Override
+ public void setCatalog(String catalog) throws SQLException {
+ // This method could be changed to allow the user to change to another database.
+ // For now, we only support setting the default catalog in order to support frameworks
+ // and applications that set this when no catalog has been specified in the connection
+ // URL.
+ checkClosed();
+ checkValidCatalog(catalog);
+ }
+
+ void checkValidCatalog(String catalog) throws SQLException {
+ String defaultCatalog = getDefaultCatalog();
+ JdbcPreconditions.checkArgument(
+ defaultCatalog.equals(catalog),
+ String.format("Only catalog %s is supported", defaultCatalog));
+ }
+
+ @Override
+ public String getCatalog() throws SQLException {
+ checkClosed();
+ return getDefaultCatalog();
+ }
+
+ @Nonnull
+ String getDefaultCatalog() {
+ switch (getDialect()) {
+ case POSTGRESQL:
+ String database = getConnectionOptions().getDatabaseName();
+ // It should not be possible that database is null, but it's better to be safe than sorry.
+ return database == null ? "" : database;
+ case GOOGLE_STANDARD_SQL:
+ default:
+ return "";
+ }
+ }
+
+ @Override
+ public void setSchema(String schema) throws SQLException {
+ checkClosed();
+ checkValidSchema(schema);
+ }
+
+ void checkValidSchema(String schema) throws SQLException {
+ String defaultSchema = getDefaultSchema();
+ JdbcPreconditions.checkArgument(
+ defaultSchema.equals(schema), String.format("Only schema %s is supported", defaultSchema));
+ }
+
+ @Override
+ public String getSchema() throws SQLException {
+ checkClosed();
+ return getDefaultSchema();
+ }
+
+ @Nonnull
+ String getDefaultSchema() {
+ return getDialect().getDefaultSchema();
+ }
+
+ @Override
+ public SavepointSupport getSavepointSupport() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getSavepointSupport();
+ }
+
+ @Override
+ public void setSavepointSupport(SavepointSupport savepointSupport) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().setSavepointSupport(savepointSupport);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public Savepoint setSavepoint() throws SQLException {
+ checkClosed();
+ try {
+ JdbcSavepoint savepoint = JdbcSavepoint.unnamed();
+ getSpannerConnection().savepoint(savepoint.internalGetSavepointName());
+ return savepoint;
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public Savepoint setSavepoint(String name) throws SQLException {
+ checkClosed();
+ try {
+ JdbcSavepoint savepoint = JdbcSavepoint.named(name);
+ getSpannerConnection().savepoint(savepoint.internalGetSavepointName());
+ return savepoint;
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void rollback(Savepoint savepoint) throws SQLException {
+ checkClosed();
+ JdbcPreconditions.checkArgument(savepoint instanceof JdbcSavepoint, savepoint);
+ JdbcSavepoint jdbcSavepoint = (JdbcSavepoint) savepoint;
+ try {
+ getSpannerConnection().rollbackToSavepoint(jdbcSavepoint.internalGetSavepointName());
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void releaseSavepoint(Savepoint savepoint) throws SQLException {
+ checkClosed();
+ JdbcPreconditions.checkArgument(savepoint instanceof JdbcSavepoint, savepoint);
+ JdbcSavepoint jdbcSavepoint = (JdbcSavepoint) savepoint;
+ try {
+ getSpannerConnection().releaseSavepoint(jdbcSavepoint.internalGetSavepointName());
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public Timestamp getCommitTimestamp() throws SQLException {
+ checkClosed();
+ try {
+ return getSpannerConnection().getCommitTimestamp().toSqlTimestamp();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public CommitResponse getCommitResponse() throws SQLException {
+ checkClosed();
+ try {
+ return getSpannerConnection().getCommitResponse();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void setReturnCommitStats(boolean returnCommitStats) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().setReturnCommitStats(returnCommitStats);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public boolean isReturnCommitStats() throws SQLException {
+ checkClosed();
+ try {
+ return getSpannerConnection().isReturnCommitStats();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public Timestamp getReadTimestamp() throws SQLException {
+ checkClosed();
+ try {
+ return getSpannerConnection().getReadTimestamp().toSqlTimestamp();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public boolean isRetryAbortsInternally() throws SQLException {
+ checkClosed();
+ try {
+ return getSpannerConnection().isRetryAbortsInternally();
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void setRetryAbortsInternally(boolean retryAbortsInternally) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().setRetryAbortsInternally(retryAbortsInternally);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void write(Mutation mutation) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().write(mutation);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void write(Iterable<Mutation> mutations) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().write(mutations);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void bufferedWrite(Mutation mutation) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().bufferedWrite(mutation);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void bufferedWrite(Iterable<Mutation> mutations) throws SQLException {
+ checkClosed();
+ try {
+ getSpannerConnection().bufferedWrite(mutations);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ /**
+ * Convenience method for calling a setter and translating any {@link SpannerException} to a
+ * {@link SQLException}.
+ */
+ private <T> void set(BiConsumer<Connection, T> setter, T value) throws SQLException {
+ checkClosed();
+ try {
+ setter.accept(getSpannerConnection(), value);
+ } catch (SpannerException spannerException) {
+ throw JdbcSqlExceptionFactory.of(spannerException);
+ }
+ }
+
+ /**
+ * Convenience method for calling a getter and translating any {@link SpannerException} to a
+ * {@link SQLException}.
+ */
+ private <R> R get(Function<Connection, R> getter) throws SQLException {
+ checkClosed();
+ try {
+ return getter.apply(getSpannerConnection());
+ } catch (SpannerException spannerException) {
+ throw JdbcSqlExceptionFactory.of(spannerException);
+ }
+ }
+
+ @Override
+ public void setDataBoostEnabled(boolean dataBoostEnabled) throws SQLException {
+ set(Connection::setDataBoostEnabled, dataBoostEnabled);
+ }
+
+ @Override
+ public boolean isDataBoostEnabled() throws SQLException {
+ return get(Connection::isDataBoostEnabled);
+ }
+
+ @Override
+ public void setAutoPartitionMode(boolean autoPartitionMode) throws SQLException {
+ set(Connection::setAutoPartitionMode, autoPartitionMode);
+ }
+
+ @Override
+ public boolean isAutoPartitionMode() throws SQLException {
+ return get(Connection::isAutoPartitionMode);
+ }
+
+ @Override
+ public void setMaxPartitions(int maxPartitions) throws SQLException {
+ set(Connection::setMaxPartitions, maxPartitions);
+ }
+
+ @Override
+ public int getMaxPartitions() throws SQLException {
+ return get(Connection::getMaxPartitions);
+ }
+
+ @Override
+ public void setMaxPartitionedParallelism(int maxThreads) throws SQLException {
+ set(Connection::setMaxPartitionedParallelism, maxThreads);
+ }
+
+ @Override
+ public int getMaxPartitionedParallelism() throws SQLException {
+ return get(Connection::getMaxPartitionedParallelism);
+ }
+
+ @Override
+ public void setAutoBatchDml(boolean autoBatchDml) throws SQLException {
+ set(Connection::setAutoBatchDml, autoBatchDml);
+ }
+
+ @Override
+ public boolean isAutoBatchDml() throws SQLException {
+ return get(Connection::isAutoBatchDml);
+ }
+
+ @Override
+ public void setAutoBatchDmlUpdateCount(long updateCount) throws SQLException {
+ set(Connection::setAutoBatchDmlUpdateCount, updateCount);
+ }
+
+ @Override
+ public long getAutoBatchDmlUpdateCount() throws SQLException {
+ return get(Connection::getAutoBatchDmlUpdateCount);
+ }
+
+ @Override
+ public void setAutoBatchDmlUpdateCountVerification(boolean verification) throws SQLException {
+ set(Connection::setAutoBatchDmlUpdateCountVerification, verification);
+ }
+
+ @Override
+ public boolean isAutoBatchDmlUpdateCountVerification() throws SQLException {
+ return get(Connection::isAutoBatchDmlUpdateCountVerification);
+ }
+
+ @SuppressWarnings("deprecation")
+ private static final class JdbcToSpannerTransactionRetryListener
+ implements com.google.cloud.spanner.connection.TransactionRetryListener {
+ private final TransactionRetryListener delegate;
+
+ JdbcToSpannerTransactionRetryListener(TransactionRetryListener delegate) {
+ this.delegate = Preconditions.checkNotNull(delegate);
+ }
+
+ @Override
+ public void retryStarting(
+ com.google.cloud.Timestamp transactionStarted, long transactionId, int retryAttempt) {
+ delegate.retryStarting(transactionStarted, transactionId, retryAttempt);
+ }
+
+ @Override
+ public void retryFinished(
+ com.google.cloud.Timestamp transactionStarted,
+ long transactionId,
+ int retryAttempt,
+ RetryResult result) {
+ delegate.retryFinished(
+ transactionStarted,
+ transactionId,
+ retryAttempt,
+ TransactionRetryListener.RetryResult.valueOf(result.name()));
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (!(o instanceof JdbcToSpannerTransactionRetryListener)) {
+ return false;
+ }
+ JdbcToSpannerTransactionRetryListener other = (JdbcToSpannerTransactionRetryListener) o;
+ return this.delegate.equals(other.delegate);
+ }
+
+ @Override
+ public int hashCode() {
+ return delegate.hashCode();
+ }
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public void addTransactionRetryListener(TransactionRetryListener listener) throws SQLException {
+ checkClosed();
+ getSpannerConnection()
+ .addTransactionRetryListener(new JdbcToSpannerTransactionRetryListener(listener));
+ }
+
+ @Override
+ public void addTransactionRetryListener(
+ com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException {
+ checkClosed();
+ getSpannerConnection().addTransactionRetryListener(listener);
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public boolean removeTransactionRetryListener(TransactionRetryListener listener)
+ throws SQLException {
+ checkClosed();
+ return getSpannerConnection()
+ .removeTransactionRetryListener(new JdbcToSpannerTransactionRetryListener(listener));
+ }
+
+ @Override
+ public boolean removeTransactionRetryListener(
+ com.google.cloud.spanner.connection.TransactionRetryListener listener) throws SQLException {
+ checkClosed();
+ return getSpannerConnection().removeTransactionRetryListener(listener);
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public Iterator<TransactionRetryListener> getTransactionRetryListeners() throws SQLException {
+ checkClosed();
+ return Iterators.transform(
+ getSpannerConnection().getTransactionRetryListeners(),
+ input -> {
+ if (input instanceof JdbcToSpannerTransactionRetryListener) {
+ return ((JdbcToSpannerTransactionRetryListener) input).delegate;
+ }
+ return null;
+ });
+ }
+
+ @Override
+ public Iterator<com.google.cloud.spanner.connection.TransactionRetryListener>
+ getTransactionRetryListenersFromConnection() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getTransactionRetryListeners();
+ }
+
+ @Override
+ public void setProtoDescriptors(@Nonnull byte[] protoDescriptors) throws SQLException {
+ Preconditions.checkNotNull(protoDescriptors);
+ checkClosed();
+ try {
+ getSpannerConnection().setProtoDescriptors(protoDescriptors);
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public void setProtoDescriptors(@Nonnull InputStream protoDescriptors)
+ throws SQLException, IOException {
+ Preconditions.checkNotNull(protoDescriptors);
+ checkClosed();
+ try {
+ getSpannerConnection()
+ .setProtoDescriptors(ByteArray.copyFrom(protoDescriptors).toByteArray());
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ }
+ }
+
+ @Override
+ public byte[] getProtoDescriptors() throws SQLException {
+ checkClosed();
+ return getSpannerConnection().getProtoDescriptors();
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConstants.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConstants.java
new file mode 100644
index 000000000000..8de8c7f0ee58
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcConstants.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import java.sql.ResultSet;
+import java.sql.Statement;
+
+/** Constants for special values used by the Cloud Spanner JDBC driver. */
+public final class JdbcConstants {
+  /**
+   * Returned by {@link Statement#getUpdateCount()} when the statement that was previously executed
+   * with {@link Statement#execute(String)} produced a {@link ResultSet} rather than an update
+   * count.
+   */
+  public static final int STATEMENT_RESULT_SET = -1;
+
+  /**
+   * Returned by {@link Statement#getUpdateCount()} when the statement that was previously executed
+   * with {@link Statement#execute(String)} had no result at all ({@link
+   * com.google.cloud.spanner.connection.StatementResult.ResultType#NO_RESULT}), for example a DDL
+   * statement.
+   */
+  public static final int STATEMENT_NO_RESULT = -2;
+
+  /** Utility class; not intended for instantiation. */
+  private JdbcConstants() {}
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataSource.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataSource.java
new file mode 100644
index 000000000000..f0f073aad29c
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataSource.java
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import static com.google.cloud.spanner.jdbc.JdbcDriver.appendPropertiesToUrl;
+import static com.google.cloud.spanner.jdbc.JdbcDriver.buildConnectionOptions;
+import static com.google.cloud.spanner.jdbc.JdbcDriver.maybeAddUserAgent;
+
+import com.google.cloud.spanner.connection.ConnectionOptions;
+import com.google.rpc.Code;
+import java.io.PrintWriter;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.util.Properties;
+import java.util.logging.Logger;
+import javax.sql.DataSource;
+
+/** {@link DataSource} implementation for Google Cloud Spanner. */
+public class JdbcDataSource extends AbstractJdbcWrapper implements DataSource {
+  private String url;
+  private String credentials;
+  private Boolean autocommit;
+  private Boolean readonly;
+  private Boolean retryAbortsInternally;
+
+  // Lazily built from the URL and the properties above. Read through double-checked locking in
+  // getConnection() and invalidated (under the same lock) whenever a property is modified.
+  private volatile ConnectionOptions cachedConnectionOptions;
+
+  // Make sure the JDBC driver class is loaded.
+  static {
+    try {
+      Class.forName("com.google.cloud.spanner.jdbc.JdbcDriver");
+    } catch (ClassNotFoundException e) {
+      throw new IllegalStateException(
+          "JdbcDataSource failed to load com.google.cloud.spanner.jdbc.JdbcDriver", e);
+    }
+  }
+
+  @Override
+  public PrintWriter getLogWriter() {
+    // Log writers are not supported; see getParentLogger().
+    return null;
+  }
+
+  @Override
+  public void setLogWriter(PrintWriter out) {
+    // no-op
+  }
+
+  @Override
+  public void setLoginTimeout(int seconds) {
+    // no-op
+  }
+
+  @Override
+  public int getLoginTimeout() {
+    return 0;
+  }
+
+  @Override
+  public Logger getParentLogger() throws SQLFeatureNotSupportedException {
+    throw JdbcSqlExceptionFactory.unsupported("java.util.logging is not used");
+  }
+
+  /**
+   * Returns a new {@link JdbcConnection} for the configured URL. The {@link ConnectionOptions}
+   * are built once and cached until one of the properties of this data source changes.
+   *
+   * @throws SQLException if no URL has been set, or if the URL is not valid for this driver
+   */
+  @Override
+  public Connection getConnection() throws SQLException {
+    if (getUrl() == null) {
+      throw JdbcSqlExceptionFactory.of(
+          "There is no URL specified for this data source", Code.FAILED_PRECONDITION);
+    }
+    if (cachedConnectionOptions == null) {
+      synchronized (this) {
+        if (cachedConnectionOptions == null) {
+          if (!JdbcDriver.getRegisteredDriver().acceptsURL(getUrl())) {
+            throw JdbcSqlExceptionFactory.of(
+                "The URL " + getUrl() + " is not valid for the data source " + getClass().getName(),
+                Code.FAILED_PRECONDITION);
+          }
+          Properties properties = createProperties();
+          maybeAddUserAgent(properties);
+          // Strip the leading "jdbc:" prefix (5 characters); the Connection API uses the bare URI.
+          String connectionUri = appendPropertiesToUrl(url.substring(5), properties);
+          cachedConnectionOptions = buildConnectionOptions(connectionUri, properties);
+        }
+      }
+    }
+    return new JdbcConnection(getUrl(), cachedConnectionOptions);
+  }
+
+  /**
+   * Returns a connection for the configured URL. The given username and password are ignored;
+   * authentication is done with the Google Cloud credentials of this data source.
+   */
+  @Override
+  public Connection getConnection(String username, String password) throws SQLException {
+    return getConnection();
+  }
+
+  /** Translates the properties of this data source into Spanner connection properties. */
+  private Properties createProperties() {
+    Properties props = new Properties();
+    if (this.credentials != null) {
+      props.setProperty(ConnectionOptions.CREDENTIALS_PROPERTY_NAME, this.credentials);
+    }
+    if (this.autocommit != null) {
+      props.setProperty(
+          ConnectionOptions.AUTOCOMMIT_PROPERTY_NAME, String.valueOf(this.autocommit));
+    }
+    if (this.readonly != null) {
+      props.setProperty(ConnectionOptions.READONLY_PROPERTY_NAME, String.valueOf(this.readonly));
+    }
+    if (this.retryAbortsInternally != null) {
+      props.setProperty(
+          ConnectionOptions.RETRY_ABORTS_INTERNALLY_PROPERTY_NAME,
+          String.valueOf(this.retryAbortsInternally));
+    }
+    return props;
+  }
+
+  @Override
+  public boolean isClosed() {
+    // A data source has no open/closed state of its own.
+    return false;
+  }
+
+  /**
+   * @return the JDBC URL to use for this {@link DataSource}.
+   */
+  public String getUrl() {
+    return url;
+  }
+
+  /**
+   * @param url The JDBC URL to use for this {@link DataSource}.
+   */
+  public void setUrl(String url) {
+    // Assign the new value and invalidate the cached options under the same lock that
+    // getConnection() holds while building the cache. Clearing the cache before assigning the
+    // field without the lock would allow a concurrent getConnection() to re-cache options that
+    // were built from the old value.
+    synchronized (this) {
+      this.url = url;
+      cachedConnectionOptions = null;
+    }
+  }
+
+  /**
+   * @return the credentials URL to use for this {@link DataSource}. If a credentials URL is
+   *     specified in both the connection URL and using this property, the value in the connection
+   *     URL will be used.
+   */
+  public String getCredentials() {
+    return credentials;
+  }
+
+  /**
+   * @param credentials The credentials URL to use for this {@link DataSource}. If a credentials URL
+   *     is specified in both the connection URL and using this property, the value in the
+   *     connection URL will be used.
+   */
+  public void setCredentials(String credentials) {
+    // Atomic assign + cache invalidation, for the same reason as in setUrl(String).
+    synchronized (this) {
+      this.credentials = credentials;
+      cachedConnectionOptions = null;
+    }
+  }
+
+  /**
+   * @return the initial autocommit setting to use for this {@link DataSource}. If autocommit is
+   *     specified in both the connection URL and using this property, the value in the connection
+   *     URL will be used.
+   */
+  public Boolean getAutocommit() {
+    return autocommit;
+  }
+
+  /**
+   * @param autocommit The initial autocommit setting to use for this {@link DataSource}. If
+   *     autocommit is specified in both the connection URL and using this property, the value in
+   *     the connection URL will be used.
+   */
+  public void setAutocommit(Boolean autocommit) {
+    synchronized (this) {
+      this.autocommit = autocommit;
+      cachedConnectionOptions = null;
+    }
+  }
+
+  /**
+   * @return the initial readonly setting to use for this {@link DataSource}. If readonly is
+   *     specified in both the connection URL and using this property, the value in the connection
+   *     URL will be used.
+   */
+  public Boolean getReadonly() {
+    return readonly;
+  }
+
+  /**
+   * @param readonly The initial readonly setting to use for this {@link DataSource}. If readonly is
+   *     specified in both the connection URL and using this property, the value in the connection
+   *     URL will be used.
+   */
+  public void setReadonly(Boolean readonly) {
+    synchronized (this) {
+      this.readonly = readonly;
+      cachedConnectionOptions = null;
+    }
+  }
+
+  /**
+   * @return the initial retryAbortsInternally setting to use for this {@link DataSource}. If
+   *     retryAbortsInternally is specified in both the connection URL and using this property, the
+   *     value in the connection URL will be used.
+   */
+  public Boolean getRetryAbortsInternally() {
+    return retryAbortsInternally;
+  }
+
+  /**
+   * @param retryAbortsInternally The initial retryAbortsInternally setting to use for this {@link
+   *     DataSource}. If retryAbortsInternally is specified in both the connection URL and using
+   *     this property, the value in the connection URL will be used.
+   */
+  public void setRetryAbortsInternally(Boolean retryAbortsInternally) {
+    synchronized (this) {
+      this.retryAbortsInternally = retryAbortsInternally;
+      cachedConnectionOptions = null;
+    }
+  }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataType.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataType.java
new file mode 100644
index 000000000000..892c0057ce60
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDataType.java
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.cloud.spanner.Dialect;
+import com.google.cloud.spanner.ResultSet;
+import com.google.cloud.spanner.Struct;
+import com.google.cloud.spanner.Type;
+import com.google.cloud.spanner.Type.Code;
+import java.math.BigDecimal;
+import java.sql.Date;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+
+/** Enum for mapping Cloud Spanner data types to Java classes and JDBC SQL {@link Types}. */
+enum JdbcDataType {
+  BOOL {
+    @Override
+    public int getSqlType() {
+      return Types.BOOLEAN;
+    }
+
+    @Override
+    public Class<Boolean> getJavaClass() {
+      return Boolean.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.BOOL;
+    }
+
+    @Override
+    public List<Boolean> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getBooleanList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.bool();
+    }
+  },
+  BYTES {
+    @Override
+    public int getSqlType() {
+      return Types.BINARY;
+    }
+
+    @Override
+    public Class<byte[]> getJavaClass() {
+      return byte[].class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.BYTES;
+    }
+
+    @Override
+    public List<byte[]> getArrayElements(ResultSet rs, int columnIndex) {
+      return JdbcTypeConverter.toJavaByteArrays(rs.getBytesList(columnIndex));
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.bytes();
+    }
+  },
+  DATE {
+    @Override
+    public int getSqlType() {
+      return Types.DATE;
+    }
+
+    @Override
+    public Class<Date> getJavaClass() {
+      return Date.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.DATE;
+    }
+
+    @Override
+    public List<Date> getArrayElements(ResultSet rs, int columnIndex) {
+      return JdbcTypeConverter.toSqlDates(rs.getDateList(columnIndex));
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.date();
+    }
+  },
+  FLOAT32 {
+    private final Set<String> aliases = new HashSet<>(Collections.singletonList("float4"));
+
+    @Override
+    public int getSqlType() {
+      return Types.REAL;
+    }
+
+    @Override
+    public int getScale() {
+      return 7;
+    }
+
+    @Override
+    public int getPrecision() {
+      return 7;
+    }
+
+    @Override
+    public int getDefaultColumnDisplaySize() {
+      return 7;
+    }
+
+    @Override
+    public Class<Float> getJavaClass() {
+      return Float.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.FLOAT32;
+    }
+
+    @Override
+    public List<Float> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getFloatList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.float32();
+    }
+
+    @Override
+    public Set<String> getPostgreSQLAliases() {
+      return aliases;
+    }
+  },
+  FLOAT64 {
+    private final Set<Class<?>> classes = new HashSet<>(Arrays.asList(Float.class, Double.class));
+    private final Set<String> aliases = new HashSet<>(Collections.singletonList("float8"));
+
+    @Override
+    public int getSqlType() {
+      return Types.DOUBLE;
+    }
+
+    @Override
+    public Class<Double> getJavaClass() {
+      return Double.class;
+    }
+
+    @Override
+    public Set<Class<?>> getSupportedJavaClasses() {
+      return classes;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.FLOAT64;
+    }
+
+    @Override
+    public List<Double> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getDoubleList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.float64();
+    }
+
+    @Override
+    public Set<String> getPostgreSQLAliases() {
+      return aliases;
+    }
+  },
+  INT64 {
+    private final Set<Class<?>> classes =
+        new HashSet<>(Arrays.asList(Byte.class, Short.class, Integer.class, Long.class));
+
+    @Override
+    public int getSqlType() {
+      return Types.BIGINT;
+    }
+
+    @Override
+    public Class<Long> getJavaClass() {
+      return Long.class;
+    }
+
+    @Override
+    public Set<Class<?>> getSupportedJavaClasses() {
+      return classes;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.INT64;
+    }
+
+    @Override
+    public List<Long> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getLongList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.int64();
+    }
+  },
+  NUMERIC {
+    private final Set<String> aliases = new HashSet<>(Collections.singletonList("decimal"));
+
+    @Override
+    public int getSqlType() {
+      return Types.NUMERIC;
+    }
+
+    @Override
+    public Class<BigDecimal> getJavaClass() {
+      return BigDecimal.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.NUMERIC;
+    }
+
+    @Override
+    public List<BigDecimal> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getBigDecimalList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.numeric();
+    }
+
+    @Override
+    public Set<String> getPostgreSQLAliases() {
+      return aliases;
+    }
+  },
+  PG_NUMERIC {
+    @Override
+    public int getSqlType() {
+      return Types.NUMERIC;
+    }
+
+    @Override
+    public Class<BigDecimal> getJavaClass() {
+      return BigDecimal.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.PG_NUMERIC;
+    }
+
+    @Override
+    public List<?> getArrayElements(ResultSet rs, int columnIndex) {
+      // PG numeric arrays are fetched through the generic Value accessor, as they may contain
+      // NaN, which BigDecimal cannot represent.
+      return rs.getValue(columnIndex).getNumericArray();
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.pgNumeric();
+    }
+  },
+  STRING {
+    private final Set<String> aliases = new HashSet<>(Arrays.asList("varchar", "text"));
+
+    @Override
+    public int getSqlType() {
+      return Types.NVARCHAR;
+    }
+
+    @Override
+    public Class<String> getJavaClass() {
+      return String.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.STRING;
+    }
+
+    @Override
+    public List<String> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getStringList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.string();
+    }
+
+    @Override
+    public Set<String> getPostgreSQLAliases() {
+      return aliases;
+    }
+  },
+  JSON {
+    @Override
+    public int getSqlType() {
+      return JsonType.VENDOR_TYPE_NUMBER;
+    }
+
+    @Override
+    public Class<String> getJavaClass() {
+      return String.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.JSON;
+    }
+
+    @Override
+    public List<String> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getJsonList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.json();
+    }
+  },
+  PG_JSONB {
+    @Override
+    public int getSqlType() {
+      return PgJsonbType.VENDOR_TYPE_NUMBER;
+    }
+
+    @Override
+    public Class<String> getJavaClass() {
+      return String.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.PG_JSONB;
+    }
+
+    @Override
+    public List<String> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getPgJsonbList(columnIndex);
+    }
+
+    @Override
+    public String getTypeName() {
+      return "JSONB";
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.pgJsonb();
+    }
+  },
+  TIMESTAMP {
+    @Override
+    public int getSqlType() {
+      return Types.TIMESTAMP;
+    }
+
+    @Override
+    public Class<Timestamp> getJavaClass() {
+      return Timestamp.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.TIMESTAMP;
+    }
+
+    @Override
+    public List<Timestamp> getArrayElements(ResultSet rs, int columnIndex) {
+      return JdbcTypeConverter.toSqlTimestamps(rs.getTimestampList(columnIndex));
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.timestamp();
+    }
+  },
+  UUID {
+    @Override
+    public int getSqlType() {
+      return UuidType.VENDOR_TYPE_NUMBER;
+    }
+
+    @Override
+    public Class<java.util.UUID> getJavaClass() {
+      return java.util.UUID.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.UUID;
+    }
+
+    @Override
+    public List<java.util.UUID> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getUuidList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.uuid();
+    }
+  },
+  STRUCT {
+    @Override
+    public int getSqlType() {
+      return Types.STRUCT;
+    }
+
+    @Override
+    public Class<Struct> getJavaClass() {
+      return Struct.class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.STRUCT;
+    }
+
+    @Override
+    public List<Struct> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getStructList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      return Type.struct();
+    }
+  },
+  PROTO {
+    @Override
+    public int getSqlType() {
+      return ProtoMessageType.VENDOR_TYPE_NUMBER;
+    }
+
+    @Override
+    public Class<byte[]> getJavaClass() {
+      return byte[].class;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.PROTO;
+    }
+
+    @Override
+    public List<byte[]> getArrayElements(ResultSet rs, int columnIndex) {
+      return JdbcTypeConverter.toJavaByteArrays(rs.getBytesList(columnIndex));
+    }
+
+    @Override
+    public Type getSpannerType() {
+      // PROTO values are transported as BYTES.
+      return Type.bytes();
+    }
+  },
+  ENUM {
+    private final Set<Class<?>> classes = new HashSet<>(Arrays.asList(Integer.class, Long.class));
+
+    @Override
+    public int getSqlType() {
+      return ProtoEnumType.VENDOR_TYPE_NUMBER;
+    }
+
+    @Override
+    public Class<Long> getJavaClass() {
+      return Long.class;
+    }
+
+    @Override
+    public Set<Class<?>> getSupportedJavaClasses() {
+      return classes;
+    }
+
+    @Override
+    public Code getCode() {
+      return Code.ENUM;
+    }
+
+    @Override
+    public List<Long> getArrayElements(ResultSet rs, int columnIndex) {
+      return rs.getLongList(columnIndex);
+    }
+
+    @Override
+    public Type getSpannerType() {
+      // ENUM values are transported as INT64.
+      return Type.int64();
+    }
+  };
+
+  public abstract int getSqlType();
+
+  public abstract Code getCode();
+
+  public abstract Type getSpannerType();
+
+  /** PostgreSQL-dialect alias names for this type; empty when there are none. */
+  public Set<String> getPostgreSQLAliases() {
+    return Collections.emptySet();
+  }
+
+  /**
+   * @param typeName type of the column
+   * @return true if type name matches current type name or matches with one of postgres aliases
+   *     or if it matches equivalent postgres type.
+   */
+  boolean matches(String typeName) {
+    return getTypeName().equalsIgnoreCase(typeName)
+        || getPostgreSQLAliases().contains(typeName.toLowerCase())
+        || getSpannerType().getSpannerTypeName(Dialect.POSTGRESQL).equalsIgnoreCase(typeName);
+  }
+
+  // TODO: Implement and use this method for all types.
+  public int getPrecision() {
+    throw new UnsupportedOperationException();
+  }
+
+  // TODO: Implement and use this method for all types.
+  public int getScale() {
+    throw new UnsupportedOperationException();
+  }
+
+  // TODO: Implement and use this method for all types.
+  public int getDefaultColumnDisplaySize() {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * @param rs the result set to look up the elements
+   * @param columnIndex zero based column index
+   * @return The corresponding array elements of the type in the given result set
+   */
+  public abstract List<?> getArrayElements(ResultSet rs, int columnIndex);
+
+  public String getTypeName() {
+    return name();
+  }
+
+  public abstract Class<?> getJavaClass();
+
+  /** The Java classes that may be supplied as input values for this type. */
+  public Set<? extends Class<?>> getSupportedJavaClasses() {
+    return Collections.singleton(getJavaClass());
+  }
+
+  /** Returns the first type that supports the given Java class, or null if there is none. */
+  public static JdbcDataType getType(Class<?> clazz) {
+    for (JdbcDataType type : JdbcDataType.values()) {
+      if (type.getSupportedJavaClasses().contains(clazz)) {
+        return type;
+      }
+    }
+    return null;
+  }
+
+  /** Returns the type for the given Spanner type code, or null if there is none. */
+  public static JdbcDataType getType(Code code) {
+    for (JdbcDataType type : JdbcDataType.values()) {
+      if (type.getCode() == code) {
+        return type;
+      }
+    }
+    return null;
+  }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaData.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaData.java
new file mode 100644
index 000000000000..7168e2f9204b
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDatabaseMetaData.java
@@ -0,0 +1,1771 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.auth.Credentials;
+import com.google.auth.ServiceAccountSigner;
+import com.google.auth.oauth2.UserCredentials;
+import com.google.cloud.spanner.Dialect;
+import com.google.cloud.spanner.ResultSets;
+import com.google.cloud.spanner.Struct;
+import com.google.cloud.spanner.Type;
+import com.google.cloud.spanner.Type.StructField;
+import com.google.cloud.spanner.connection.Connection.InternalMetadataQuery;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableSet;
+import java.io.BufferedReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.ResultSet;
+import java.sql.RowIdLifetime;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Properties;
+import java.util.Scanner;
+
+/** {@link DatabaseMetaData} implementation for Cloud Spanner */
+class JdbcDatabaseMetaData extends AbstractJdbcWrapper implements DatabaseMetaData {
+ private static final int JDBC_MAJOR_VERSION = 4;
+ private static final int JDBC_MINOR_VERSION = 1;
+ private static final int DATABASE_MAJOR_VERSION = 1;
+ private static final int DATABASE_MINOR_VERSION = 0;
+ private static final String PRODUCT_NAME = "Google Cloud Spanner";
+ private static final String POSTGRESQL_PRODUCT_NAME = PRODUCT_NAME + " PostgreSQL";
+
+  /**
+   * Reads the SQL text for a metadata query from a classpath resource.
+   *
+   * @param filename name of the resource file, relative to this class
+   * @param dialect determines whether the PostgreSQL variant (in the "postgresql" sub-folder) or
+   *     the GoogleSQL variant is loaded
+   * @return the full contents of the file, with lines joined by '\n'
+   */
+  @VisibleForTesting
+  static String readSqlFromFile(String filename, Dialect dialect) {
+    InputStream in;
+    switch (dialect) {
+      case POSTGRESQL:
+        in = JdbcDatabaseMetaData.class.getResourceAsStream("postgresql/" + filename);
+        break;
+      case GOOGLE_STANDARD_SQL:
+      default:
+        in = JdbcDatabaseMetaData.class.getResourceAsStream(filename);
+    }
+    if (in == null) {
+      // Fail with a descriptive error instead of a NullPointerException from the reader below.
+      throw new IllegalStateException("Cannot find metadata SQL file " + filename);
+    }
+    StringBuilder builder = new StringBuilder();
+    // Use an explicit charset: without one, InputStreamReader falls back to the platform default,
+    // which could garble the SQL files on systems that do not default to UTF-8.
+    try (Scanner scanner =
+        new Scanner(new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)))) {
+      while (scanner.hasNextLine()) {
+        builder.append(scanner.nextLine()).append("\n");
+      }
+    }
+    return builder.toString();
+  }
+
+ private final JdbcConnection connection;
+
+ JdbcDatabaseMetaData(JdbcConnection connection) {
+ this.connection = connection;
+ }
+
+ @Override
+ public boolean isClosed() {
+ return false;
+ }
+
+ @Override
+ public boolean allProceduresAreCallable() {
+ return true;
+ }
+
+ @Override
+ public boolean allTablesAreSelectable() {
+ return true;
+ }
+
+ @Override
+ public String getURL() {
+ return connection.getConnectionUrl();
+ }
+
+  /** Derives a user name from the credentials that this connection uses, if any. */
+  @Override
+  public String getUserName() {
+    Credentials credentials = connection.getConnectionOptions().getCredentials();
+    // instanceof is false for null, so no explicit null check is needed.
+    if (credentials instanceof ServiceAccountSigner) {
+      return ((ServiceAccountSigner) credentials).getAccount();
+    }
+    if (credentials instanceof UserCredentials) {
+      return ((UserCredentials) credentials).getClientId();
+    }
+    // No credentials, or a credential type without a usable account identifier.
+    return "";
+  }
+
+ @Override
+ public boolean isReadOnly() {
+ return false;
+ }
+
+ @Override
+ public boolean nullsAreSortedHigh() {
+ return false;
+ }
+
+ @Override
+ public boolean nullsAreSortedLow() {
+ return true;
+ }
+
+ @Override
+ public boolean nullsAreSortedAtStart() {
+ return false;
+ }
+
+ @Override
+ public boolean nullsAreSortedAtEnd() {
+ return false;
+ }
+
+ @Override
+ public String getDatabaseProductName() {
+ return connection.getDialect() == Dialect.POSTGRESQL ? POSTGRESQL_PRODUCT_NAME : PRODUCT_NAME;
+ }
+
+ @Override
+ public String getDatabaseProductVersion() {
+ return getDatabaseMajorVersion() + "." + getDatabaseMinorVersion();
+ }
+
+ @Override
+ public String getDriverName() {
+ return JdbcDriver.class.getName();
+ }
+
+ @Override
+ public String getDriverVersion() {
+ return getDriverMajorVersion() + "." + getDriverMinorVersion();
+ }
+
+ @Override
+ public int getDriverMajorVersion() {
+ return JdbcDriver.MAJOR_VERSION;
+ }
+
+ @Override
+ public int getDriverMinorVersion() {
+ return JdbcDriver.MINOR_VERSION;
+ }
+
+ @Override
+ public boolean usesLocalFiles() {
+ return false;
+ }
+
+ @Override
+ public boolean usesLocalFilePerTable() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsMixedCaseIdentifiers() {
+ return false;
+ }
+
+ @Override
+ public boolean storesUpperCaseIdentifiers() {
+ return false;
+ }
+
+ @Override
+ public boolean storesLowerCaseIdentifiers() {
+ return connection.getDialect() == Dialect.POSTGRESQL;
+ }
+
+ @Override
+ public boolean storesMixedCaseIdentifiers() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsMixedCaseQuotedIdentifiers() {
+ return false;
+ }
+
+ @Override
+ public boolean storesUpperCaseQuotedIdentifiers() {
+ return false;
+ }
+
+ @Override
+ public boolean storesLowerCaseQuotedIdentifiers() {
+ return false;
+ }
+
+ @Override
+ public boolean storesMixedCaseQuotedIdentifiers() {
+ return true;
+ }
+
+ @Override
+ public String getIdentifierQuoteString() {
+ return "`";
+ }
+
+ @Override
+ public String getSQLKeywords() {
+ return "ASSERT_ROWS_MODIFIED,ENUM,GROUPS,HASH,IGNORE,LOOKUP,PROTO,RESPECT,STRUCT,WINDOW";
+ }
+
+ @Override
+ public String getNumericFunctions() {
+ return "ABS,SIGN,IS_INF,IS_NAN,IEEE_DIVIDE,SQRT,POW,POWER,EXP,LN,LOG,LOG10,GREATEST,LEAST,DIV,MOD,ROUND,TRUNC,CEIL,CEILING,FLOOR,COS,COSH,ACOS,ACOSH,SIN,SINH,ASIN,ASINH,TAN,TANH,ATAN,ATANH,ATAN2,FARM_FINGERPRINT,SHA1,SHA256,SHA512";
+ }
+
+ @Override
+ public String getStringFunctions() {
+ return "BYTE_LENGTH,CHAR_LENGTH,CHARACTER_LENGTH,CODE_POINTS_TO_BYTES,CODE_POINTS_TO_STRING,CONCAT,ENDS_WITH,FORMAT,FROM_BASE64,FROM_HEX,LENGTH,LPAD,LOWER,LTRIM,REGEXP_CONTAINS,REGEXP_EXTRACT,REGEXP_EXTRACT_ALL,REGEXP_REPLACE,REPLACE,REPEAT,REVERSE,RPAD,RTRIM,SAFE_CONVERT_BYTES_TO_STRING,SPLIT,STARTS_WITH,STRPOS,SUBSTR,TO_BASE64,TO_CODE_POINTS,TO_HEX,TRIM,UPPER,JSON_QUERY,JSON_VALUE";
+ }
+
+ @Override
+ public String getSystemFunctions() {
+ return "";
+ }
+
+ @Override
+ public String getTimeDateFunctions() {
+ return "CURRENT_DATE,EXTRACT,DATE,DATE_ADD,DATE_SUB,DATE_DIFF,DATE_TRUNC,DATE_FROM_UNIX_DATE,FORMAT_DATE,PARSE_DATE,UNIX_DATE,CURRENT_TIMESTAMP,STRING,TIMESTAMP,TIMESTAMP_ADD,TIMESTAMP_SUB,TIMESTAMP_DIFF,TIMESTAMP_TRUNC,FORMAT_TIMESTAMP,PARSE_TIMESTAMP,TIMESTAMP_SECONDS,TIMESTAMP_MILLIS,TIMESTAMP_MICROS,UNIX_SECONDS,UNIX_MILLIS,UNIX_MICROS";
+ }
+
+ @Override
+ public String getSearchStringEscape() {
+ return "\\";
+ }
+
+ @Override
+ public String getExtraNameCharacters() {
+ return "";
+ }
+
+ @Override
+ public boolean supportsAlterTableWithAddColumn() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsAlterTableWithDropColumn() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsColumnAliasing() {
+ return true;
+ }
+
+ @Override
+ public boolean nullPlusNonNullIsNull() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsConvert() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsConvert(int fromType, int toType) {
+ return false;
+ }
+
+ @Override
+ public boolean supportsTableCorrelationNames() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsDifferentTableCorrelationNames() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsExpressionsInOrderBy() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsOrderByUnrelated() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsGroupBy() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsGroupByUnrelated() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsGroupByBeyondSelect() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsLikeEscapeClause() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsMultipleResultSets() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsMultipleTransactions() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsNonNullableColumns() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsMinimumSQLGrammar() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsCoreSQLGrammar() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsExtendedSQLGrammar() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsANSI92EntryLevelSQL() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsANSI92IntermediateSQL() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsANSI92FullSQL() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsIntegrityEnhancementFacility() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsOuterJoins() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsFullOuterJoins() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsLimitedOuterJoins() {
+ return true;
+ }
+
+ @Override
+ public String getSchemaTerm() {
+ return "SCHEMA";
+ }
+
+ @Override
+ public String getProcedureTerm() {
+ return "PROCEDURE";
+ }
+
+ @Override
+ public String getCatalogTerm() {
+ // Spanner does not support catalogs, but the term is included for compatibility with the SQL
+ // standard
+ return "CATALOG";
+ }
+
+ @Override
+ public boolean isCatalogAtStart() {
+ return false;
+ }
+
+ @Override
+ public String getCatalogSeparator() {
+ return ".";
+ }
+
+ @Override
+ public boolean supportsSchemasInDataManipulation() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsSchemasInProcedureCalls() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsSchemasInTableDefinitions() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsSchemasInIndexDefinitions() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsSchemasInPrivilegeDefinitions() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsCatalogsInDataManipulation() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsCatalogsInProcedureCalls() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsCatalogsInTableDefinitions() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsCatalogsInIndexDefinitions() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsCatalogsInPrivilegeDefinitions() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsPositionedDelete() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsPositionedUpdate() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsSelectForUpdate() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsStoredProcedures() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsSubqueriesInComparisons() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsSubqueriesInExists() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsSubqueriesInIns() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsSubqueriesInQuantifieds() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsCorrelatedSubqueries() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsUnion() {
+ // Note that Cloud Spanner requires the user to specify 'UNION DISTINCT' or 'UNION ALL' in a
+ // query. 'UNION DISTINCT' is equal to the SQL operation 'UNION'.
+ return true;
+ }
+
+ @Override
+ public boolean supportsUnionAll() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsOpenCursorsAcrossCommit() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsOpenCursorsAcrossRollback() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsOpenStatementsAcrossCommit() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsOpenStatementsAcrossRollback() {
+ return true;
+ }
+
+ @Override
+ public int getMaxBinaryLiteralLength() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxCharLiteralLength() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxColumnNameLength() {
+ return 128;
+ }
+
+ @Override
+ public int getMaxColumnsInGroupBy() {
+ return 1000;
+ }
+
+ @Override
+ public int getMaxColumnsInIndex() {
+ return 16;
+ }
+
+ @Override
+ public int getMaxColumnsInOrderBy() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxColumnsInSelect() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxColumnsInTable() {
+ return 1024;
+ }
+
+ @Override
+ public int getMaxConnections() {
+ // there is a max number of sessions, but that is not the same as the max number of connections
+ return 0;
+ }
+
+ @Override
+ public int getMaxCursorNameLength() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxIndexLength() {
+ return 8000;
+ }
+
+ @Override
+ public int getMaxSchemaNameLength() {
+ return 128;
+ }
+
+ @Override
+ public int getMaxProcedureNameLength() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxCatalogNameLength() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxRowSize() {
+ // The limit is 1024 columns per table * 10MB per column, which is more than fits in an int.
+ // We therefore return 0 to indicate no limit (or an unknown limit).
+ return 0;
+ }
+
+ @Override
+ public boolean doesMaxRowSizeIncludeBlobs() {
+ return true;
+ }
+
+ @Override
+ public int getMaxStatementLength() {
+ return 1000000;
+ }
+
+ @Override
+ public int getMaxStatements() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxTableNameLength() {
+ return 128;
+ }
+
+ @Override
+ public int getMaxTablesInSelect() {
+ return 0;
+ }
+
+ @Override
+ public int getMaxUserNameLength() {
+ return 0;
+ }
+
+ @Override
+ public int getDefaultTransactionIsolation() {
+ // Cloud Spanner transactions are serializable by default.
+ return Connection.TRANSACTION_SERIALIZABLE;
+ }
+
+ @Override
+ public boolean supportsTransactions() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsTransactionIsolationLevel(int level) {
+ // Delegates to the shared static helper so other classes can perform the same check.
+ return supportsIsolationLevel(level);
+ }
+
+ /** Returns true for the isolation levels that a Cloud Spanner connection accepts. */
+ static boolean supportsIsolationLevel(int level) {
+ return level == Connection.TRANSACTION_SERIALIZABLE
+ || level == Connection.TRANSACTION_REPEATABLE_READ;
+ }
+
+ @Override
+ public boolean supportsDataDefinitionAndDataManipulationTransactions() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsDataManipulationTransactionsOnly() {
+ return true;
+ }
+
+ @Override
+ public boolean dataDefinitionCausesTransactionCommit() {
+ return false;
+ }
+
+ @Override
+ public boolean dataDefinitionIgnoredInTransactions() {
+ return false;
+ }
+
+ /**
+ * Cloud Spanner does not support stored procedures (see {@link #supportsStoredProcedures()}),
+ * so this always returns an empty result set with the standard JDBC column layout.
+ */
+ @Override
+ public ResultSet getProcedures(
+ String catalog, String schemaPattern, String procedureNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("PROCEDURE_CAT", Type.string()),
+ StructField.of("PROCEDURE_SCHEM", Type.string()),
+ StructField.of("PROCEDURE_NAME", Type.string()),
+ StructField.of("reserved1", Type.string()),
+ StructField.of("reserved2", Type.string()),
+ StructField.of("reserved3", Type.string()),
+ StructField.of("REMARKS", Type.string()),
+ StructField.of("PROCEDURE_TYPE", Type.int64()),
+ StructField.of("SPECIFIC_NAME", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public ResultSet getProcedureColumns(
+ String catalog, String schemaPattern, String procedureNamePattern, String columnNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("PROCEDURE_CAT", Type.string()),
+ StructField.of("PROCEDURE_SCHEM", Type.string()),
+ StructField.of("PROCEDURE_NAME", Type.string()),
+ StructField.of("COLUMN_NAME", Type.string()),
+ StructField.of("COLUMN_TYPE", Type.int64()),
+ StructField.of("DATA_TYPE", Type.int64()),
+ StructField.of("TYPE_NAME", Type.string()),
+ StructField.of("PRECISION", Type.string()),
+ StructField.of("LENGTH", Type.int64()),
+ StructField.of("SCALE", Type.int64()),
+ StructField.of("RADIX", Type.int64()),
+ StructField.of("NULLABLE", Type.int64()),
+ StructField.of("REMARKS", Type.string()),
+ StructField.of("COLUMN_DEF", Type.string()),
+ StructField.of("SQL_DATA_TYPE", Type.int64()),
+ StructField.of("SQL_DATETIME_SUB", Type.int64()),
+ StructField.of("CHAR_OCTET_LENGTH", Type.int64()),
+ StructField.of("ORDINAL_POSITION", Type.int64()),
+ StructField.of("IS_NULLABLE", Type.string()),
+ StructField.of("SPECIFIC_NAME", Type.string())),
+ Collections.emptyList()));
+ }
+
+ /**
+ * Prepares the given metadata query and binds the given parameters in order. A null parameter
+ * is replaced with the LIKE wildcard "%" so that it matches any value.
+ *
+ * <p>Parameters must be bound exactly as given, without changing their case: PostgreSQL-dialect
+ * databases store identifiers in lower case in INFORMATION_SCHEMA, so upper-casing a pattern
+ * would cause the metadata query to match nothing. (A bare String#toUpperCase() is also
+ * locale-sensitive, e.g. the Turkish dotless-i problem.)
+ *
+ * @param sql the metadata query to prepare; must contain one query parameter per given param
+ * @param params the parameter values to bind; null values are replaced with "%"
+ * @return the prepared statement with all parameters bound
+ * @throws SQLException if the statement cannot be created
+ */
+ private JdbcPreparedStatement prepareStatementReplaceNullWithAnyString(
+ String sql, String... params) throws SQLException {
+ JdbcPreparedStatement statement = connection.prepareStatement(sql);
+ int paramIndex = 1;
+ for (String param : params) {
+ if (param == null) {
+ statement.setString(paramIndex, "%");
+ } else {
+ statement.setString(paramIndex, param);
+ }
+ paramIndex++;
+ }
+ return statement;
+ }
+
+ /**
+ * Returns the tables and/or views that match the given patterns by running the dialect-specific
+ * DatabaseMetaData_GetTables.sql query. Null patterns are replaced with the "%" wildcard and
+ * match any value.
+ *
+ * <p>NOTE(review): only the first two entries of {@code types} are used; any additional
+ * requested table types are silently ignored, because the underlying query takes exactly two
+ * type parameters.
+ */
+ @Override
+ public ResultSet getTables(
+ String catalog, String schemaPattern, String tableNamePattern, String[] types)
+ throws SQLException {
+ String sql = readSqlFromFile("DatabaseMetaData_GetTables.sql", connection.getDialect());
+ String type1;
+ String type2;
+ if (types == null || types.length == 0) {
+ // No specific types requested: return both tables and views.
+ type1 = "TABLE";
+ type2 = "VIEW";
+ } else if (types.length == 1) {
+ // Bind the second type parameter to a sentinel that can never match.
+ type1 = types[0];
+ type2 = "NON_EXISTENT_TYPE";
+ } else {
+ type1 = types[0];
+ type2 = types[1];
+ }
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(
+ sql, catalog, schemaPattern, tableNamePattern, type1, type2);
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ @Override
+ public ResultSet getSchemas() throws SQLException {
+ // Delegates to getSchemas(catalog, schemaPattern); null arguments match any value.
+ return getSchemas(null, null);
+ }
+
+ /**
+ * Returns a single-row result set containing the connection's current catalog value, as Cloud
+ * Spanner does not support multiple catalogs per database.
+ */
+ @Override
+ public ResultSet getCatalogs() throws SQLException {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(StructField.of("TABLE_CAT", Type.string())),
+ Collections.singletonList(
+ Struct.newBuilder().set("TABLE_CAT").to(getConnection().getCatalog()).build())));
+ }
+
+ @Override
+ public ResultSet getTableTypes() {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(StructField.of("TABLE_TYPE", Type.string())),
+ Arrays.asList(
+ Struct.newBuilder().set("TABLE_TYPE").to("TABLE").build(),
+ Struct.newBuilder().set("TABLE_TYPE").to("VIEW").build())));
+ }
+
+ /**
+ * Returns the columns that match the given patterns by running the dialect-specific
+ * DatabaseMetaData_GetColumns.sql query. Null patterns are replaced with the "%" wildcard and
+ * match any value.
+ */
+ @Override
+ public ResultSet getColumns(
+ String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern)
+ throws SQLException {
+ String sql = readSqlFromFile("DatabaseMetaData_GetColumns.sql", connection.getDialect());
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(
+ sql, catalog, schemaPattern, tableNamePattern, columnNamePattern);
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ @Override
+ public ResultSet getColumnPrivileges(
+ String catalog, String schema, String table, String columnNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TABLE_CAT", Type.string()),
+ StructField.of("TABLE_SCHEM", Type.string()),
+ StructField.of("TABLE_NAME", Type.string()),
+ StructField.of("COLUMN_NAME", Type.string()),
+ StructField.of("GRANTOR", Type.string()),
+ StructField.of("GRANTEE", Type.string()),
+ StructField.of("PRIVILEGE", Type.string()),
+ StructField.of("IS_GRANTABLE", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public ResultSet getTablePrivileges(
+ String catalog, String schemaPattern, String tableNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TABLE_CAT", Type.string()),
+ StructField.of("TABLE_SCHEM", Type.string()),
+ StructField.of("TABLE_NAME", Type.string()),
+ StructField.of("GRANTOR", Type.string()),
+ StructField.of("GRANTEE", Type.string()),
+ StructField.of("PRIVILEGE", Type.string()),
+ StructField.of("IS_GRANTABLE", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public ResultSet getBestRowIdentifier(
+ String catalog, String schema, String table, int scope, boolean nullable) {
+ return getEmptyColumnsResultSet();
+ }
+
+ @Override
+ public ResultSet getVersionColumns(String catalog, String schema, String table) {
+ return getEmptyColumnsResultSet();
+ }
+
+ /**
+ * Returns an empty result set with the column layout prescribed by
+ * {@link DatabaseMetaData#getBestRowIdentifier} / {@link DatabaseMetaData#getVersionColumns}.
+ */
+ private ResultSet getEmptyColumnsResultSet() {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("SCOPE", Type.int64()),
+ StructField.of("COLUMN_NAME", Type.string()),
+ StructField.of("DATA_TYPE", Type.int64()),
+ StructField.of("TYPE_NAME", Type.string()),
+ StructField.of("COLUMN_SIZE", Type.int64()),
+ StructField.of("BUFFER_LENGTH", Type.int64()),
+ StructField.of("DECIMAL_DIGITS", Type.int64()),
+ StructField.of("PSEUDO_COLUMN", Type.int64())),
+ Collections.emptyList()));
+ }
+
+ /**
+ * Returns the primary key columns of the given table. The table name is required; a null
+ * catalog or schema matches any value.
+ */
+ @Override
+ public ResultSet getPrimaryKeys(String catalog, String schema, String table) throws SQLException {
+ JdbcPreconditions.checkArgument(table != null, "table may not be null");
+ String sql = readSqlFromFile("DatabaseMetaData_GetPrimaryKeys.sql", connection.getDialect());
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(sql, catalog, schema, table);
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ /**
+ * Returns the foreign key columns of the given table, i.e. the keys that the given table
+ * imports from other tables. The table name is required; a null catalog or schema matches any
+ * value.
+ */
+ @Override
+ public ResultSet getImportedKeys(String catalog, String schema, String table)
+ throws SQLException {
+ JdbcPreconditions.checkArgument(table != null, "table may not be null");
+ String sql = readSqlFromFile("DatabaseMetaData_GetImportedKeys.sql", connection.getDialect());
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(sql, catalog, schema, table);
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ /**
+ * Returns the foreign key columns that reference the given table, i.e. the keys that the given
+ * table exports to other tables. The table name is required; a null catalog or schema matches
+ * any value.
+ */
+ @Override
+ public ResultSet getExportedKeys(String catalog, String schema, String table)
+ throws SQLException {
+ JdbcPreconditions.checkArgument(table != null, "table may not be null");
+ String sql = readSqlFromFile("DatabaseMetaData_GetExportedKeys.sql", connection.getDialect());
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(sql, catalog, schema, table);
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ /**
+ * Returns the foreign key columns between the given parent and foreign table by running the
+ * dialect-specific DatabaseMetaData_GetCrossReferences.sql query. Null arguments are replaced
+ * with the "%" wildcard and match any value.
+ */
+ @Override
+ public ResultSet getCrossReference(
+ String parentCatalog,
+ String parentSchema,
+ String parentTable,
+ String foreignCatalog,
+ String foreignSchema,
+ String foreignTable)
+ throws SQLException {
+ String sql =
+ readSqlFromFile("DatabaseMetaData_GetCrossReferences.sql", connection.getDialect());
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(
+ sql,
+ parentCatalog,
+ parentSchema,
+ parentTable,
+ foreignCatalog,
+ foreignSchema,
+ foreignTable);
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ @Override
+ public ResultSet getTypeInfo() {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TYPE_NAME", Type.string()),
+ StructField.of("DATA_TYPE", Type.int64()),
+ StructField.of("PRECISION", Type.int64()),
+ StructField.of("LITERAL_PREFIX", Type.string()),
+ StructField.of("LITERAL_SUFFIX", Type.string()),
+ StructField.of("CREATE_PARAMS", Type.string()),
+ StructField.of("NULLABLE", Type.int64()),
+ StructField.of("CASE_SENSITIVE", Type.bool()),
+ StructField.of("SEARCHABLE", Type.int64()),
+ StructField.of("UNSIGNED_ATTRIBUTE", Type.bool()),
+ StructField.of("FIXED_PREC_SCALE", Type.bool()),
+ StructField.of("AUTO_INCREMENT", Type.bool()),
+ StructField.of("LOCAL_TYPE_NAME", Type.string()),
+ StructField.of("MINIMUM_SCALE", Type.int64()),
+ StructField.of("MAXIMUM_SCALE", Type.int64()),
+ StructField.of("SQL_DATA_TYPE", Type.int64()),
+ StructField.of("SQL_DATETIME_SUB", Type.int64()),
+ StructField.of("NUM_PREC_RADIX", Type.int64())),
+ Arrays.asList(
+ // TODO(#925): Make these dialect-dependent (i.e. 'timestamptz' for PostgreSQL).
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("STRING")
+ .set("DATA_TYPE")
+ .to(Types.NVARCHAR) // -9
+ .set("PRECISION")
+ .to(2621440L)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to("(length)")
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(true)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typeSearchable)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(true)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("STRING")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to((Long) null)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("INT64")
+ .set("DATA_TYPE")
+ .to(Types.BIGINT) // -5
+ .set("PRECISION")
+ .to(19L)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(false)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("INT64")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to(10)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("BYTES")
+ .set("DATA_TYPE")
+ .to(Types.BINARY) // -2
+ .set("PRECISION")
+ .to(10485760L)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to("(length)")
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(true)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("BYTES")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to((Long) null)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("FLOAT32")
+ .set("DATA_TYPE")
+ .to(Types.REAL) // 7
+ .set("PRECISION")
+ .to(7L)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(false)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("FLOAT32")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to(2)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("FLOAT64")
+ .set("DATA_TYPE")
+ .to(Types.DOUBLE) // 8
+ .set("PRECISION")
+ .to(15L)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(false)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("FLOAT64")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to(2)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("BOOL")
+ .set("DATA_TYPE")
+ .to(Types.BOOLEAN) // 16
+ .set("PRECISION")
+ .to((Long) null)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(true)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("BOOL")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to((Long) null)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("DATE")
+ .set("DATA_TYPE")
+ .to(Types.DATE) // 91
+ .set("PRECISION")
+ .to(10L)
+ .set("LITERAL_PREFIX")
+ .to("DATE ")
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(true)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("DATE")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to((Long) null)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("TIMESTAMP")
+ .set("DATA_TYPE")
+ .to(Types.TIMESTAMP) // 93
+ .set("PRECISION")
+ .to(35L)
+ .set("LITERAL_PREFIX")
+ .to("TIMESTAMP ")
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(true)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("TIMESTAMP")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to((Long) null)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("NUMERIC")
+ .set("DATA_TYPE")
+ .to(Types.NUMERIC) // 2
+ .set("PRECISION")
+ .to(2621440L)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typePredBasic)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(false)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("NUMERIC")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to(10)
+ .build(),
+ Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to("UUID")
+ .set("DATA_TYPE")
+ .to(Types.OTHER) // There's no JDBC-specific type code for UUID.
+ .set("PRECISION")
+ .to((Long) null)
+ .set("LITERAL_PREFIX")
+ .to("UUID ")
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(false)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typeSearchable)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(true)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to("UUID")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to((Long) null)
+ .build(),
+ getJsonType(connection.getDialect()))),
+ // Allow column 2 to be cast to short without any range checks.
+ ImmutableSet.of(2));
+ }
+
+ private Struct getJsonType(Dialect dialect) {
+ return Struct.newBuilder()
+ .set("TYPE_NAME")
+ .to(dialect == Dialect.POSTGRESQL ? "JSONB" : "JSON")
+ .set("DATA_TYPE")
+ .to(
+ dialect == Dialect.POSTGRESQL
+ ? PgJsonbType.VENDOR_TYPE_NUMBER
+ : JsonType.VENDOR_TYPE_NUMBER)
+ .set("PRECISION")
+ .to(2621440L)
+ .set("LITERAL_PREFIX")
+ .to((String) null)
+ .set("LITERAL_SUFFIX")
+ .to((String) null)
+ .set("CREATE_PARAMS")
+ .to((String) null)
+ .set("NULLABLE")
+ .to(DatabaseMetaData.typeNullable)
+ .set("CASE_SENSITIVE")
+ .to(true)
+ .set("SEARCHABLE")
+ .to(DatabaseMetaData.typeSearchable)
+ .set("UNSIGNED_ATTRIBUTE")
+ .to(true)
+ .set("FIXED_PREC_SCALE")
+ .to(false)
+ .set("AUTO_INCREMENT")
+ .to(false)
+ .set("LOCAL_TYPE_NAME")
+ .to(dialect == Dialect.POSTGRESQL ? "JSONB" : "JSON")
+ .set("MINIMUM_SCALE")
+ .to(0)
+ .set("MAXIMUM_SCALE")
+ .to(0)
+ .set("SQL_DATA_TYPE")
+ .to((Long) null)
+ .set("SQL_DATETIME_SUB")
+ .to((Long) null)
+ .set("NUM_PREC_RADIX")
+ .to((Long) null)
+ .build();
+ }
+
+ /**
+ * Returns the indexes of the given table. The {@code approximate} parameter is not used by this
+ * implementation.
+ */
+ @Override
+ public ResultSet getIndexInfo(
+ String catalog, String schema, String table, boolean unique, boolean approximate)
+ throws SQLException {
+ return getIndexInfo(catalog, schema, table, null, unique);
+ }
+
+ /**
+ * Returns the index info for the index with the given name in any table. This overload is a
+ * driver-specific extension; it is not part of {@link java.sql.DatabaseMetaData}.
+ */
+ public ResultSet getIndexInfo(String catalog, String schema, String indexName)
+ throws SQLException {
+ return getIndexInfo(catalog, schema, null, indexName, false);
+ }
+
+ // Shared implementation for both getIndexInfo variants. Null arguments are replaced with the
+ // "%" wildcard. unique==true binds 'YES' for the last parameter, unique==false binds the '%'
+ // wildcard — presumably matched against an IS_UNIQUE-style column in the SQL file; verify
+ // against DatabaseMetaData_GetIndexInfo.sql.
+ private ResultSet getIndexInfo(
+ String catalog, String schema, String table, String indexName, boolean unique)
+ throws SQLException {
+ String sql = readSqlFromFile("DatabaseMetaData_GetIndexInfo.sql", connection.getDialect());
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(
+ sql, catalog, schema, table, indexName, unique ? "YES" : "%");
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ @Override
+ public boolean supportsResultSetType(int type) {
+ return type == ResultSet.TYPE_FORWARD_ONLY;
+ }
+
+ @Override
+ public boolean supportsResultSetConcurrency(int type, int concurrency) {
+ return type == ResultSet.TYPE_FORWARD_ONLY && concurrency == ResultSet.CONCUR_READ_ONLY;
+ }
+
+ @Override
+ public boolean ownUpdatesAreVisible(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean ownDeletesAreVisible(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean ownInsertsAreVisible(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean othersUpdatesAreVisible(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean othersDeletesAreVisible(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean othersInsertsAreVisible(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean updatesAreDetected(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean deletesAreDetected(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean insertsAreDetected(int type) {
+ return false;
+ }
+
+ @Override
+ public boolean supportsBatchUpdates() {
+ return true;
+ }
+
+ @Override
+ public ResultSet getUDTs(
+ String catalog, String schemaPattern, String typeNamePattern, int[] types) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TYPE_CAT", Type.string()),
+ StructField.of("TYPE_SCHEM", Type.string()),
+ StructField.of("TYPE_NAME", Type.string()),
+ StructField.of("CLASS_NAME", Type.string()),
+ StructField.of("DATA_TYPE", Type.int64()),
+ StructField.of("REMARKS", Type.string()),
+ StructField.of("BASE_TYPE", Type.int64())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public Connection getConnection() {
+ return connection;
+ }
+
+ @Override
+ public boolean supportsSavepoints() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsNamedParameters() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsMultipleOpenResults() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsGetGeneratedKeys() {
+ return false;
+ }
+
+ @Override
+ public ResultSet getSuperTypes(String catalog, String schemaPattern, String typeNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TYPE_CAT", Type.string()),
+ StructField.of("TYPE_SCHEM", Type.string()),
+ StructField.of("TYPE_NAME", Type.string()),
+ StructField.of("SUPERTYPE_CAT", Type.string()),
+ StructField.of("SUPERTYPE_SCHEM", Type.string()),
+ StructField.of("SUPERTYPE_NAME", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public ResultSet getSuperTables(String catalog, String schemaPattern, String tableNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TABLE_CAT", Type.string()),
+ StructField.of("TABLE_SCHEM", Type.string()),
+ StructField.of("TABLE_NAME", Type.string()),
+ StructField.of("SUPERTABLE_NAME", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public ResultSet getAttributes(
+ String catalog, String schemaPattern, String typeNamePattern, String attributeNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TYPE_CAT", Type.string()),
+ StructField.of("TYPE_SCHEM", Type.string()),
+ StructField.of("TYPE_NAME", Type.string()),
+ StructField.of("ATTR_NAME", Type.string()),
+ StructField.of("DATA_TYPE", Type.int64()),
+ StructField.of("ATTR_TYPE_NAME", Type.string()),
+ StructField.of("ATTR_SIZE", Type.int64()),
+ StructField.of("DECIMAL_DIGITS", Type.int64()),
+ StructField.of("NUM_PREC_RADIX", Type.int64()),
+ StructField.of("NULLABLE", Type.int64()),
+ StructField.of("REMARKS", Type.string()),
+ StructField.of("ATTR_DEF", Type.string()),
+ StructField.of("SQL_DATA_TYPE", Type.int64()),
+ StructField.of("SQL_DATETIME_SUB", Type.int64()),
+ StructField.of("CHAR_OCTET_LENGTH", Type.int64()),
+ StructField.of("ORDINAL_POSITION", Type.int64()),
+ StructField.of("IS_NULLABLE", Type.string()),
+ StructField.of("SCOPE_CATALOG", Type.string()),
+ StructField.of("SCOPE_SCHEMA", Type.string()),
+ StructField.of("SCOPE_TABLE", Type.string()),
+ StructField.of("SOURCE_DATA_TYPE", Type.int64())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public boolean supportsResultSetHoldability(int holdability) {
+ return holdability == ResultSet.CLOSE_CURSORS_AT_COMMIT;
+ }
+
+ @Override
+ public int getResultSetHoldability() {
+ return ResultSet.CLOSE_CURSORS_AT_COMMIT;
+ }
+
+ @Override
+ public int getDatabaseMajorVersion() {
+ return DATABASE_MAJOR_VERSION;
+ }
+
+ @Override
+ public int getDatabaseMinorVersion() {
+ return DATABASE_MINOR_VERSION;
+ }
+
+ @Override
+ public int getJDBCMajorVersion() {
+ return JDBC_MAJOR_VERSION;
+ }
+
+ @Override
+ public int getJDBCMinorVersion() {
+ return JDBC_MINOR_VERSION;
+ }
+
+ @Override
+ public int getSQLStateType() {
+ return sqlStateSQL;
+ }
+
+ @Override
+ public boolean locatorsUpdateCopy() {
+ return true;
+ }
+
+ @Override
+ public boolean supportsStatementPooling() {
+ return false;
+ }
+
+ @Override
+ public RowIdLifetime getRowIdLifetime() {
+ return RowIdLifetime.ROWID_UNSUPPORTED;
+ }
+
+ /**
+ * Returns the schemas that match the given pattern by running the dialect-specific
+ * DatabaseMetaData_GetSchemas.sql query. Null arguments are replaced with the "%" wildcard and
+ * match any value.
+ *
+ * <p>The statement must NOT be closed here (e.g. with try-with-resources): per the JDBC
+ * specification, closing a Statement also closes any ResultSet it produced, and the result set
+ * is returned to and consumed by the caller. This matches the other metadata methods in this
+ * class.
+ */
+ @Override
+ public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLException {
+ String sql = readSqlFromFile("DatabaseMetaData_GetSchemas.sql", connection.getDialect());
+ JdbcPreparedStatement statement =
+ prepareStatementReplaceNullWithAnyString(sql, catalog, schemaPattern);
+ return statement.executeQueryWithOptions(InternalMetadataQuery.INSTANCE);
+ }
+
+ @Override
+ public boolean supportsStoredFunctionsUsingCallSyntax() {
+ return false;
+ }
+
+ @Override
+ public boolean autoCommitFailureClosesAllResultSets() {
+ return false;
+ }
+
+ /**
+ * The max length for client info values is 63 to make them fit in Cloud Spanner session labels.
+ */
+ static final int MAX_CLIENT_INFO_VALUE_LENGTH = 63;
+
+ /** Collects the NAME -> DEFAULT_VALUE pairs of all supported client info properties. */
+ static Properties getDefaultClientInfoProperties() throws SQLException {
+ Properties defaults = new Properties();
+ try (ResultSet clientInfo = getDefaultClientInfo()) {
+ while (clientInfo.next()) {
+ defaults.put(clientInfo.getString("NAME"), clientInfo.getString("DEFAULT_VALUE"));
+ }
+ }
+ return defaults;
+ }
+
+ private static ResultSet getDefaultClientInfo() {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("NAME", Type.string()),
+ StructField.of("MAX_LEN", Type.int64()),
+ StructField.of("DEFAULT_VALUE", Type.string()),
+ StructField.of("DESCRIPTION", Type.string())),
+ Arrays.asList(
+ Struct.newBuilder()
+ .set("NAME")
+ .to("APPLICATIONNAME")
+ .set("MAX_LEN")
+ .to(MAX_CLIENT_INFO_VALUE_LENGTH)
+ .set("DEFAULT_VALUE")
+ .to("")
+ .set("DESCRIPTION")
+ .to("The name of the application currently utilizing the connection.")
+ .build(),
+ Struct.newBuilder()
+ .set("NAME")
+ .to("CLIENTHOSTNAME")
+ .set("MAX_LEN")
+ .to(MAX_CLIENT_INFO_VALUE_LENGTH)
+ .set("DEFAULT_VALUE")
+ .to("")
+ .set("DESCRIPTION")
+ .to(
+ "The hostname of the computer the application using the connection is running on.")
+ .build(),
+ Struct.newBuilder()
+ .set("NAME")
+ .to("CLIENTUSER")
+ .set("MAX_LEN")
+ .to(MAX_CLIENT_INFO_VALUE_LENGTH)
+ .set("DEFAULT_VALUE")
+ .to("")
+ .set("DESCRIPTION")
+ .to(
+ "The name of the user that the application using the connection is performing work for. "
+ + "This may not be the same as the user name that was used in establishing the connection.")
+ .build())));
+ }
+
+ @Override
+ public ResultSet getClientInfoProperties() {
+ return getDefaultClientInfo();
+ }
+
+ @Override
+ public ResultSet getFunctions(String catalog, String schemaPattern, String functionNamePattern) {
+ // TODO: return system functions
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("FUNCTION_CAT", Type.string()),
+ StructField.of("FUNCTION_SCHEM", Type.string()),
+ StructField.of("FUNCTION_NAME", Type.string()),
+ StructField.of("REMARKS", Type.string()),
+ StructField.of("FUNCTION_TYPE", Type.int64()),
+ StructField.of("SPECIFIC_NAME", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public ResultSet getFunctionColumns(
+ String catalog, String schemaPattern, String functionNamePattern, String columnNamePattern) {
+ // TODO: return system functions
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("FUNCTION_CAT", Type.string()),
+ StructField.of("FUNCTION_SCHEM", Type.string()),
+ StructField.of("FUNCTION_NAME", Type.string()),
+ StructField.of("COLUMN_NAME", Type.string()),
+ StructField.of("COLUMN_TYPE", Type.int64()),
+ StructField.of("DATA_TYPE", Type.int64()),
+ StructField.of("TYPE_NAME", Type.string()),
+ StructField.of("PRECISION", Type.int64()),
+ StructField.of("LENGTH", Type.int64()),
+ StructField.of("SCALE", Type.int64()),
+ StructField.of("RADIX", Type.int64()),
+ StructField.of("NULLABLE", Type.int64()),
+ StructField.of("REMARKS", Type.string()),
+ StructField.of("CHAR_OCTET_LENGTH", Type.int64()),
+ StructField.of("ORDINAL_POSITION", Type.int64()),
+ StructField.of("IS_NULLABLE", Type.string()),
+ StructField.of("SPECIFIC_NAME", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public ResultSet getPseudoColumns(
+ String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) {
+ return JdbcResultSet.of(
+ ResultSets.forRows(
+ Type.struct(
+ StructField.of("TABLE_CAT", Type.string()),
+ StructField.of("TABLE_SCHEM", Type.string()),
+ StructField.of("TABLE_NAME", Type.string()),
+ StructField.of("COLUMN_NAME", Type.string()),
+ StructField.of("DATA_TYPE", Type.int64()),
+ StructField.of("COLUMN_SIZE", Type.int64()),
+ StructField.of("DECIMAL_DIGITS", Type.int64()),
+ StructField.of("NUM_PREC_RADIX", Type.int64()),
+ StructField.of("COLUMN_USAGE", Type.string()),
+ StructField.of("REMARKS", Type.string()),
+ StructField.of("CHAR_OCTET_LENGTH", Type.int64()),
+ StructField.of("IS_NULLABLE", Type.string())),
+ Collections.emptyList()));
+ }
+
+ @Override
+ public boolean generatedKeyAlwaysReturned() {
+ return false;
+ }
+
+ @Override
+ public long getMaxLogicalLobSize() {
+ // BYTES(MAX)
+ return 10485760L;
+ }
+
+ @Override
+ public boolean supportsRefCursors() {
+ return false;
+ }
+}
diff --git a/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDriver.java b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDriver.java
new file mode 100644
index 000000000000..8e8bd0726317
--- /dev/null
+++ b/java-spanner-jdbc/src/main/java/com/google/cloud/spanner/jdbc/JdbcDriver.java
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2019 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.spanner.jdbc;
+
+import com.google.api.core.InternalApi;
+import com.google.auth.oauth2.GoogleCredentials;
+import com.google.cloud.spanner.SessionPoolOptions;
+import com.google.cloud.spanner.SessionPoolOptionsHelper;
+import com.google.cloud.spanner.SpannerException;
+import com.google.cloud.spanner.connection.ConnectionOptions;
+import com.google.cloud.spanner.connection.ConnectionOptionsHelper;
+import com.google.cloud.spanner.connection.ConnectionProperty;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Suppliers;
+import com.google.rpc.Code;
+import io.opentelemetry.api.OpenTelemetry;
+import java.sql.Connection;
+import java.sql.Driver;
+import java.sql.DriverManager;
+import java.sql.DriverPropertyInfo;
+import java.sql.SQLException;
+import java.sql.SQLFeatureNotSupportedException;
+import java.sql.SQLWarning;
+import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.function.Supplier;
+import java.util.logging.Logger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * JDBC {@link Driver} for Google Cloud Spanner.
+ *
+ * Usage:
+ *
+ *
+ *
+ * {@code
+ * String url = "jdbc:cloudspanner:/projects/my_project_id/"
+ * + "instances/my_instance_id/databases/my_database_name?"
+ * + "credentials=/home/cloudspanner-keys/my-key.json;autocommit=false";
+ * try (Connection connection = DriverManager.getConnection(url)) {
+ * try(ResultSet rs = connection.createStatement().executeQuery("SELECT SingerId, AlbumId, MarketingBudget FROM Albums")) {
+ * while(rs.next()) {
+ * // do something
+ * }
+ * }
+ * }
+ * }
+ *
+ *
+ *
+ * The connection that is returned will implement the interface {@link CloudSpannerJdbcConnection}.
+ * The JDBC connection URL must be specified in the following format:
+ *
+ *
+ * jdbc:cloudspanner:[//host[:port]]/projects/project-id[/instances/instance-id[/databases/database-name]][\?property-name=property-value[;property-name=property-value]*]?
+ *
+ *
+ * The property-value strings should be url-encoded.
+ *
+ * The project-id part of the URI may be filled with the placeholder DEFAULT_PROJECT_ID. This
+ * placeholder is replaced by the default project id of the environment that is requesting a
+ * connection.
+ *
+ *
+ * <p>The supported properties are:
+ *
+ * <ul>
+ *   <li>credentials (String): URL for the credentials file to use for the connection. If you do
+ *       not specify any credentials at all, the default credentials of the environment as returned
+ *       by {@link GoogleCredentials#getApplicationDefault()} is used.
+ *   <li>autocommit (boolean): Sets the initial autocommit mode for the connection. Default is
+ *       true.
+ *   <li>readonly (boolean): Sets the initial readonly mode for the connection. Default is false.
+ *   <li>autoConfigEmulator (boolean): Automatically configure the connection to try to connect to
+ *       the Cloud Spanner emulator. You do not need to specify any host or port in the connection
+ *       string as long as the emulator is running on the default host/port (localhost:9010). The
+ *       instance and database in the connection string will automatically be created if these do
+ *       not yet exist on the emulator. This means that you do not need to execute any
+ *       <code>gcloud</code> commands on the emulator to create the instance and database before
+ *       you can connect to it. Setting this property to true also enables running concurrent
+ *       transactions on the emulator. The emulator aborts any concurrent transaction on the
+ *       emulator, and the JDBC driver works around this by automatically setting a savepoint
+ *       after each statement that is executed. When the transaction has been aborted by the
+ *       emulator and the JDBC connection wants to continue with that transaction, the transaction
+ *       is replayed up until the savepoint that had automatically been set after the last
+ *       statement that was executed before the transaction was aborted by the emulator.
+ *   <li>endpoint (string): Set this property to specify a custom endpoint that the JDBC driver
+ *       should connect to. You can use this property in combination with the autoConfigEmulator
+ *       property to instruct the JDBC driver to connect to an emulator instance that uses a
+ *       randomly assigned port number. See <code>ConcurrentTransactionOnEmulatorTest</code> for a
+ *       concrete example of how to use this property.
+ *   <li>usePlainText (boolean): Sets whether the JDBC connection should establish an unencrypted
+ *       connection to the server. This option can only be used when connecting to a local
+ *       emulator that does not require an encrypted connection, and that does not require
+ *       authentication.
+ *   <li>optimizerVersion (string): The query optimizer version to use for the connection. The
+ *       value must be either a valid version number or <code>LATEST</code>. If no value is
+ *       specified, the query optimizer version specified in the environment variable
+ *       SPANNER_OPTIMIZER_VERSION is used. If no query optimizer version is specified in the
+ *       connection URL or in the environment variable, the default query optimizer version of
+ *       Cloud Spanner is used.
+ *   <li>oauthtoken (String): A valid OAuth2 token to use for the JDBC connection. The token must
+ *       have been obtained with one or both of the scopes
+ *       'https://www.googleapis.com/auth/spanner.admin' and/or
+ *       'https://www.googleapis.com/auth/spanner.data'. If you specify both a credentials file
+ *       and an OAuth token, the JDBC driver will throw an exception when you try to obtain a
+ *       connection.
+ *   <li>retryAbortsInternally (boolean): Sets the initial retryAbortsInternally mode for the
+ *       connection. Default is true. See {@link
+ *       CloudSpannerJdbcConnection#setRetryAbortsInternally(boolean)} for more information.
+ *   <li>minSessions (int): Sets the minimum number of sessions in the backing session pool.
+ *       Defaults to 100.
+ *   <li>maxSessions (int): Sets the maximum number of sessions in the backing session pool.
+ *       Defaults to 400.
+ *   <li>numChannels (int): Sets the number of gRPC channels to use. Defaults to 4.
+ *   <li>rpcPriority (String): Sets the priority for all RPC invocations from this connection.
+ *       Defaults to HIGH.
+ * </ul>
+ */
+public class JdbcDriver implements Driver {
+ /**
+ * The info {@link Properties} object that is passed to the JDBC driver may contain an entry with
+ * this key and an {@link io.opentelemetry.api.OpenTelemetry} instance as its value. This {@link
+ * io.opentelemetry.api.OpenTelemetry} instance will be used for tracing and metrics in the JDBC
+ * connection.
+ */
+ public static final String OPEN_TELEMETRY_PROPERTY_KEY = "openTelemetry";
+
+ // Client library token that identifies connections made by this JDBC driver.
+ private static final String JDBC_API_CLIENT_LIB_TOKEN = "sp-jdbc";
+ // Updated to version 2 when upgraded to Java 8 (JDBC 4.2)
+ static final int MAJOR_VERSION = 2;
+ static final int MINOR_VERSION = 0;
+ // Accepted JDBC URLs are 'jdbc:' followed by the Spanner connection URI format.
+ private static final String JDBC_URL_FORMAT =
+ "jdbc:" + ConnectionOptions.Builder.SPANNER_URI_FORMAT;
+ private static final Pattern URL_PATTERN = Pattern.compile(JDBC_URL_FORMAT);
+ // Alternative URL pattern based on the external-host format defined by the connection API.
+ private static final String JDBC_EXTERNAL_HOST_FORMAT =
+ "jdbc:" + ConnectionOptions.Builder.EXTERNAL_HOST_FORMAT;
+
+ @VisibleForTesting
+ static final Pattern EXTERNAL_HOST_URL_PATTERN = Pattern.compile(JDBC_EXTERNAL_HOST_FORMAT);
+
+ /** Returns the client library token ('sp-jdbc') for this JDBC driver. */
+ @InternalApi
+ public static String getClientLibToken() {
+ return JDBC_API_CLIENT_LIB_TOKEN;
+ }
+
+ // Register the driver with the DriverManager when this class is loaded, as required by the JDBC
+ // specification. A failure is only logged: throwing from a static initializer would make the
+ // class itself unusable.
+ static {
+ try {
+ register();
+ } catch (SQLException e) {
+ java.sql.DriverManager.println("Registering driver failed: " + e.getMessage());
+ }
+ }
+
+ private static JdbcDriver registeredDriver;
+
+ /**
+ * Registers this driver with the {@link DriverManager}. May only be called once.
+ *
+ * @throws IllegalStateException if the driver has already been registered
+ * @throws SQLException if registering the driver with the {@link DriverManager} fails
+ */
+ static void register() throws SQLException {
+ if (isRegistered()) {
+ throw new IllegalStateException(
+ "Driver is already registered. It can only be registered once.");
+ }
+ // Use a local variable with a name that does not shadow the static field, and only assign the
+ // static field after registration with the DriverManager has succeeded.
+ JdbcDriver driver = new JdbcDriver();
+ DriverManager.registerDriver(driver);
+ JdbcDriver.registeredDriver = driver;
+ }
+
+ /**
+ * According to JDBC specification, this driver is registered against {@link DriverManager} when
+ * the class is loaded. To avoid leaks, this method allow unregistering the driver so that the
+ * class can be gc'ed if necessary.
+ *
+ * @throws IllegalStateException if the driver is not registered
+ * @throws SQLException if deregistering the driver fails
+ */
+ static void deregister() throws SQLException {
+ if (!isRegistered()) {
+ throw new IllegalStateException(
+ "Driver is not registered (or it has not been registered using Driver.register() method)");
+ }
+ // Close the Spanner resources held by the connection API before deregistering the driver.
+ ConnectionOptions.closeSpanner();
+ DriverManager.deregisterDriver(registeredDriver);
+ registeredDriver = null;
+ }
+
+ /**
+ * Checks whether this driver has been registered.
+ *
+ * @return {@code true} if the driver is registered against {@link DriverManager}
+ */
+ static boolean isRegistered() {
+ return registeredDriver != null;
+ }
+
+ /**
+ * Returns the driver instance that was registered against {@link DriverManager}.
+ *
+ * @return the registered JDBC driver for Cloud Spanner.
+ * @throws SQLException if the driver has not been registered.
+ */
+ static JdbcDriver getRegisteredDriver() throws SQLException {
+ if (!isRegistered()) {
+ throw JdbcSqlExceptionFactory.of(
+ "The driver has not been registered", Code.FAILED_PRECONDITION);
+ }
+ return registeredDriver;
+ }
+
+ public JdbcDriver() {}
+
+ /**
+ * Connects to the Cloud Spanner database identified by the given URL. Returns {@code null} for
+ * URLs that do not start with 'jdbc:cloudspanner' or 'jdbc:spanner', as required by the JDBC
+ * specification, so that {@link DriverManager} can try other registered drivers. Spanner and
+ * illegal-argument errors are mapped to {@link SQLException}s.
+ */
+ @Override
+ public Connection connect(String url, Properties info) throws SQLException {
+ if (url != null && (url.startsWith("jdbc:cloudspanner") || url.startsWith("jdbc:spanner"))) {
+ try {
+ Matcher matcher = URL_PATTERN.matcher(url);
+ Matcher matcherExternalHost = EXTERNAL_HOST_URL_PATTERN.matcher(url);
+ if (matcher.matches() || matcherExternalHost.matches()) {
+ // strip 'jdbc:' from the URL, add any extra properties and pass on to the generic
+ // Connection API. Also set the user-agent if we detect that the connection
+ // comes from known framework like Hibernate, and there is no other user-agent set.
+ maybeAddUserAgent(info);
+ String connectionUri = appendPropertiesToUrl(url.substring(5), info);
+ ConnectionOptions options = buildConnectionOptions(connectionUri, info);
+ JdbcConnection connection = new JdbcConnection(url, options);
+ if (options.getWarnings() != null) {
+ connection.pushWarning(new SQLWarning(options.getWarnings()));
+ }
+ return connection;
+ }
+ } catch (SpannerException e) {
+ throw JdbcSqlExceptionFactory.of(e);
+ } catch (IllegalArgumentException e) {
+ throw JdbcSqlExceptionFactory.of(e.getMessage(), Code.INVALID_ARGUMENT, e);
+ } catch (Exception e) {
+ throw JdbcSqlExceptionFactory.of(e.getMessage(), Code.UNKNOWN, e);
+ }
+ // The URL has a Spanner prefix, but does not match any of the supported URL patterns.
+ throw JdbcSqlExceptionFactory.of("invalid url: " + url, Code.INVALID_ARGUMENT);
+ }
+ return null;
+ }
+
+ /**
+ * Builds the {@link ConnectionOptions} for the given connection URL (already stripped of the
+ * 'jdbc:' prefix). Picks up an optional {@link OpenTelemetry} instance from the given {@link
+ * Properties} under {@link #OPEN_TELEMETRY_PROPERTY_KEY}.
+ */
+ static ConnectionOptions buildConnectionOptions(String connectionUrl, Properties info) {
+ ConnectionOptions.Builder builder =
+ ConnectionOptions.newBuilder().setTracingPrefix("JDBC").setUri(connectionUrl);
+ if (info.containsKey(OPEN_TELEMETRY_PROPERTY_KEY)
+ && info.get(OPEN_TELEMETRY_PROPERTY_KEY) instanceof OpenTelemetry) {
+ builder.setOpenTelemetry((OpenTelemetry) info.get(OPEN_TELEMETRY_PROPERTY_KEY));
+ }
+ // Enable multiplexed sessions by default for the JDBC driver.
+ builder.setSessionPoolOptions(
+ SessionPoolOptionsHelper.useMultiplexedSessions(SessionPoolOptions.newBuilder()).build());
+ // Enable direct executor for JDBC, as we don't use the async API.
+ builder =
+ ConnectionOptionsHelper.useDirectExecutorIfNotUseVirtualThreads(connectionUrl, builder);
+ return builder.build();
+ }
+
+ /**
+ * Sets the 'userAgent' property to 'sp-hib' if the connection appears to be created by Hibernate
+ * (see {@link #isHibernate()}) and no user-agent has been set explicitly.
+ */
+ static void maybeAddUserAgent(Properties properties) {
+ if (properties.containsKey("userAgent")) {
+ // An explicit user-agent always takes precedence.
+ return;
+ }
+ if (isHibernate()) {
+ properties.setProperty("userAgent", "sp-hib");
+ }
+ }
+
+ // Memoized check for whether this connection is being created by (or for) Hibernate. The field
+ // must be typed Supplier<Boolean>: a raw Supplier would not compile, as isHibernate() returns
+ // the result of get() as a primitive boolean.
+ private static final Supplier<Boolean> isHibernate =
+ Suppliers.memoize(
+ () -> {
+ try {
+ // First check if the Spanner Hibernate dialect is on the classpath. If it is, then
+ // we assume that Hibernate will (eventually) be used.
+ Class.forName(
+ "com.google.cloud.spanner.hibernate.SpannerDialect",
+ /* initialize= */ false,
+ JdbcDriver.class.getClassLoader());
+ return true;
+ } catch (Throwable ignore) {
+ // Dialect not on the classpath: fall through to the stack trace check below.
+ }
+
+ // If we did not find the Spanner Hibernate dialect on the classpath, then do a
+ // check if the connection is still being created by Hibernate using the built-in
+ // Spanner dialect in Hibernate.
+ try {
+ StackTraceElement[] callStack = Thread.currentThread().getStackTrace();
+ for (StackTraceElement element : callStack) {
+ if (element.getClassName().contains(".hibernate.")) {
+ return true;
+ }
+ }
+ } catch (Throwable ignore) {
+ // Best-effort check only: assume the connection does not come from Hibernate.
+ }
+ return false;
+ });
+
+ /** Returns true if the connection appears to be created by Hibernate. The result is memoized. */
+ static boolean isHibernate() {
+ // Cache the result as the check is relatively expensive, and we also don't want to create
+ // multiple different Spanner instances just to get the correct user-agent in every case.
+ return isHibernate.get();
+ }
+
+ static String appendPropertiesToUrl(String url, Properties info) {
+ StringBuilder res = new StringBuilder(url);
+ for (Entry