diff --git a/.gitignore b/.gitignore
index 4ab57f89..32f9c2ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,5 @@ target
*.diff
Fortify*
logging.properties
+.idea
+.oca
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 15f68fa4..692c88a1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/).
+## [6.0.0] [Unreleased]
+
+### TODO
+
## [5.4.18] 2025-10-01
### Added
diff --git a/README.md b/README.md
index cff205cb..cb24ece5 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ project. The version changes with each release.
com.oracle.nosql.sdk
nosqldriver
- 5.4.18
+ 6.0.0
```
diff --git a/driver/pom.xml b/driver/pom.xml
index 5ca2a03d..fe494720 100644
--- a/driver/pom.xml
+++ b/driver/pom.xml
@@ -29,7 +29,7 @@
com.oracle.nosql.sdk
nosqldriver
- 5.4.18
+ 6.0.0
jar
@@ -39,8 +39,8 @@
UTF-8
- 1.8
- 1.8
+ 11
+ 11${maven.build.timestamp}d-MMMM-yyyyCopyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
@@ -255,7 +255,18 @@
4.13.1test
-
+
+ org.reactivestreams
+ reactive-streams-tck-flow
+ 1.0.4
+ test
+
+
+ io.projectreactor
+ reactor-core
+ 3.8.1
+ test
+
@@ -274,8 +285,8 @@
3.11.0true
- 1.8
- 1.8
+ ${maven.compiler.source}
+ ${maven.compiler.target}truetrue-Xlint:all
@@ -287,6 +298,13 @@
maven-surefire-plugin3.1.2
+
+
+ org.apache.maven.surefire
+ surefire-junit47
+ 3.5.4
+
+
diff --git a/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java b/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java
index a6c491af..d01730a1 100644
--- a/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java
+++ b/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java
@@ -12,6 +12,8 @@
import io.netty.handler.codec.http.HttpHeaders;
import oracle.nosql.driver.ops.Request;
+import java.util.concurrent.CompletableFuture;
+
/**
* A callback interface used by the driver to obtain an authorization string
* for a request. {@link NoSQLHandle} calls this interface when and
@@ -34,6 +36,21 @@ public interface AuthorizationProvider {
*/
public String getAuthorizationString(Request request);
+ /**
+ * Returns an authorization string for specified request. This is sent to
+ * the server in the request for authorization. Authorization information
+ * can be request-dependent.
+ *
+ * @param request the request being processed
+ *
+ * @return a CompletableFuture of a string indicating that the application
+ * is authorized to perform the request
+ */
+ public default CompletableFuture<String>
+ getAuthorizationStringAsync(Request request) {
+ return CompletableFuture.completedFuture(null);
+ }
+
/**
* Release resources provider is using.
*/
@@ -75,6 +92,27 @@ public default void setRequiredHeaders(String authString,
}
}
+ /**
+ * Set HTTP headers required by the provider asynchronously.
+ *
+ * @param authString the authorization string for the request
+ *
+ * @param request the request being processed
+ *
+ * @param headers the HTTP headers
+ *
+ * @param content the request content bytes
+ */
+ default CompletableFuture<Void> setRequiredHeadersAsync(String authString,
+ Request request,
+ HttpHeaders headers,
+ byte[] content) {
+ if (authString != null) {
+ headers.set(AUTHORIZATION, authString);
+ }
+ return CompletableFuture.completedFuture(null);
+ }
+
/**
* Invalidate any cached authorization strings.
*/
diff --git a/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java b/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java
index b7e4be88..684cc681 100644
--- a/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java
+++ b/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java
@@ -85,6 +85,13 @@ public void delay(Request request,
request.addRetryDelayMs(delayMs);
}
+ @Override
+ public int delayTime(Request request,
+ int numRetries,
+ RetryableException re) {
+ return Math.max(0, computeBackoffDelay(request, fixedDelayMs));
+ }
+
/**
* Compute an incremental backoff delay in milliseconds.
* This method also checks the request's timeout and ensures the
diff --git a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleAsync.java b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleAsync.java
new file mode 100644
index 00000000..868ce6e4
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleAsync.java
@@ -0,0 +1,1159 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.Flow;
+
+import oracle.nosql.driver.ops.AddReplicaRequest;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.DropReplicaRequest;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetIndexesResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryPaginatorResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.ReplicaStatsRequest;
+import oracle.nosql.driver.ops.ReplicaStatsResult;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.ops.SystemRequest;
+import oracle.nosql.driver.ops.SystemResult;
+import oracle.nosql.driver.ops.SystemStatusRequest;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.TableUsageResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.values.MapValue;
+
+/**
+ * NoSQLHandleAsync is an asynchronous handle that can be used to access Oracle
+ * NoSQL tables. To create a connection represented by NoSQLHandleAsync,
+ * request an instance using {@link NoSQLHandleFactory#createNoSQLHandleAsync}
+ * and {@link NoSQLHandleConfig}, which allows an application to specify
+ * default values and other configuration information to be used by the handle.
+ *
+ * The same interface is available to both users of the Oracle NoSQL Database
+ * Cloud Service and the on-premises Oracle NoSQL Database; however, some
+ * methods and/or parameters are specific to each environment. The
+ * documentation has notes about whether a class, method, or parameter is
+ * environment-specific. Unless otherwise noted they are applicable to both
+ * environments.
+ *
+ * A handle has memory and network resources associated with it. Consequently,
+ * the {@link NoSQLHandleAsync#close} method must be invoked to free up the
+ * resources when the application is done using the handle.
+ *
+ * To minimize network activity as well as resource allocation and
+ * deallocation overheads, it's best to avoid repeated creation and closing of
+ * handles. For example, creating and closing a handle around each operation,
+ * would incur large resource allocation overheads resulting in poor
+ * application performance.
+ *
+ *
+ * A handle permits concurrent operations, so a single handle is sufficient to
+ * access tables in a multi-threaded application. The creation of multiple
+ * handles incurs additional resource overheads without providing any
+ * performance benefit.
+ *
+ *
+ * With the exception of {@link #close} the operations on this interface follow
+ * a similar pattern. They accept a {@link Request} object containing
+ * parameters, both required and optional. They return a {@link CompletableFuture}
+ * which returns a {@link Result} object containing results. Operation failures
+ * throw exceptions. Unique subclasses of {@link Request} and {@link Result}
+ * exist for most operations, containing information specific to the operation.
+ * All of these operations result in remote calls across a network.
+ *
+ *
+ * All {@link Request} instances support specification of parameters for the
+ * operation as well as the ability to override default parameters which may
+ * have been specified in {@link NoSQLHandleConfig}, such as request timeouts,
+ * {@link Consistency}, etc.
+ *
+ *
+ * {@link Request} objects
+ * are not copied and must not be modified by the application while a method
+ * on this interface is using them.
+ *
+ *
Error and Exception Handling
+ *
+ * On success all methods in this interface return {@link CompletableFuture}
+ * which completes with {@link Result} objects.
+ * On error, the returned {@link CompletableFuture} completes with a
+ * {@link java.util.concurrent.CompletionException} that wraps the original
+ * exception as its cause.
+ * Some Java exceptions, such as {@link IllegalArgumentException} and
+ * {@link NullPointerException} are thrown directly. All other exceptions are
+ * instances of {@link NoSQLException}, which serves as a base class for NoSQL
+ * Database exceptions.
+ *
+ *
+ * {@link NoSQLException} instances are split into two broad categories:
+ *
+ *
Exceptions that may be retried with the expectation that they
+ * may succeed on retry. These are instances of {@link RetryableException}
+ *
Exceptions that may not be retried and if retried, will fail again
+ *
+ *
+ * Exceptions that may be retried return true for
+ * {@link NoSQLException#okToRetry} while those that may not will return false.
+ * Examples of retryable exceptions are those which indicate resource
+ * consumption violations such as {@link ThrottlingException}.
+ * Examples of exceptions that should not be
+ * retried are {@link IllegalArgumentException},
+ * {@link TableNotFoundException}, and any other exception indicating a
+ * syntactic or semantic error.
+ *
+ *
+ * Instances of NoSQLHandleAsync are thread-safe and expected to be shared among
+ * threads.
+ *
+ * The async APIs are non-blocking in that they return without waiting for any
+ * events such as network read and write, or security handshake. The actual
+ * handling of such events happens inside an internal thread pool which has a
+ * fixed number of threads. These async methods return widely accepted
+ * asynchronous flow-control or computation classes, namely, the
+ * {@link CompletableFuture} and {@link Flow.Publisher}, from which
+ * user-supplied actions are triggered after the execution results are
+ * available. We implement these interfaces in a way such that user-supplied
+ * actions will be performed by a thread in the internal thread pool.
+ * Therefore, these actions must be non-blocking to avoid interfering with
+ * internal event processing.
+ * This requirement corresponds to those defined in the async classes (see the
+ * policies for implementing {@code CompletionStage} in
+ * {@link CompletableFuture}). If the triggered method needs to perform a
+ * blocking action or heavy CPU bound task, use a separate executor to perform
+ * the action. For example:
+ *
+ * @since 6.0.0
+ */
+public interface NoSQLHandleAsync extends AutoCloseable {
+
+ /**
+ * Deletes a row from a table asynchronously. The row is identified using a
+ * primary key value supplied in {@link DeleteRequest#setKey}
+ *
+ * By default, a delete operation is unconditional and will succeed if the
+ * specified row exists. Delete operations can be made conditional based
+ * on whether the {@link Version} of an existing row matches that supplied
+ * by {@link DeleteRequest#setMatchVersion}.
+ *
+ * It is also possible to return information about the existing
+ * row. The row, including its {@link Version} and modification time
+ * can be optionally returned.
+ * The existing row information will only be returned if
+ * {@link DeleteRequest#setReturnRow} is true and one of the following
+ * occurs:
+ *
+ *
The {@link DeleteRequest#setMatchVersion} is used and the operation
+ * fails because the row exists and its version does not match.
+ *
+ *
The {@link DeleteRequest#setMatchVersion} is not used and the
+ * operation succeeds provided that the server supports providing the
+ * existing row.
+ *
+ *
+ * Use of {@link DeleteRequest#setReturnRow} may result in additional
+ * consumed read capacity.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<DeleteResult> delete(DeleteRequest request);
+
+ /**
+ * Gets the row associated with a primary key asynchronously. On success the
+ * value of the row is available using the {@link GetResult#getValue}
+ * operation. If there are no matching rows that method will return null.
+ *
+ * The default {@link Consistency} used for the operation is
+ * {@link Consistency#EVENTUAL} unless an explicit value has been set
+ * using {@link NoSQLHandleConfig#setConsistency} or
+ * {@link GetRequest#setConsistency}. Use of {@link Consistency#ABSOLUTE}
+ * may affect latency of the operation and may result in additional cost
+ * for the operation.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<GetResult> get(GetRequest request);
+
+ /**
+ * Puts a row into a table asynchronously. This method creates a new row or
+ * overwrites an existing row entirely. The value used for the put is in
+ * the {@link PutRequest} object and must contain a complete primary key and
+ * all required fields.
+ *
+ * It is not possible to put part of a row. Any fields that are not
+ * provided will be defaulted, overwriting any existing value. Fields that
+ * are not nullable or defaulted must be provided or an exception will be
+ * thrown.
+ *
+ * By default a put operation is unconditional, but put operations can be
+ * conditional based on existence, or not, of a previous value as well as
+ * conditional on the {@link Version} of the existing value.
+ *
+ *
Use {@link PutRequest.Option#IfAbsent} to do a put only if there is
+ * no existing row that matches the primary key
+ *
Use {@link PutRequest.Option#IfPresent} to do a put only if there
+ * is an existing row that matches the primary key
+ *
Use {@link PutRequest.Option#IfVersion} to do a put only if there is
+ * an existing row that matches the primary key and its
+ * {@link Version} matches that provided
+ *
+ *
+ * It is also possible to return information about the existing
+ * row. The existing row, including its {@link Version} and modification
+ * time can be optionally returned.
+ * The existing row information will only be returned if
+ * {@link PutRequest#setReturnRow} is true and one of the following occurs:
+ *
+ *
The {@link PutRequest.Option#IfAbsent} is used and the operation
+ * fails because the row already exists.
+ *
The {@link PutRequest.Option#IfVersion} is used and the operation
+ * fails because the row exists and its version does not match.
+ *
The {@link PutRequest.Option#IfPresent} is used and the operation
+ * succeeds provided that the server supports providing the existing row.
+ *
The {@link PutRequest.Option} is not used and put operation replaces
+ * the existing row provided that the server supports providing the existing
+ * row.
+ *
+ * Use of {@link PutRequest#setReturnRow} may result in additional consumed
+ * read capacity.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<PutResult> put(PutRequest request);
+
+ /**
+ * Executes a sequence of operations associated with a table that share the
+ * same shard key portion of their primary keys, all the specified
+ * operations are executed within the scope of a single transaction.
+ * {@link WriteMultipleRequest}.
+ *
+ * There are some size-based limitations on this operation:
+ *
+ *
The max number of individual operations (put, delete) in a single
+ * WriteMultiple request is 50.
+ *
The total request size is limited to 25MB.
+ *
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link RowSizeLimitException} if the size of the request exceeds
+ * the maximum limit.
+ *
+ *
+ * {@link BatchOperationNumberLimitException} if the number of operations in
+ * the request exceeds the maximum limit.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<WriteMultipleResult> writeMultiple(WriteMultipleRequest request);
+
+ /**
+ * Deletes multiple rows from a table in an atomic operation asynchronously.
+ * The key used may be partial but must contain all of the fields that are
+ * in the shard key. A range may be specified to delete a range of keys.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<MultiDeleteResult> multiDelete(MultiDeleteRequest request);
+
+ /**
+ * Queries a table based on the query statement specified in the
+ * {@link QueryRequest} asynchronously.
+ *
+ * Queries that include a full shard key will execute much more efficiently
+ * than more distributed queries that must go to multiple shards.
+ *
+ * Table- and system-style queries such as "CREATE TABLE ..." or "DROP TABLE .."
+ * are not supported by this interface. Those operations must be performed
+ * using {@link #tableRequest} or {@link #systemRequest} as appropriate.
+ *
+ * The amount of data read by a single query request is limited by a system
+ * default and can be further limited using
+ * {@link QueryRequest#setMaxReadKB}. This limits the amount of data
+ * read and not the amount of data returned, which means
+ * that a query can return zero results but still have more data to read.
+ * This situation is detected by checking if the {@link QueryRequest} is
+ * completed, using {@link QueryRequest#isDone()}. For this
+ * reason queries should always operate in a loop, acquiring more results,
+ * until the {@link QueryRequest#isDone()} is true, indicating that the
+ * query is done.
+ *
+ * Note: Since a query might use resources until it reaches the end, it
+ * is necessary to close the QueryRequest.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<QueryResult> query(QueryRequest request);
+
+ /**
+ * Queries a table based on the query statement specified in the
+ * {@link QueryRequest} while returning an {@link QueryPaginatorResult}
+ * which provides a {@code Flow.Publisher>} to stream
+ * the result set.
+ *
+ * If {@link QueryRequest#setLimit(int)} is set, the publisher will send
+ * at most limit items in
+ * {@link java.util.concurrent.Flow.Subscriber#onNext(Object)} call.
+ *
+ * Queries that include a full shard key will execute much more efficiently
+ * than more distributed queries that must go to multiple shards.
+ *
+ * Remote calls, including preparation of a query statement, will not
+ * occur until the subscription happens.
+ *
+ * Table- and system-style queries such as "CREATE TABLE ..." or "DROP TABLE .."
+ * are not supported by this interface. Those operations must be performed using
+ * {@link #tableRequest} or {@link #systemRequest} as appropriate.
+ *
+ * Note:
+ *
+ *
+ * Publisher will close the {@link QueryRequest} upon completion or
+ * cancellation or error
+ *
+ *
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return The {@link QueryPaginatorResult}
+ *
+ * @throws IllegalArgumentException if any of the parameters are invalid or
+ * required parameters are missing
+ *
+ * @throws NoSQLException if the operation cannot be performed for any other
+ * reason
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ QueryPaginatorResult queryPaginator(QueryRequest request);
+ /**
+ * Prepares a query for execution and reuse asynchronously. See
+ * {@link #query} for general information and restrictions. It is
+ * recommended that prepared queries are used when the same query will run
+ * multiple times as execution is much more efficient than starting with a
+ * query string every time. The query language and API support query
+ * variables to assist with re-use.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<PrepareResult> prepare(PrepareRequest request);
+
+ /**
+ * Performs an operation on a table asynchronously. This method is used for
+ * creating and dropping tables and indexes as well as altering tables.
+ * Only one operation is allowed on a table at any one time.
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link TableResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<TableResult> tableRequest(TableRequest request);
+
+ /**
+ * A convenience method that performs a TableRequest and waits for
+ * completion of the operation. This is the same as calling
+ * {@link #tableRequest} then calling {@link TableResult#waitForCompletion}.
+ * If the operation fails an exception is thrown. All parameters are
+ * required.
+ *
+ * @param request the {@link TableRequest} to perform.
+ *
+ * @param timeoutMs the amount of time to wait for completion, in
+ * milliseconds.
+ *
+ * @param pollIntervalMs the polling interval for the wait operation.
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link RequestTimeoutException} if the operation times out.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<TableResult> doTableRequest(TableRequest request,
+ int timeoutMs,
+ int pollIntervalMs);
+
+ /**
+ * On-premises only.
+ *
+ * Performs a system operation on the system asynchronously, such as
+ * administrative operations that don't affect a specific table. For
+ * table-specific operations use {@link #tableRequest} or
+ * {@link #doTableRequest}.
+ *
+ * Examples of statements in the {@link SystemRequest} passed to this
+ * method include:
+ *
+ *
CREATE NAMESPACE mynamespace
+ *
CREATE USER some_user IDENTIFIED BY password
+ *
CREATE ROLE some_role
+ *
GRANT ROLE some_role TO USER some_user
+ *
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link SystemResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * Checks the status of an operation previously performed using
+ * {@link #systemRequest} asynchronously.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<SystemResult> systemStatus(SystemStatusRequest request);
+
+ /**
+ * Gets static information about the specified table asynchronously
+ * including its state, provisioned throughput and capacity and schema.
+ * Dynamic information such as usage is obtained using {@link #getTableUsage}.
+ * Throughput, capacity and usage information is only available when using
+ * the Cloud Service and will be null or not defined on-premises.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link TableNotFoundException} if the specified table does not exist.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * Gets dynamic information about the specified table asynchronously such as
+ * the current throughput usage. Usage information is collected in time
+ * slices and returned in individual usage records. It is possible to
+ * specify a time-based range of usage records using input parameters.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link TableNotFoundException} if the specified table does not exist.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<TableUsageResult> getTableUsage(TableUsageRequest request);
+
+ /**
+ * Lists tables asynchronously, returning table names. If further information
+ * about a specific table is desired the {@link #getTable} interface may be
+ * used. If a given identity has access to a large number of tables the
+ * list may be paged using input parameters.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture listTables(ListTablesRequest request);
+
+ /**
+ * Returns information about an index, or indexes on a table asynchronously.
+ * If no index name is specified in the {@link GetIndexesRequest}, then
+ * information on all indexes is returned.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * Returns the namespaces in a store as an array of String.
+ *
+ * @return A {@link CompletableFuture} which completes with the namespaces
+ * or null if none are found.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * Returns the roles in a store as an array of String.
+ *
+ * @return A {@link CompletableFuture} which completes with the list of
+ * roles or null if none are found.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * Returns the users in a store as an array of {@link UserInfo}.
+ *
+ * @return A {@link CompletableFuture} which completes with the users
+ * or null if none are found.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
 + * A convenience method that performs a SystemRequest and waits for
 + * completion of the operation. This is the same as calling {@link
 + * #systemRequest} then calling {@link SystemResult#waitForCompletion}. If
 + * the operation fails, the returned future completes exceptionally.
 + * All parameters are required.
+ *
+ * System requests are those related to namespaces and security and are
+ * generally independent of specific tables. Examples of statements include:
+ *
+ *
CREATE NAMESPACE mynamespace
+ *
CREATE USER some_user IDENTIFIED BY password
+ *
CREATE ROLE some_role
+ *
GRANT ROLE some_role TO USER some_user
+ *
+ *
+ * @param statement the system statement for the operation.
+ *
+ * @param timeoutMs the amount of time to wait for completion, in
+ * milliseconds.
+ *
+ * @param pollIntervalMs the polling interval for the wait operation.
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link RequestTimeoutException} if the operation times out.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture doSystemRequest(String statement,
+ int timeoutMs,
+ int pollIntervalMs);
+
+ /**
+ * Cloud service only.
+ *
 + * Adds a replica to a table asynchronously.
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link TableResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link TableResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
 + * Gets replica statistics information asynchronously.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link TableNotFoundException} if the specified table does not exist.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture getReplicaStats(ReplicaStatsRequest request);
+
+ /**
+ * Returns an object that allows control over how SDK statistics
+ * are collected.
+ *
+ * @return the StatsControl object
+ *
+ * @since 5.2.30
+ */
+ StatsControl getStatsControl();
+
+ /**
 + * Closes the handle, releasing its memory and network resources. Once
 + * this method has been called the handle is no longer usable. Any attempt
 + * to use a closed handle will throw {@link IllegalArgumentException}.
+ */
+ void close();
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java
index 28ea4769..664181c5 100644
--- a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java
+++ b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java
@@ -84,6 +84,18 @@ public class NoSQLHandleConfig implements Cloneable {
public static final String STATS_ENABLE_LOG_PROPERTY =
"com.oracle.nosql.sdk.nosqldriver.stats.enable-log";
+ /**
+ * Java property for connection pool size
+ */
+ public static final String CONNECTION_SIZE_PROPERTY =
+ "com.oracle.nosql.sdk.nosqldriver.connection.size";
+
+ /**
+ * Java property for connection pool pending size
+ */
+ public static final String CONNECTION_PENDING_PROPERTY =
+ "com.oracle.nosql.sdk.nosqldriver.connection.pending";
+
/**
* Statistics logging interval in seconds. Default 600 sec, ie. 10 min.
*/
@@ -102,6 +114,8 @@ public class NoSQLHandleConfig implements Cloneable {
*/
public static final boolean DEFAULT_ENABLE_LOG = true;
+ static final int DEFAULT_CONNECTION_POOL_SIZE = 100;
+ static final int DEFAULT_CONNECTION_PENDING_SIZE = 10_000;
/*
* The url used to contact an HTTP proxy
@@ -277,6 +291,20 @@ public class NoSQLHandleConfig implements Cloneable {
*/
private String extensionUserAgent;
+ /**
+ * Maximum size of the connection pool
+ */
+ private int connectionPoolSize =
+ getAndVerifyPropertyPositive(CONNECTION_SIZE_PROPERTY,
+ DEFAULT_CONNECTION_POOL_SIZE);
+
+ /**
+ * The maximum number of pending acquires for the pool
+ */
+ private int poolMaxPending =
+ getAndVerifyPropertyPositive(CONNECTION_PENDING_PROPERTY,
+ DEFAULT_CONNECTION_PENDING_SIZE);
+
/**
* Specifies an endpoint or region id to use to connect to the Oracle
* NoSQL Database Cloud Service or, if on-premise, the Oracle NoSQL
@@ -724,19 +752,23 @@ public NoSQLHandleConfig setNumThreads(int numThreads) {
* Sets the maximum number of individual connections to use to connect
* to the service. Each request/response pair uses a connection. The
* pool exists to allow concurrent requests and will bound the number of
- * concurrent requests. Additional requests will wait for a connection to
- * become available. If requests need to wait for a significant time
- * additional connections may be created regardless of the pool size.
- * The default value if not set is number of available CPUs * 2.
 + * concurrent requests. Additional requests, up to the limit set by
 + * {@link #setPoolMaxPending}, will wait for a connection
 + * to become available.
+ * Default value is {@value DEFAULT_CONNECTION_POOL_SIZE}
*
* @param poolSize the pool size
*
* @return this
- * @deprecated The connection pool no longer supports a size setting.
- * It will expand as needed based on concurrent demand.
+ *
+ * @since 6.0.0
*/
- @Deprecated
public NoSQLHandleConfig setConnectionPoolSize(int poolSize) {
+ if (poolSize <= 0) {
+ throw new IllegalArgumentException(
+ "Connection pool size must be positive");
+ }
+ this.connectionPoolSize = poolSize;
return this;
}
@@ -789,16 +821,20 @@ public NoSQLHandleConfig setConnectionPoolInactivityPeriod(
/**
* Sets the maximum number of pending acquire operations allowed on the
* connection pool. This number is used if the degree of concurrency
- * desired exceeds the size of the connection pool temporarily. The
- * default value is 3.
+ * desired exceeds the size of the connection pool temporarily.
+ * Default value is {@value DEFAULT_CONNECTION_PENDING_SIZE}.
*
* @param poolMaxPending the maximum number allowed
*
* @return this
- * @deprecated The connection pool no longer supports pending requests.
+ * @since 6.0.0
*/
- @Deprecated
public NoSQLHandleConfig setPoolMaxPending(int poolMaxPending) {
+ if (poolMaxPending <= 0) {
+ throw new IllegalArgumentException("pool max pending value must " +
+ "be positive");
+ }
+ this.poolMaxPending = poolMaxPending;
return this;
}
@@ -869,13 +905,12 @@ public int getMaxChunkSize() {
* concurrent requests. Additional requests will wait for a connection to
* become available.
*
- * @return 0
- * @deprecated The connection pool no longer supports a size setting.
- * It will expand as needed based on concurrent demand.
+ * @return the pool size
+ *
+ * @since 6.0.0
*/
- @Deprecated
public int getConnectionPoolSize() {
- return 0;
+ return connectionPoolSize;
}
/**
@@ -908,12 +943,11 @@ public int getConnectionPoolInactivityPeriod() {
* Returns the maximum number of pending acquire operations allowed on
* the connection pool.
*
- * @return 0
- * @deprecated The connection pool no longer supports pending requests.
+ * @return the max pool pending
+ * @since 6.0.0
*/
- @Deprecated
public int getPoolMaxPending() {
- return 0;
+ return poolMaxPending;
}
/**
@@ -1687,4 +1721,15 @@ public void setExtensionUserAgent(String extensionUserAgent) {
}
this.extensionUserAgent = extensionUserAgent;
}
+
+ static int getAndVerifyPropertyPositive(String property,
+ int defaultVal) {
+ final int val = Integer.getInteger(property, defaultVal);
+ if (val <= 0) {
+ final String msg =
+ String.format("Property %s must be larger than zero", property);
+ throw new IllegalArgumentException(msg);
+ }
+ return val;
+ }
}
diff --git a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java
index a6bf2033..c3edfd8b 100644
--- a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java
+++ b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java
@@ -9,6 +9,7 @@
import static oracle.nosql.driver.util.CheckNull.requireNonNull;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
import oracle.nosql.driver.http.NoSQLHandleImpl;
/**
@@ -43,4 +44,34 @@ public static NoSQLHandle createNoSQLHandle(NoSQLHandleConfig config) {
}
return new NoSQLHandleImpl(configCopy);
}
+
+ /**
 + * Creates an asynchronous handle that can be used to access tables.
+ * The application must invoke {@link NoSQLHandleAsync#close},
+ * when it is done accessing the system to
+ * free up resources associated with the handle.
+ *
+ * @param config the NoSQLHandle configuration parameters
+ *
+ * @return a valid {@link NoSQLHandleAsync} instance, ready for use
+ *
+ * @throws IllegalArgumentException if an illegal configuration parameter
+ * is specified.
+ *
+ * @see NoSQLHandleAsync#close
+ */
+ public static NoSQLHandleAsync createNoSQLHandleAsync(
+ NoSQLHandleConfig config) {
+ requireNonNull(
+ config,
+ "NoSQLHandleFactory.createNoSQLHandleAsync: config cannot be null");
+ NoSQLHandleConfig configCopy = config.clone();
+ if (configCopy.getRetryHandler() == null) {
+ /*
+ * Default retry handler: 10 retries, default backoff
+ */
+ configCopy.configureDefaultRetryHandler(10, 0);
+ }
+ return new NoSQLHandleAsyncImpl(configCopy);
+ }
}
diff --git a/driver/src/main/java/oracle/nosql/driver/RetryHandler.java b/driver/src/main/java/oracle/nosql/driver/RetryHandler.java
index 3d625c27..11842169 100644
--- a/driver/src/main/java/oracle/nosql/driver/RetryHandler.java
+++ b/driver/src/main/java/oracle/nosql/driver/RetryHandler.java
@@ -69,4 +69,21 @@ public interface RetryHandler {
* @param re the exception that was thrown
*/
void delay(Request request, int numRetries, RetryableException re);
+
+ /**
+ * This method is called when a {@link RetryableException} is thrown and it
+ * is determined that the request will be retried based on the return value
+ * of {@link #doRetry}. It returns the number of milliseconds to delay
+ * before retrying the request.
+ *
+ * @param request the Request that has triggered the exception
+ *
+ * @param numRetries the number of retries that have occurred for the
+ * operation
+ *
+ * @param re the exception that was thrown
+ *
+ * @return Retry delay time in milliseconds
+ */
+ int delayTime(Request request, int numRetries, RetryableException re);
}
diff --git a/driver/src/main/java/oracle/nosql/driver/SDKVersion.java b/driver/src/main/java/oracle/nosql/driver/SDKVersion.java
index 5fad9116..77d294d5 100644
--- a/driver/src/main/java/oracle/nosql/driver/SDKVersion.java
+++ b/driver/src/main/java/oracle/nosql/driver/SDKVersion.java
@@ -12,5 +12,5 @@ public class SDKVersion {
/**
* The full X.Y.Z version of the current SDK
*/
- public static final String VERSION = "5.4.18";
+ public static final String VERSION = "6.0.0";
}
diff --git a/driver/src/main/java/oracle/nosql/driver/http/Client.java b/driver/src/main/java/oracle/nosql/driver/http/Client.java
index 3a0f2ce9..989329ce 100644
--- a/driver/src/main/java/oracle/nosql/driver/http/Client.java
+++ b/driver/src/main/java/oracle/nosql/driver/http/Client.java
@@ -40,23 +40,34 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URL;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Function;
+import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
+import io.netty.buffer.Unpooled;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpRequest;
import oracle.nosql.driver.AuthorizationProvider;
import oracle.nosql.driver.DefaultRetryHandler;
import oracle.nosql.driver.InvalidAuthorizationException;
@@ -76,7 +87,6 @@
import oracle.nosql.driver.UnsupportedQueryVersionException;
import oracle.nosql.driver.WriteThrottlingException;
import oracle.nosql.driver.httpclient.HttpClient;
-import oracle.nosql.driver.httpclient.ResponseHandler;
import oracle.nosql.driver.kv.AuthenticationException;
import oracle.nosql.driver.kv.StoreAccessTokenProvider;
import oracle.nosql.driver.ops.AddReplicaRequest;
@@ -106,15 +116,16 @@
import oracle.nosql.driver.query.QueryDriver;
import oracle.nosql.driver.query.TopologyInfo;
import oracle.nosql.driver.util.ByteInputStream;
+import oracle.nosql.driver.util.ConcurrentUtil;
import oracle.nosql.driver.util.HttpConstants;
import oracle.nosql.driver.util.NettyByteInputStream;
import oracle.nosql.driver.util.NettyByteOutputStream;
import oracle.nosql.driver.util.RateLimiterMap;
import oracle.nosql.driver.util.SerializationUtil;
+import oracle.nosql.driver.util.SimpleRateLimiter;
import oracle.nosql.driver.values.MapValue;
import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaderNames;
@@ -153,7 +164,7 @@ public class Client {
/**
* Tracks the unique client scoped request id.
*/
- private final AtomicInteger maxRequestId = new AtomicInteger(1);
+ private final AtomicLong maxRequestId = new AtomicLong(1);
private final HttpClient httpClient;
@@ -200,7 +211,7 @@ public class Client {
/**
* config for statistics
*/
- private StatsControlImpl statsControl;
+ private final StatsControlImpl statsControl;
/**
* list of Request instances to refresh when auth changes. This will only
@@ -219,15 +230,86 @@ public class Client {
private final String SESSION_COOKIE_FIELD = "session=";
/* for keeping track of SDKs usage */
- private String userAgent;
+ private final String userAgent;
private volatile TopologyInfo topology;
/* for internal testing */
private final String prepareFilename;
+ /* thread-pool for scheduling tasks */
+ private final ScheduledExecutorService taskExecutor;
+
+ /* Lock to access data structures */
+ private final ReentrantLock lock = new ReentrantLock();
+
+ /*
+ * Centralized error handling for request execution.
+ * This class maps specific {@link Throwable} types to error-handling
+ * strategies (retry, fail, or protocol downgrade).
+ * It uses a HashMap of {@link ErrorHandler} functions
+ * to keep {@link #handleError(RequestContext, Throwable)} short and
+ * maintainable.
+ */
+ private final Map, ErrorHandler>
+ errorHandlers = new HashMap<>();
+
+ /*
+ * Functional interface for all error handlers.
+ * Each handler inspects the exception and decides whether
+ * to retry the request or fail with an exception.
+ */
+ @FunctionalInterface
+ private interface ErrorHandler {
+ CompletableFuture handle(RequestContext ctx, Throwable error);
+ }
+
+ /**
+ * RequestContext class to encapsulate request-specific data.
+ * This helps in passing context through asynchronous chains.
+ * It now includes requestId and a Supplier to generate new IDs for retries.
+ */
+ private static class RequestContext {
+ private final Request kvRequest;
+ private final String requestClass;
+ private volatile String requestId;
+ private final long startNanos;
+ private final int timeoutMs;
+ private final Supplier nextIdSupplier;
+ private volatile Throwable exception;
+ private final AtomicInteger rateDelayedMs = new AtomicInteger(0);
+ private volatile RateLimiter readLimiter;
+ private volatile RateLimiter writeLimiter;
+ private volatile boolean checkReadUnits;
+ private volatile boolean checkWriteUnits;
+ private volatile int reqSize;
+ private volatile int resSize;
+ private volatile short serialVersionUsed;
+ private volatile short queryVersionUsed;
+ private volatile long latencyNanos;
+ private volatile long networkLatency;
+
+ RequestContext(Request kvRequest, long startNanos, int timeoutMs,
+ Supplier nextIdSupplier, RateLimiter readLimiter,
+ RateLimiter writeLimiter, boolean checkReadUnits,
+ boolean checkWriteUnits) {
+ this.kvRequest = kvRequest;
+ this.startNanos = startNanos;
+ this.timeoutMs = timeoutMs;
+ this.nextIdSupplier = nextIdSupplier;
+ this.readLimiter = readLimiter;
+ this.writeLimiter = writeLimiter;
+ this.checkReadUnits = checkReadUnits;
+ this.checkWriteUnits = checkWriteUnits;
+
+ this.requestId = Long.toString(nextIdSupplier.get());
+ this.requestClass = kvRequest.getClass().getSimpleName();
+ }
+ }
+
public Client(Logger logger,
- NoSQLHandleConfig httpConfig) {
+ NoSQLHandleConfig httpConfig,
+ ScheduledExecutorService taskExecutor) {
this.logger = logger;
this.config = httpConfig;
@@ -266,7 +348,7 @@ public Client(Logger logger,
httpClient.configureProxy(httpConfig);
}
- authProvider= config.getAuthorizationProvider();
+ authProvider = config.getAuthorizationProvider();
if (authProvider == null) {
throw new IllegalArgumentException(
"Must configure AuthorizationProvider to use HttpClient");
@@ -302,6 +384,8 @@ public Client(Logger logger,
/* for internal testing */
prepareFilename = System.getProperty("test.preparefilename");
+ this.taskExecutor = taskExecutor;
+ initErrorHandlers();
}
/**
@@ -326,7 +410,9 @@ protected HttpClient createHttpClient(URL url,
sslCtx,
httpConfig.getSSLHandshakeTimeout(),
"NoSQL Driver",
- logger);
+ logger,
+ httpConfig.getConnectionPoolSize(),
+ httpConfig.getPoolMaxPending());
}
/**
@@ -347,6 +433,9 @@ public void shutdown() {
if (threadPool != null) {
threadPool.shutdown();
}
+ if (taskExecutor != null) {
+ taskExecutor.shutdown();
+ }
}
public int getAcquiredChannelCount() {
@@ -365,13 +454,13 @@ public int getFreeChannelCount() {
* Get the next client-scoped request id. It needs to be combined with the
* client id to obtain a globally unique scope.
*/
- private int nextRequestId() {
+ private long nextRequestId() {
return maxRequestId.addAndGet(1);
}
/**
- * Execute the KV request and return the response. This is the top-level
- * method for request execution.
+ * Execute the KV request and return the future response. This is the
+ * top-level method for request execution.
*
* This method handles exceptions to distinguish between what can be retried
* and what cannot, making sure that root cause exceptions are
@@ -387,9 +476,9 @@ private int nextRequestId() {
*
* @param kvRequest the KV request to be executed by the server
*
- * @return the Result of the request
+ * @return the future representing the result of the request
*/
- public Result execute(Request kvRequest) {
+ public CompletableFuture execute(Request kvRequest) {
requireNonNull(kvRequest, "NoSQLHandle: request must be non-null");
@@ -406,7 +495,11 @@ public Result execute(Request kvRequest) {
* fails for a given Request instance it will throw
* IllegalArgumentException.
*/
- kvRequest.validate();
+ try {
+ kvRequest.validate();
+ } catch (Throwable t) {
+ return CompletableFuture.failedFuture(t);
+ }
/* clear any retry stats that may exist on this request object */
kvRequest.setRetryStats(null);
@@ -434,7 +527,8 @@ public Result execute(Request kvRequest) {
*/
if (qreq.hasDriver()) {
trace("QueryRequest has QueryDriver", 2);
- return new QueryResult(qreq, false);
+ return CompletableFuture.completedFuture(
+ new QueryResult(qreq, false));
}
/*
@@ -449,7 +543,8 @@ public Result execute(Request kvRequest) {
trace("QueryRequest has no QueryDriver, but is prepared", 2);
QueryDriver driver = new QueryDriver(qreq);
driver.setClient(this);
- return new QueryResult(qreq, false);
+ return CompletableFuture.completedFuture(
+ new QueryResult(qreq, false));
}
/*
@@ -467,10 +562,6 @@ public Result execute(Request kvRequest) {
qreq.incBatchCounter();
}
- int timeoutMs = kvRequest.getTimeoutInternal();
-
- Throwable exception = null;
-
/*
* If the request doesn't set an explicit compartment, use
* the config default if provided.
@@ -480,7 +571,6 @@ public Result execute(Request kvRequest) {
config.getDefaultCompartment());
}
- int rateDelayedMs = 0;
boolean checkReadUnits = false;
boolean checkWriteUnits = false;
@@ -515,527 +605,679 @@ public Result execute(Request kvRequest) {
}
}
- final long startNanos = System.nanoTime();
- kvRequest.setStartNanos(startNanos);
- final String requestClass = kvRequest.getClass().getSimpleName();
+ kvRequest.setStartNanos(System.nanoTime());
+ RequestContext ctx = new RequestContext(kvRequest,
+ kvRequest.getStartNanos(), kvRequest.getTimeoutInternal(),
+ this::nextRequestId, readLimiter, writeLimiter,
+ checkReadUnits, checkWriteUnits);
- /*
- * boolean that indicates whether content must be signed. Cross
- * region operations must include content when signing. See comment
- * on the method
- */
- final boolean signContent = requireContentSigned(kvRequest);
- String requestId = "";
- int thisIterationTimeoutMs = 0;
+ return executeWithRetry(ctx);
+ }
- do {
- thisIterationTimeoutMs =
- getIterationTimeoutMs(timeoutMs, startNanos);
- /*
- * Check rate limiters before executing the request.
- * Wait for read and/or write limiters to be below their limits
- * before continuing. Be aware of the timeout given.
- */
- if (readLimiter != null && checkReadUnits == true) {
- try {
- /*
- * this may sleep for a while, up to thisIterationTimeoutMs
- * and may throw TimeoutException
- */
- rateDelayedMs += readLimiter.consumeUnitsWithTimeout(
- 0, thisIterationTimeoutMs, false);
- } catch (Exception e) {
- exception = e;
- break;
- }
+ /*
+ * Core method which creates the request and sends it to the server.
+ * If the request fails, it performs a retry.
+ */
+ private CompletableFuture executeWithRetry(RequestContext ctx) {
+
+ final Request kvRequest = ctx.kvRequest;
+ final int timeoutMs = ctx.timeoutMs;
+ final long startNanos = ctx.startNanos;
+ final int thisIterationTimeoutMs =
+ getIterationTimeoutMs(timeoutMs, startNanos);
+
+ /* Check for over all request timeout first */
+ if (thisIterationTimeoutMs <= 0) {
+ RequestTimeoutException rte = new RequestTimeoutException(timeoutMs,
+ ctx.requestClass + " timed out:" +
+ (ctx.requestId.isEmpty() ? "" : " requestId=" + ctx.requestId) +
+ " nextRequestId=" + nextRequestId() +
+ " iterationTimeout=" + thisIterationTimeoutMs + "ms " +
+ (kvRequest.getRetryStats() != null ?
+ kvRequest.getRetryStats() : ""), ctx.exception);
+ return CompletableFuture.failedFuture(rte);
+ }
+
+ /* Log retry */
+ if (kvRequest.getNumRetries() > 0) {
+ logRetries(kvRequest.getNumRetries(), ctx.exception);
+ }
+
+ if (serialVersion < 3 && kvRequest instanceof DurableRequest) {
+ if (((DurableRequest)kvRequest).getDurability() != null) {
+ oneTimeMessage("The requested feature is not supported " +
+ "by the connected server: Durability");
}
- if (writeLimiter != null && checkWriteUnits == true) {
- try {
- /*
- * this may sleep for a while, up to thisIterationTimeoutMs
- * and may throw TimeoutException
- */
- rateDelayedMs += writeLimiter.consumeUnitsWithTimeout(
- 0, thisIterationTimeoutMs, false);
- } catch (Exception e) {
- exception = e;
- break;
- }
+ }
+ if (serialVersion < 3 && kvRequest instanceof TableRequest) {
+ TableLimits limits = ((TableRequest)kvRequest).getTableLimits();
+ if (limits != null &&
+ limits.getMode() == CapacityMode.ON_DEMAND) {
+ oneTimeMessage("The requested feature is not supported " +
+ "by the connected server: on demand " +
+ "capacity table");
}
+ }
- /* update iteration timeout in case limiters slept for some time */
- thisIterationTimeoutMs =
- getIterationTimeoutMs(timeoutMs, startNanos);
+ return handlePreRateLimit(ctx)
+ .thenCompose((Integer delay) -> getAuthString(ctx, authProvider))
+ .thenCompose((String authString) -> createRequest(ctx, authString))
+ .thenCompose((FullHttpRequest request) -> submitRequest(ctx, request))
+ .thenApply((FullHttpResponse response) -> handleResponse(ctx, response))
+ .thenApply((Result result) -> handleResult(ctx, result))
+ .thenCompose((Result result) -> handlePostRateLimit(ctx, result))
+ .handle((Result result, Throwable err) -> {
+ /* Handle error and retry */
+ if (err != null) {
+ return handleError(ctx, err);
+ } else {
+ return CompletableFuture.completedFuture(result);
+ }
+ })
+ .thenCompose(Function.identity());
+ }
- /* ensure limiting didn't throw us over the timeout */
- if (thisIterationTimeoutMs <= 0) {
- break;
- }
+ private CompletableFuture handlePreRateLimit(RequestContext ctx) {
+ /*
+ * Check rate limiters before executing the request.
+ * Wait for read and/or write limiters to be below their limits
+ * before continuing. Be aware of the timeout given.
+ */
+ int preRateLimitDelayMs = 0;
+ if (ctx.readLimiter != null && ctx.checkReadUnits) {
+ preRateLimitDelayMs += ((SimpleRateLimiter) ctx.readLimiter)
+ .consumeExternally(0);
+ }
+ if (ctx.writeLimiter != null && ctx.checkWriteUnits) {
+ preRateLimitDelayMs += ((SimpleRateLimiter) ctx.writeLimiter)
+ .consumeExternally(0);
+ }
+
+ int thisIterationTimeoutMs =
+ getIterationTimeoutMs(ctx.timeoutMs, ctx.startNanos);
+
+ /* If rate limiting results in a timeout, complete with exception. */
+ if (thisIterationTimeoutMs <= preRateLimitDelayMs) {
+ final TimeoutException ex = new TimeoutException(
+ "timed out waiting "
+ + thisIterationTimeoutMs
+ + "ms due to rate limiting");
+ return createDelayFuture(thisIterationTimeoutMs)
+ .thenCompose(d -> CompletableFuture.failedFuture(ex));
+ }
+ /* sleep for delay ms */
+ return createDelayFuture(preRateLimitDelayMs)
+ .whenComplete((delay, err) -> ctx.rateDelayedMs.addAndGet(delay));
+ }
- final String authString =
- authProvider.getAuthorizationString(kvRequest);
+ /**
+ * Get auth token from auth provider.
+ * This may contact the server to get the token.
+ */
+ private CompletableFuture getAuthString(RequestContext ctx,
+ AuthorizationProvider authProvider) {
+ final Request kvRequest = ctx.kvRequest;
+ return authProvider.getAuthorizationStringAsync(kvRequest)
+ .thenApply(authString -> {
+ /* Check whether timed out while acquiring the auth token */
+ if (timeoutRequest(kvRequest.getStartNanos(),
+ kvRequest.getTimeoutInternal(),
+ null /* exception */)) {
+ TimeoutException ex = new TimeoutException(
+ "timed out during auth token acquisition");
+ throw new CompletionException(ex);
+ }
+ /* validate the acquired auth token */
authProvider.validateAuthString(authString);
+ return authString;
+ });
+ }
+ /**
+ * Create Netty HTTP request.
+ * This will serialize the request body and fill the HTTP headers and
+ * body.
+ * This may contact the server to sign the request body.
+ */
+ private CompletableFuture createRequest(RequestContext ctx,
+ String authString) {
+ ByteBuf buffer = null;
+ try {
+ buffer = Unpooled.buffer();
+ final Request kvRequest = ctx.kvRequest;
+ /*
+ * we expressly check size limit below based on onprem versus
+ * cloud. Set the request to not check size limit inside
+ * writeContent().
+ */
+ kvRequest.setCheckRequestSize(false);
- if (kvRequest.getNumRetries() > 0) {
- logRetries(kvRequest.getNumRetries(), exception);
+ /* Set the topo seq num in the request, if it has not been set
+ * already */
+ if (!(kvRequest instanceof QueryRequest) ||
+ kvRequest.isQueryRequest()) {
+ kvRequest.setTopoSeqNum(getTopoSeqNum());
}
- if (serialVersion < 3 && kvRequest instanceof DurableRequest) {
- if (((DurableRequest)kvRequest).getDurability() != null) {
- oneTimeMessage("The requested feature is not supported " +
- "by the connected server: Durability");
- }
- }
+ /*
+ * Temporarily change the timeout in the request object so
+ * the serialized timeout sent to the server is correct for
+ * this iteration. After serializing the request, set the
+ * timeout back to the overall request timeout so that other
+ * processing (retry delays, etc) work correctly.
+ */
+ kvRequest.setTimeoutInternal(
+ getIterationTimeoutMs(ctx.timeoutMs, ctx.startNanos));
+ writeContent(buffer, ctx);
+ kvRequest.setTimeoutInternal(ctx.timeoutMs);
- if (serialVersion < 3 && kvRequest instanceof TableRequest) {
- TableLimits limits = ((TableRequest)kvRequest).getTableLimits();
- if (limits != null &&
- limits.getMode() == CapacityMode.ON_DEMAND) {
- oneTimeMessage("The requested feature is not supported " +
- "by the connected server: on demand " +
- "capacity table");
+ /*
+ * If on-premises the authProvider will always be a
+ * StoreAccessTokenProvider. If so, check against
+ * configurable limit. Otherwise check against internal
+ * hardcoded cloud limit.
+ */
+ if (authProvider instanceof StoreAccessTokenProvider) {
+ if (buffer.readableBytes() >
+ httpClient.getMaxContentLength()) {
+ throw new RequestSizeLimitException("The request " +
+ "size of " + buffer.readableBytes() +
+ " exceeded the limit of " +
+ httpClient.getMaxContentLength());
}
+ } else {
+ kvRequest.setCheckRequestSize(true);
+ BinaryProtocol.checkRequestSizeLimit(
+ kvRequest, buffer.readableBytes());
+ }
+ final FullHttpRequest request =
+ new DefaultFullHttpRequest(
+ HTTP_1_1, POST, kvRequestURI,
+ buffer,
+ headersFactory().withValidation(false),
+ trailersFactory().withValidation(false));
+ HttpHeaders headers = request.headers();
+ addCommonHeaders(headers);
+ int contentLength = buffer.readableBytes();
+ ctx.reqSize = contentLength;
+ headers.add(HttpHeaderNames.HOST, host)
+ .add(REQUEST_ID_HEADER, ctx.requestId)
+ .setInt(CONTENT_LENGTH, contentLength);
+ if (sessionCookie != null) {
+ headers.add(COOKIE, sessionCookie);
+ }
+ String serdeVersion = getSerdeVersion(kvRequest);
+ if (serdeVersion != null) {
+ headers.add("x-nosql-serde-version", serdeVersion);
}
- ResponseHandler responseHandler = null;
- short serialVersionUsed = serialVersion;
- short queryVersionUsed = queryVersion;
- ByteBuf buffer = null;
- try {
- /*
- * NOTE: the ResponseHandler will release the Channel
- * in its close() method, which is always called in the
- * finally clause. This handles both successful and retried
- * operations in the loop.
- */
- Channel channel = httpClient.getChannel(thisIterationTimeoutMs);
- /* update iteration timeout in case channel took some time */
- thisIterationTimeoutMs =
- getIterationTimeoutMs(timeoutMs, startNanos);
- /* ensure limiting didn't throw us over the timeout */
- if (thisIterationTimeoutMs <= 0) {
- break;
- }
-
- requestId = Long.toString(nextRequestId());
- responseHandler =
- new ResponseHandler(httpClient, logger, channel,
- requestId, kvRequest.shouldRetry());
- buffer = channel.alloc().directBuffer();
- buffer.retain();
-
- /*
- * we expressly check size limit below based on onprem versus
- * cloud. Set the request to not check size limit inside
- * writeContent().
- */
- kvRequest.setCheckRequestSize(false);
-
- /* Set the topo seq num in the request, if it has not been set
- * already */
- if (!(kvRequest instanceof QueryRequest) ||
- kvRequest.isQueryRequest()) {
- kvRequest.setTopoSeqNum(getTopoSeqNum());
- }
+ /*
+ * boolean that indicates whether content must be signed. Cross
+ * region operations must include content when signing. See comment
+ * on the method
+ */
+ final boolean signContent = requireContentSigned(kvRequest);
- /*
- * Temporarily change the timeout in the request object so
- * the serialized timeout sent to the server is correct for
- * this iteration. After serializing the request, set the
- * timeout back to the overall request timeout so that other
- * processing (retry delays, etc) work correctly.
- */
- kvRequest.setTimeoutInternal(thisIterationTimeoutMs);
- serialVersionUsed = writeContent(buffer, kvRequest,
- queryVersionUsed);
- kvRequest.setTimeoutInternal(timeoutMs);
-
- /*
- * If on-premises the authProvider will always be a
- * StoreAccessTokenProvider. If so, check against
- * configurable limit. Otherwise check against internal
- * hardcoded cloud limit.
- */
- if (authProvider instanceof StoreAccessTokenProvider) {
- if (buffer.readableBytes() >
- httpClient.getMaxContentLength()) {
- throw new RequestSizeLimitException("The request " +
- "size of " + buffer.readableBytes() +
- " exceeded the limit of " +
- httpClient.getMaxContentLength());
+ /*
+ * Get request body bytes if the request needed to be signed
+ * with content
+ */
+ byte[] content = signContent ? getBodyBytes(buffer) : null;
+ return authProvider.setRequiredHeadersAsync(authString, kvRequest,
+ headers, content)
+ .thenApply(n -> {
+ String namespace = kvRequest.getNamespace();
+ if (namespace == null) {
+ namespace = config.getDefaultNamespace();
}
- } else {
- kvRequest.setCheckRequestSize(true);
- BinaryProtocol.checkRequestSizeLimit(
- kvRequest, buffer.readableBytes());
- }
+ if (namespace != null) {
+ headers.add(REQUEST_NAMESPACE_HEADER, namespace);
+ }
+ return request;
+ });
+ } catch (Throwable e) {
+ /* Release the buffer on error */
+ if (buffer != null) {
+ buffer.release();
+ }
+ return CompletableFuture.failedFuture(e);
+ }
+ }
- final FullHttpRequest request =
- new DefaultFullHttpRequest(
- HTTP_1_1, POST, kvRequestURI,
- buffer,
- headersFactory().withValidation(false),
- trailersFactory().withValidation(false));
- HttpHeaders headers = request.headers();
- addCommonHeaders(headers);
- int contentLength = buffer.readableBytes();
- headers.add(HttpHeaderNames.HOST, host)
- .add(REQUEST_ID_HEADER, requestId)
- .setInt(CONTENT_LENGTH, contentLength);
- if (sessionCookie != null) {
- headers.add(COOKIE, sessionCookie);
- }
+ /**
+ * Send the HTTP request to server and get the response back.
+ */
+ private CompletableFuture submitRequest(
+ RequestContext ctx, HttpRequest request) {
- String serdeVersion = getSerdeVersion(kvRequest);
- if (serdeVersion != null) {
- headers.add("x-nosql-serde-version", serdeVersion);
- }
+ final Request kvRequest = ctx.kvRequest;
+ if (isLoggable(logger, Level.FINE) && !kvRequest.getIsRefresh()) {
+ logTrace(logger, "Request: " + ctx.requestClass +
+ ", requestId=" + ctx.requestId);
+ }
+ ctx.latencyNanos = System.nanoTime();
+ int timeoutMs = getIterationTimeoutMs(ctx.timeoutMs, ctx.startNanos);
- /*
- * If the request doesn't set an explicit compartment, use
- * the config default if provided.
- */
- if (kvRequest.getCompartment() == null) {
- kvRequest.setCompartmentInternal(
- config.getDefaultCompartment());
- }
+ return httpClient.runRequest(request, timeoutMs)
+ .whenComplete((res, err) -> {
+ ctx.networkLatency =
+ (System.nanoTime() - ctx.latencyNanos) / 1_000_000;
+ });
+ }
- /*
- * Get request body bytes if the request needed to be signed
- * with content
- */
- byte[] content = signContent ? getBodyBytes(buffer) : null;
- authProvider.setRequiredHeaders(authString, kvRequest, headers,
- content);
-
- String namespace = kvRequest.getNamespace();
- if (namespace == null) {
- namespace = config.getDefaultNamespace();
- }
- if (namespace != null) {
- headers.add(REQUEST_NAMESPACE_HEADER, namespace);
- }
+ /**
+ * Deserialize HTTP response into NoSQL Result.
+ */
+ private Result handleResponse(RequestContext ctx, FullHttpResponse fhr) {
+ final Request kvRequest = ctx.kvRequest;
+ if (isLoggable(logger, Level.FINE) && !kvRequest.getIsRefresh()) {
+ logTrace(logger, "Response: " + ctx.requestClass +
+ ", status=" + fhr.status() +
+ ", requestId=" + ctx.requestId );
+ }
+ try {
+ Result result = processResponse(
+ fhr.status(), fhr.headers(), fhr.content(), ctx);
+ ctx.rateDelayedMs.addAndGet(
+ getRateDelayedFromHeader(fhr.headers()));
+ ctx.resSize = fhr.content().readerIndex();
+ return result;
+ } finally {
+ fhr.release(); //release response
+ }
+ }
- if (isLoggable(logger, Level.FINE) &&
- !kvRequest.getIsRefresh()) {
- logTrace(logger, "Request: " + requestClass +
- ", requestId=" + requestId);
- }
- long latencyNanos = System.nanoTime();
- httpClient.runRequest(request, responseHandler, channel);
-
- boolean isTimeout =
- responseHandler.await(thisIterationTimeoutMs);
- if (isTimeout) {
- throw new TimeoutException("Request timed out after " +
- timeoutMs + " milliseconds: requestId=" + requestId);
- }
+ /**
+ * Update stats from the result.
+ */
+ private Result handleResult(RequestContext ctx, Result result) {
+ final Request kvRequest = ctx.kvRequest;
+ setTopology(result.getTopology());
+ if (ctx.serialVersionUsed < 3) {
+ /* so we can emit a one-time message if the app */
+ /* tries to access modificationTime */
+ if (result instanceof GetResult) {
+ ((GetResult)result).setClient(this);
+ } else if (result instanceof WriteResult) {
+ ((WriteResult)result).setClient(this);
+ }
+ }
+ if (result instanceof QueryResult && kvRequest.isQueryRequest()) {
+ QueryRequest qreq = (QueryRequest)kvRequest;
+ qreq.addQueryTraces(((QueryResult)result).getQueryTraces());
+ }
+ if (result instanceof TableResult && rateLimiterMap != null) {
+ /* update rate limiter settings for table */
+ TableLimits tl = ((TableResult)result).getTableLimits();
+ updateRateLimiters(((TableResult)result).getTableName(), tl);
+ }
+ /*
+ * We may not have rate limiters yet because queries may
+ * not have a tablename until after the first request.
+ * So try to get rate limiters if we don't have them yet and
+ * this is a QueryRequest.
+ */
+ if (rateLimiterMap != null && ctx.readLimiter == null) {
+ ctx.readLimiter = getQueryRateLimiter(kvRequest, true);
+ }
+ if (rateLimiterMap != null && ctx.writeLimiter == null) {
+ ctx.writeLimiter = getQueryRateLimiter(kvRequest, false);
+ }
+ return result;
+ }
- if (isLoggable(logger, Level.FINE) &&
- !kvRequest.getIsRefresh()) {
- logTrace(logger, "Response: " + requestClass +
- ", status=" +
- responseHandler.getStatus() +
- ", requestId=" + requestId );
- }
+ /**
+ * Handle rate limit from the Result.
+ * This will consume actual units used by the request and sleep.
+ */
+ private CompletableFuture handlePostRateLimit(RequestContext ctx,
+ Result result) {
+ final Request kvRequest = ctx.kvRequest;
+ int postRateLimitDelayMs = consumeLimiterUnits(ctx.readLimiter,
+ result.getReadUnitsInternal());
+ postRateLimitDelayMs += consumeLimiterUnits(ctx.writeLimiter,
+ result.getWriteUnitsInternal());
+
+ return createDelayFuture(postRateLimitDelayMs)
+ .thenApply(rateDelay -> {
+ ctx.rateDelayedMs.addAndGet(rateDelay);
+ result.setRateLimitDelayedMs(ctx.rateDelayedMs.get());
- ByteBuf wireContent = responseHandler.getContent();
- Result res = processResponse(responseHandler.getStatus(),
- responseHandler.getHeaders(),
- wireContent,
- kvRequest,
- serialVersionUsed,
- queryVersionUsed);
- rateDelayedMs += getRateDelayedFromHeader(
- responseHandler.getHeaders());
- int resSize = wireContent.readerIndex();
- long networkLatency =
- (System.nanoTime() - latencyNanos) / 1_000_000;
-
- setTopology(res.getTopology());
-
- if (serialVersionUsed < 3) {
- /* so we can emit a one-time message if the app */
- /* tries to access modificationTime */
- if (res instanceof GetResult) {
- ((GetResult)res).setClient(this);
- } else if (res instanceof WriteResult) {
- ((WriteResult)res).setClient(this);
- }
- }
+ /* copy retry stats to Result on successful operation */
+ result.setRetryStats(kvRequest.getRetryStats());
+ kvRequest.setRateLimitDelayedMs(ctx.rateDelayedMs.get());
- if (res instanceof QueryResult && kvRequest.isQueryRequest()) {
- QueryRequest qreq = (QueryRequest)kvRequest;
- qreq.addQueryTraces(((QueryResult)res).getQueryTraces());
- }
+ statsControl.observe(kvRequest,
+ Math.toIntExact(ctx.networkLatency),
+ ctx.reqSize, ctx.resSize);
+ checkAuthRefreshList(kvRequest);
+ return result;
+ });
+ }
- if (res instanceof TableResult && rateLimiterMap != null) {
- /* update rate limiter settings for table */
- TableLimits tl = ((TableResult)res).getTableLimits();
- updateRateLimiters(((TableResult)res).getTableName(), tl);
- }
+ /*
+ * Main error handling entry point.
+ */
+ private CompletableFuture handleError(RequestContext ctx,
+ Throwable err) {
+ final Throwable actualCause =
+ (err instanceof CompletionException && err.getCause() != null) ?
+ err.getCause() : err;
+
+ /* set exception on context */
+ ctx.exception = actualCause;
+
+ /* Get the appropriate error handler and delegate */
+ ErrorHandler handler = findErrorHandler(actualCause.getClass());
+ if (handler != null) {
+ return handler.handle(ctx, actualCause);
+ }
+
+ /* Default throwable: retry with small delay */
+ final String name = actualCause.getClass().getName();
+ logInfo(logger, "Client execute Throwable, name: " +
+ name + "message: " + actualCause.getMessage());
+ return retryRequest(ctx, 10, actualCause);
+ }
- /*
- * We may not have rate limiters yet because queries may
- * not have a tablename until after the first request.
- * So try to get rate limiters if we don't have them yet and
- * this is a QueryRequest.
- */
- if (rateLimiterMap != null && readLimiter == null) {
- readLimiter = getQueryRateLimiter(kvRequest, true);
- }
- if (rateLimiterMap != null && writeLimiter == null) {
- writeLimiter = getQueryRateLimiter(kvRequest, false);
- }
+ /*
+ * Initializes the error handlers map with specific exception types
+ * and their corresponding handling strategies.
+ * This method sets up a mapping between various exception classes
+ * and the methods responsible for handling them, facilitating appropriate
+ * error management and retry logic during request execution.
+ */
+ private void initErrorHandlers() {
+ errorHandlers.put(AuthenticationException.class,
+ this::handleAuthException);
+ errorHandlers.put(InvalidAuthorizationException.class,
+ this::handleInvalidAuthError);
+ errorHandlers.put(SecurityInfoNotReadyException.class,
+ this::handleSecurityNotReadyError);
+ errorHandlers.put(RetryableException.class,
+ this::handleRetryableError);
+ errorHandlers.put(UnsupportedQueryVersionException.class,
+ this::handleQueryVerError);
+ errorHandlers.put(UnsupportedProtocolException.class,
+ this::handleProtocolVerError);
+ errorHandlers.put(RequestTimeoutException.class, this::failRequest);
+ errorHandlers.put(NoSQLException.class, this::failRequest);
+ errorHandlers.put(RuntimeException.class, this::failRequest);
+ errorHandlers.put(IOException.class, this::handleIOError);
+ errorHandlers.put(InterruptedException.class,
+ this::handleInterruptedError);
+ errorHandlers.put(ExecutionException.class, this::handleExecutionError);
+ errorHandlers.put(TimeoutException.class, this::handleTimeoutError);
+ /* Add any new error handlers here */
+ }
- /* consume rate limiter units based on actual usage */
- rateDelayedMs += consumeLimiterUnits(readLimiter,
- res.getReadUnitsInternal(),
- thisIterationTimeoutMs);
- rateDelayedMs += consumeLimiterUnits(writeLimiter,
- res.getWriteUnitsInternal(),
- thisIterationTimeoutMs);
- res.setRateLimitDelayedMs(rateDelayedMs);
+ /*
+ * Marks the request as failed and returns a failed {@link CompletableFuture}.
+ */
+ private CompletableFuture failRequest(RequestContext ctx,
+ Throwable ex) {
+ final String name = ex.getClass().getName();
+ final String message = String.format("Client execute %s: %s",
+ name, ex.getMessage());
+ logFine(logger, message);
+ ctx.kvRequest.setRateLimitDelayedMs(ctx.rateDelayedMs.get());
+ statsControl.observeError(ctx.kvRequest);
+ return CompletableFuture.failedFuture(ex);
+ }
- /* copy retry stats to Result on successful operation */
- res.setRetryStats(kvRequest.getRetryStats());
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
+ /*
+ * Schedules a retry for the request with the given delay.
+ * Updates retry counters and statistics.
+ */
+ private CompletableFuture retryRequest(RequestContext ctx,
+ int delayMs, Throwable ex) {
+ Request kvRequest = ctx.kvRequest;
+ /* query and protocol exceptions are not errors, do not add them to
+ * retry stats.
+ */
+ if (!(ex instanceof UnsupportedProtocolException
+ || ex instanceof UnsupportedQueryVersionException)) {
+ kvRequest.addRetryException(ex.getClass());
+ kvRequest.incrementRetries();
+ kvRequest.addRetryDelayMs(delayMs);
+ }
+ return scheduleRetry(ctx, delayMs);
+ }
- statsControl.observe(kvRequest, Math.toIntExact(networkLatency),
- contentLength, resSize);
+ /**
+ * Looks up an error handler for the given class by traversing the
+ * class hierarchy until a registered handler is found.
+ *
+ * @param clazz Exception class to resolve
+ * @return A matching {@link ErrorHandler} or {@code null} if none found
+ */
+ private ErrorHandler findErrorHandler(Class> clazz) {
+ while (clazz != null) {
+ if (errorHandlers.containsKey(clazz)) {
+ return errorHandlers.get(clazz);
+ }
+ clazz = clazz.getSuperclass();
+ }
+ return null;
+ }
- checkAuthRefreshList(kvRequest);
+ /*
+ * Error handler for {@link AuthenticationException}
+ */
+ private CompletableFuture handleAuthException(RequestContext ctx,
+ Throwable ex) {
- return res;
+ if (authProvider instanceof StoreAccessTokenProvider) {
+ authProvider.flushCache();
+ return retryRequest(ctx, 0, ex);
+ } else {
+ logInfo(logger, "Unexpected authentication exception: " + ex);
+ return failRequest(ctx, new NoSQLException(
+ "Unexpected exception: " + ex.getMessage(), ex));
+ }
+ }
- } catch (AuthenticationException rae) {
- if (authProvider instanceof StoreAccessTokenProvider) {
- final StoreAccessTokenProvider satp =
- (StoreAccessTokenProvider) authProvider;
- satp.bootstrapLogin(kvRequest);
- kvRequest.addRetryException(rae.getClass());
- kvRequest.incrementRetries();
- exception = rae;
- continue;
- }
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logInfo(logger, "Unexpected authentication exception: " +
- rae);
- throw new NoSQLException("Unexpected exception: " +
- rae.getMessage(), rae);
- } catch (InvalidAuthorizationException iae) {
- /*
- * Allow a single retry for invalid/expired auth
- *
- * This includes "clock skew" errors or signature refresh
- * failures. This does not include permissions-related errors,
- * which would be a UnauthorizedException.
- */
- if (retriedInvalidAuthorizationException(kvRequest)) {
- /* same as NoSQLException below */
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logFine(logger, "Client execute NoSQLException: " +
- iae.getMessage());
- throw iae;
- }
- /* flush auth cache and do one retry */
- authProvider.flushCache();
- kvRequest.addRetryException(iae.getClass());
- kvRequest.incrementRetries();
- exception = iae;
- logFine(logger,
- "Client retrying on InvalidAuthorizationException: " +
- iae.getMessage());
- continue;
- } catch (SecurityInfoNotReadyException sinre) {
- kvRequest.addRetryException(sinre.getClass());
- exception = sinre;
- int delayMs = SEC_ERROR_DELAY_MS;
- if (kvRequest.getNumRetries() > 10) {
- delayMs =
- DefaultRetryHandler.computeBackoffDelay(kvRequest, 0);
- if (delayMs <= 0) {
- break;
- }
- }
- try {
- Thread.sleep(delayMs);
- } catch (InterruptedException ie) {}
- kvRequest.incrementRetries();
- kvRequest.addRetryDelayMs(delayMs);
- continue;
- } catch (RetryableException re) {
-
- if (re instanceof WriteThrottlingException &&
- writeLimiter != null) {
- /* ensure we check write limits next loop */
- checkWriteUnits = true;
- /* set limiter to its limit, if not over already */
- if (writeLimiter.getCurrentRate() < 100.0) {
- writeLimiter.setCurrentRate(100.0);
- }
- /* call retry handler to manage sleep/delay */
- }
- if (re instanceof ReadThrottlingException &&
- readLimiter != null) {
- /* ensure we check read limits next loop */
- checkReadUnits = true;
- /* set limiter to its limit, if not over already */
- if (readLimiter.getCurrentRate() < 100.0) {
- readLimiter.setCurrentRate(100.0);
- }
- /* call retry handler to manage sleep/delay */
- }
+ /*
+ * Error handler for {@link InvalidAuthorizationException}
+ */
+ private CompletableFuture handleInvalidAuthError(RequestContext ctx,
+ Throwable ex) {
+ /*
+ * Allow a single retry for invalid/expired auth
+ *
+ * This includes "clock skew" errors or signature refresh
+ * failures. This does not include permissions-related errors,
+ * which would be an UnauthorizedException.
+ */
+ Request kvRequest = ctx.kvRequest;
+ if (retriedInvalidAuthorizationException(kvRequest)) {
+ return failRequest(ctx, ex);
+ }
+ authProvider.flushCache();
+ logFine(logger,
+ "Client retrying on InvalidAuthorizationException: "
+ + ex.getMessage());
+ return retryRequest(ctx, 0, ex);
+ }
- logFine(logger, "Retryable exception: " +
- re.getMessage());
- /*
- * Handle automatic retries. If this does not throw an error,
- * then the delay (if any) will have been performed and the
- * request should be retried.
- *
- * If there have been too many retries this method will
- * throw the original exception.
- */
-
- kvRequest.addRetryException(re.getClass());
- handleRetry(re, kvRequest);
- kvRequest.incrementRetries();
- exception = re;
- continue;
- } catch (UnsupportedQueryVersionException uqve) {
- /* decrement query version and try again */
- if (decrementQueryVersion(queryVersionUsed) == true) {
- logFine(logger, "Got unsupported query version error " +
- "from server: decrementing query version to " +
- queryVersion + " and trying again.");
- continue;
- }
- throw uqve;
- } catch (UnsupportedProtocolException upe) {
- /* decrement protocol version and try again */
- if (decrementSerialVersion(serialVersionUsed) == true) {
- /* Don't set this exception: it's misleading */
- /* exception = upe; */
- logFine(logger, "Got unsupported protocol error " +
- "from server: decrementing serial version to " +
- serialVersion + " and trying again.");
- continue;
- }
- throw upe;
- } catch (NoSQLException nse) {
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logFine(logger, "Client execute NoSQLException: " +
- nse.getMessage());
- throw nse; /* pass through */
- } catch (RuntimeException e) {
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- if (!kvRequest.getIsRefresh()) {
- /* don't log expected failures from refresh */
- logFine(logger, "Client execute runtime exception: " +
- e.getMessage());
- }
- throw e;
- } catch (IOException ioe) {
- String name = ioe.getClass().getName();
- logFine(logger, "Client execution IOException, name: " +
- name + ", message: " + ioe.getMessage());
- /*
- * An exception in the channel, e.g. the server may have
- * disconnected. Retry.
- */
- kvRequest.addRetryException(ioe.getClass());
- kvRequest.incrementRetries();
- exception = ioe;
+ /*
+ * Error handler for {@link SecurityInfoNotReadyException}
+ */
+ private CompletableFuture handleSecurityNotReadyError(
+ RequestContext ctx, Throwable ex) {
+
+ Request kvRequest = ctx.kvRequest;
+ int delayMs = SEC_ERROR_DELAY_MS;
+ if (kvRequest.getNumRetries() > 10) {
+ delayMs = DefaultRetryHandler.computeBackoffDelay(kvRequest, 0);
+ if (delayMs <= 0) {
+ return failRequest(ctx,
+ new RequestTimeoutException(ctx.timeoutMs,
+ ctx.requestClass + " timed out:" +
+ (ctx.requestId.isEmpty() ? "" :
+ " requestId=" + ctx.requestId) +
+ " nextRequestId=" + nextRequestId() +
+ (kvRequest.getRetryStats() != null ?
+ kvRequest.getRetryStats() : ""), ctx.exception));
+ }
+ }
+ return retryRequest(ctx, delayMs, ex);
+ }
- try {
- Thread.sleep(10);
- } catch (InterruptedException ie) {}
-
- continue;
- } catch (InterruptedException ie) {
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logInfo(logger, "Client interrupted exception: " +
- ie.getMessage());
- /* this exception shouldn't retry -- direct throw */
- throw new NoSQLException("Request interrupted: " +
- ie.getMessage());
- } catch (ExecutionException ee) {
- /*
- * This can happen if a channel is bad in HttpClient.getChannel.
- * This happens if the channel is shut down by the server side
- * or the server (proxy) is restarted, etc. Treat it like
- * IOException above, but retry without waiting
- */
- String name = ee.getCause().getClass().getName();
- logFine(logger, "Client ExecutionException, name: " +
- name + ", message: " + ee.getMessage() + ", retrying");
-
- kvRequest.addRetryException(ee.getCause().getClass());
- kvRequest.incrementRetries();
- exception = ee.getCause();
- continue;
- } catch (TimeoutException te) {
- exception = te;
- logInfo(logger, "Timeout exception: " + te);
- break; /* fall through to exception below */
- } catch (Throwable t) {
- /*
- * this is likely an exception from Netty, perhaps a bad
- * connection. Retry.
- */
- /* Maybe make this logFine */
- String name = t.getClass().getName();
- logInfo(logger, "Client execute Throwable, name: " +
- name + "message: " + t.getMessage());
-
- kvRequest.addRetryException(t.getClass());
- kvRequest.incrementRetries();
- exception = t;
- continue;
- } finally {
- /*
- * Because the buffer.retain() is called after initialized, so
- * the reference count of buffer should be always > 0 here, just
- * call buffer.release(refCnt) to release it.
- */
- if (buffer != null) {
- buffer.release(buffer.refCnt());
- }
- if (responseHandler != null) {
- responseHandler.close();
- }
+
+ /*
+ * Error handler for {@link RetryableException}
+ */
+ private CompletableFuture handleRetryableError(RequestContext ctx,
+ Throwable ex) {
+ Request kvRequest = ctx.kvRequest;
+
+ if (ex instanceof WriteThrottlingException && ctx.writeLimiter != null) {
+ /* ensure we check write limits next retry */
+ ctx.checkWriteUnits = true;
+ /* set limiter to its limit, if not over already */
+ if (ctx.writeLimiter.getCurrentRate() < 100.0) {
+ ctx.writeLimiter.setCurrentRate(100.0);
+ }
+ }
+ if (ex instanceof ReadThrottlingException && ctx.readLimiter != null) {
+ /* ensure we check read limits next loop */
+ ctx.checkReadUnits = true;
+ /* set limiter to its limit, if not over already */
+ if (ctx.readLimiter.getCurrentRate() < 100.0) {
+ ctx.readLimiter.setCurrentRate(100.0);
}
- } while (! timeoutRequest(startNanos, timeoutMs, exception));
+ }
+ logFine(logger, "Retryable exception: " + ex.getMessage());
+        /*
+         * Handle automatic retries. If this does not throw an error,
+         * it returns the delay (in milliseconds) to wait before the
+         * request is retried.
+         *
+         * If there have been too many retries this method will
+         * throw the original exception.
+         */
+ int delayMs = handleRetry((RetryableException) ex, kvRequest);
+ return retryRequest(ctx, delayMs, ex);
+ }
+
+ /*
+ * Error handler for {@link UnsupportedQueryVersionException}
+ */
+ private CompletableFuture handleQueryVerError(RequestContext ctx,
+ Throwable ex) {
+ if (decrementQueryVersion(ctx.queryVersionUsed)) {
+ logFine(logger, "Got unsupported query version error " +
+ "from server: decrementing query version to " +
+ queryVersion + " and trying again.");
+ return retryRequest(ctx, 0, ex);
+ }
+ return failRequest(ctx, ex);
+ }
+
+ /*
+ * Error handler for {@link UnsupportedProtocolException}
+ */
+ private CompletableFuture handleProtocolVerError(RequestContext ctx,
+ Throwable ex) {
+ if (decrementSerialVersion(ctx.serialVersionUsed)) {
+ logFine(logger, "Got unsupported protocol error " +
+ "from server: decrementing serial version to " +
+ serialVersion + " and trying again.");
+ return retryRequest(ctx, 0, ex);
+ }
+ return failRequest(ctx, ex);
+ }
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
+ /*
+ * Error handler for {@link IOException}
+ */
+ private CompletableFuture handleIOError(RequestContext ctx,
+ Throwable ex) {
+ Request kvRequest = ctx.kvRequest;
+ String name = ex.getClass().getName();
+ logFine(logger, "Client execution IOException, name: " +
+ name + ", message: " + ex.getMessage());
+ /* Retry only 10 times. We shouldn't be retrying till timeout occurs
+ * as this can consume a lot of async resources.
+ */
+ if (kvRequest.getNumRetries() > 10) {
+ return failRequest(ctx, ex);
+ }
+ return retryRequest(ctx, 10, ex);
+ }
+
+ private CompletableFuture handleInterruptedError(RequestContext ctx,
+ Throwable ex) {
+ logInfo(logger, "Interrupted: " + ex.getMessage());
+ return failRequest(ctx,
+ new NoSQLException("Request interrupted: " + ex.getMessage()));
+ }
+
+ private CompletableFuture handleExecutionError(RequestContext ctx,
+ Throwable ex) {
/*
- * If the request timed out in a single iteration, and the
- * timeout was fairly long, and there was no delay due to
- * rate limiting, reset the session cookie so the next request
- * may use a different server.
+ * This can happen if a channel is bad in HttpClient.getChannel.
+ * This happens if the channel is shut down by the server side
+ * or the server (proxy) is restarted, etc. Treat it like
+ * IOException above, but retry without waiting
*/
- if (timeoutMs == thisIterationTimeoutMs &&
- timeoutMs >= 2000 &&
- rateDelayedMs == 0) {
- setSessionCookieValue(null);
- }
- throw new RequestTimeoutException(timeoutMs,
- requestClass + " timed out:" +
- (requestId.isEmpty() ? "" : " requestId=" + requestId) +
+ String name = ex.getCause().getClass().getName();
+ logFine(logger, "Client ExecutionException, name: " +
+ name + ", message: " + ex.getMessage() + ", retrying");
+ return retryRequest(ctx, 10, ex);
+ }
+
+ private CompletableFuture handleTimeoutError(RequestContext ctx,
+ Throwable ex) {
+ logInfo(logger, "Timeout exception: " + ex);
+ return failRequest(ctx,
+ new RequestTimeoutException(
+ ctx.timeoutMs,
+ ctx.requestClass + " timed out:" +
+ (ctx.requestId.isEmpty() ? "" : " requestId=" + ctx.requestId) +
" nextRequestId=" + nextRequestId() +
- " iterationTimeout=" + thisIterationTimeoutMs + "ms " +
- (kvRequest.getRetryStats() != null ?
- kvRequest.getRetryStats() : ""), exception);
+ (ctx.kvRequest.getRetryStats() != null ?
+ ctx.kvRequest.getRetryStats() : ""),
+ ctx.exception));
}
+ /**
+ * Helper method to create a CompletableFuture that completes after a delay.
+ * This is used for non-blocking asynchronous delays for rate limiting.
+ *
+ * @param delayMs The delay in milliseconds.
+ * @return A CompletableFuture that completes after the specified delay.
+ */
+ private CompletableFuture createDelayFuture(int delayMs) {
+ CompletableFuture delayFuture = new CompletableFuture<>();
+ if (delayMs > 0) {
+ taskExecutor.schedule(() -> delayFuture.complete(delayMs), delayMs,
+ TimeUnit.MILLISECONDS);
+ } else {
+ delayFuture.complete(delayMs); // Complete immediately if no delay
+ }
+ return delayFuture;
+ }
+
+ private CompletableFuture scheduleRetry(RequestContext ctx,
+ int delayMs) {
+        // TODO: check for overall timeout before scheduling the retry
+ CompletableFuture retryFuture = new CompletableFuture<>();
+ taskExecutor.schedule(() -> {
+ /* Increment request-id for retry */
+ ctx.requestId = String.valueOf(ctx.nextIdSupplier.get());
+ executeWithRetry(ctx)
+ .whenComplete((res, e) -> {
+ if (e != null) {
+ retryFuture.completeExceptionally(e);
+ } else {
+ retryFuture.complete(res);
+ }
+ });
+ }, delayMs, TimeUnit.MILLISECONDS);
+ return retryFuture;
+ }
/**
* Calculate the timeout for the next iteration.
* This is basically the given timeout minus the time
@@ -1096,7 +1338,7 @@ private RateLimiter getQueryRateLimiter(Request request, boolean read) {
* @return the number of milliseconds delayed due to rate limiting
*/
private int consumeLimiterUnits(RateLimiter rl,
- long units, int timeoutMs) {
+ long units) {
if (rl == null || units <= 0) {
return 0;
@@ -1115,13 +1357,7 @@ private int consumeLimiterUnits(RateLimiter rl,
* better to avoid spikes in throughput and oscillation that
* can result from it.
*/
-
- try {
- return rl.consumeUnitsWithTimeout(units, timeoutMs, false);
- } catch (TimeoutException e) {
- /* Don't throw - operation succeeded. Just return timeoutMs. */
- return timeoutMs;
- }
+ return ((SimpleRateLimiter) rl).consumeExternally(units);
}
@@ -1205,26 +1441,27 @@ boolean timeoutRequest(long startNanos,
*
* @throws IOException
*/
- private short writeContent(ByteBuf content, Request kvRequest,
- short queryVersion)
+ private void writeContent(ByteBuf content, RequestContext ctx)
throws IOException {
+ final Request kvRequest = ctx.kvRequest;
final NettyByteOutputStream bos = new NettyByteOutputStream(content);
- final short versionUsed = serialVersion;
+ ctx.serialVersionUsed = serialVersion;
+ ctx.queryVersionUsed = queryVersion;
+
SerializerFactory factory = chooseFactory(kvRequest);
- factory.writeSerialVersion(versionUsed, bos);
+ factory.writeSerialVersion(ctx.serialVersionUsed, bos);
if (kvRequest instanceof QueryRequest ||
kvRequest instanceof PrepareRequest) {
kvRequest.createSerializer(factory).serialize(kvRequest,
- versionUsed,
- queryVersion,
+ ctx.serialVersionUsed,
+ ctx.queryVersionUsed,
bos);
} else {
kvRequest.createSerializer(factory).serialize(kvRequest,
- versionUsed,
+ ctx.serialVersionUsed,
bos);
}
- return versionUsed;
}
/**
@@ -1238,9 +1475,7 @@ private short writeContent(ByteBuf content, Request kvRequest,
final Result processResponse(HttpResponseStatus status,
HttpHeaders headers,
ByteBuf content,
- Request kvRequest,
- short serialVersionUsed,
- short queryVersionUsed) {
+ RequestContext ctx) {
if (!HttpResponseStatus.OK.equals(status)) {
processNotOKResponse(status, content);
@@ -1254,8 +1489,8 @@ final Result processResponse(HttpResponseStatus status,
Result res = null;
try (ByteInputStream bis = new NettyByteInputStream(content)) {
- res = processOKResponse(bis, kvRequest, serialVersionUsed,
- queryVersionUsed);
+ res = processOKResponse(bis, ctx.kvRequest, ctx.serialVersionUsed,
+ ctx.queryVersionUsed);
}
String sv = headers.get(SERVER_SERIAL_VERSION);
if (sv != null) {
@@ -1384,8 +1619,10 @@ private void setSessionCookie(HttpHeaders headers) {
}
}
- private synchronized void setSessionCookieValue(String pVal) {
- sessionCookie = pVal;
+ private void setSessionCookieValue(String pVal) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ sessionCookie = pVal;
+ });
}
/**
@@ -1435,20 +1672,22 @@ private void setTableNeedsRefresh(String tableName, boolean needsRefresh) {
* Query table limits and create rate limiters for a table in a
* short-lived background thread.
*/
- private synchronized void backgroundUpdateLimiters(String tableName,
- String compartmentId) {
- if (tableNeedsRefresh(tableName) == false) {
- return;
- }
- setTableNeedsRefresh(tableName, false);
+ private void backgroundUpdateLimiters(String tableName,
+ String compartmentId) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (tableNeedsRefresh(tableName) == false) {
+ return;
+ }
+ setTableNeedsRefresh(tableName, false);
- try {
- threadPool.execute(() -> {
- updateTableLimiters(tableName, compartmentId);
- });
- } catch (RejectedExecutionException e) {
- setTableNeedsRefresh(tableName, true);
- }
+ try {
+ threadPool.execute(() -> {
+ updateTableLimiters(tableName, compartmentId);
+ });
+ } catch (RejectedExecutionException e) {
+ setTableNeedsRefresh(tableName, true);
+ }
+ });
}
/*
@@ -1464,7 +1703,7 @@ private void updateTableLimiters(String tableName, String compartmentId) {
try {
logFine(logger, "Starting GetTableRequest for table '" +
tableName + "'");
- res = (TableResult) this.execute(gtr);
+ res = (TableResult) ConcurrentUtil.awaitFuture(this.execute(gtr));
} catch (Exception e) {
logFine(logger, "GetTableRequest for table '" +
tableName + "' returned exception: " + e.getMessage());
@@ -1508,7 +1747,7 @@ private boolean retriedInvalidAuthorizationException(Request request) {
return rs.getNumExceptions(InvalidAuthorizationException.class) > 0;
}
- private void handleRetry(RetryableException re,
+ private int handleRetry(RetryableException re,
Request kvRequest) {
int numRetries = kvRequest.getNumRetries();
String msg = "Retry for request " +
@@ -1520,7 +1759,7 @@ private void handleRetry(RetryableException re,
logFine(logger, "Too many retries");
throw re;
}
- handler.delay(kvRequest, numRetries, re);
+ return handler.delayTime(kvRequest, numRetries, re);
}
private void logRetries(int numRetries, Throwable exception) {
@@ -1638,19 +1877,21 @@ StatsControl getStatsControl() {
* @return true: version was decremented
* false: already at lowest version number.
*/
- private synchronized boolean decrementSerialVersion(short versionUsed) {
- if (serialVersion != versionUsed) {
- return true;
- }
- if (serialVersion == V4) {
- serialVersion = V3;
- return true;
- }
- if (serialVersion == V3) {
- serialVersion = V2;
- return true;
- }
- return false;
+ private boolean decrementSerialVersion(short versionUsed) {
+ return ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (serialVersion != versionUsed) {
+ return true;
+ }
+ if (serialVersion == V4) {
+ serialVersion = V3;
+ return true;
+ }
+ if (serialVersion == V3) {
+ serialVersion = V2;
+ return true;
+ }
+ return false;
+ });
}
/**
@@ -1660,18 +1901,19 @@ private synchronized boolean decrementSerialVersion(short versionUsed) {
* @return true: version was decremented
* false: already at lowest version number.
*/
- private synchronized boolean decrementQueryVersion(short versionUsed) {
-
- if (queryVersion != versionUsed) {
- return true;
- }
+ private boolean decrementQueryVersion(short versionUsed) {
+ return ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (queryVersion != versionUsed) {
+ return true;
+ }
- if (queryVersion == QueryDriver.QUERY_V3) {
- return false;
- }
+ if (queryVersion == QueryDriver.QUERY_V3) {
+ return false;
+ }
- --queryVersion;
- return true;
+ --queryVersion;
+ return true;
+ });
}
/**
@@ -1805,38 +2047,42 @@ private boolean stringsEqualOrNull(String s1, String s2) {
* Add get, put, delete to cover all auth types
* This is synchronized to avoid 2 requests adding the same table
*/
- private synchronized void addRequestToRefreshList(Request request) {
- logFine(logger, "Adding table to request list: " +
- request.getCompartment() + ":" + request.getTableName());
- PutRequest pr =
- new PutRequest().setTableName(request.getTableName());
- pr.setCompartmentInternal(request.getCompartment());
- pr.setValue(badValue);
- pr.setIsRefresh(true);
- authRefreshRequests.add(pr);
- GetRequest gr =
- new GetRequest().setTableName(request.getTableName());
- gr.setCompartmentInternal(request.getCompartment());
- gr.setKey(badValue);
- gr.setIsRefresh(true);
- authRefreshRequests.add(gr);
- DeleteRequest dr =
- new DeleteRequest().setTableName(request.getTableName());
- dr.setCompartmentInternal(request.getCompartment());
- dr.setKey(badValue);
- dr.setIsRefresh(true);
- authRefreshRequests.add(dr);
+ private void addRequestToRefreshList(Request request) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ logFine(logger, "Adding table to request list: " +
+ request.getCompartment() + ":" + request.getTableName());
+ PutRequest pr =
+ new PutRequest().setTableName(request.getTableName());
+ pr.setCompartmentInternal(request.getCompartment());
+ pr.setValue(badValue);
+ pr.setIsRefresh(true);
+ authRefreshRequests.add(pr);
+ GetRequest gr =
+ new GetRequest().setTableName(request.getTableName());
+ gr.setCompartmentInternal(request.getCompartment());
+ gr.setKey(badValue);
+ gr.setIsRefresh(true);
+ authRefreshRequests.add(gr);
+ DeleteRequest dr =
+ new DeleteRequest().setTableName(request.getTableName());
+ dr.setCompartmentInternal(request.getCompartment());
+ dr.setKey(badValue);
+ dr.setIsRefresh(true);
+ authRefreshRequests.add(dr);
+ });
}
/**
* @hidden
* for internal use
*/
- public synchronized void oneTimeMessage(String msg) {
- if (oneTimeMessages.add(msg) == false) {
- return;
- }
- logWarning(logger, msg);
+ public void oneTimeMessage(String msg) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (oneTimeMessages.add(msg) == false) {
+ return;
+ }
+ logWarning(logger, msg);
+ });
}
private SerializerFactory chooseFactory(Request rq) {
@@ -1940,20 +2186,22 @@ public TopologyInfo getTopology() {
return topology;
}
- private synchronized int getTopoSeqNum() {
- return (topology == null ? -1 : topology.getSeqNum());
+ private int getTopoSeqNum() {
+ return ConcurrentUtil.synchronizedCall(this.lock, () ->
+ (topology == null ? -1 : topology.getSeqNum()));
}
- private synchronized void setTopology(TopologyInfo topo) {
-
- if (topo == null) {
- return;
- }
+ private void setTopology(TopologyInfo topo) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (topo == null) {
+ return;
+ }
- if (topology == null || topology.getSeqNum() < topo.getSeqNum()) {
- topology = topo;
- trace("New topology: " + topo, 1);
- }
+ if (topology == null || topology.getSeqNum() < topo.getSeqNum()) {
+ topology = topo;
+ trace("New topology: " + topo, 1);
+ }
+ });
}
/*
diff --git a/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleAsyncImpl.java b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleAsyncImpl.java
new file mode 100644
index 00000000..f12ecbb6
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleAsyncImpl.java
@@ -0,0 +1,507 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.http;
+
+import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.util.internal.logging.InternalLoggerFactory;
+import io.netty.util.internal.logging.JdkLoggerFactory;
+import oracle.nosql.driver.AuthorizationProvider;
+import oracle.nosql.driver.NoSQLHandleAsync;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.StatsControl;
+import oracle.nosql.driver.UserInfo;
+import oracle.nosql.driver.iam.SignatureProvider;
+import oracle.nosql.driver.kv.StoreAccessTokenProvider;
+import oracle.nosql.driver.ops.AddReplicaRequest;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.DropReplicaRequest;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetIndexesResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryPaginatorResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.ReplicaStatsRequest;
+import oracle.nosql.driver.ops.ReplicaStatsResult;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.ops.SystemRequest;
+import oracle.nosql.driver.ops.SystemResult;
+import oracle.nosql.driver.ops.SystemStatusRequest;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.TableUsageResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.util.ConcurrentUtil;
+import oracle.nosql.driver.util.LogUtil;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.JsonUtils;
+import oracle.nosql.driver.values.MapValue;
+
+import javax.net.ssl.SSLException;
+import java.util.ArrayList;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Logger;
+
+public class NoSQLHandleAsyncImpl implements NoSQLHandleAsync {
+ private static final int cores = Runtime.getRuntime().availableProcessors();
+
+ /*
+ * The HTTP client. This is not final so that it can be nulled upon
+ * close.
+ */
+ private final Client client;
+ private final AtomicBoolean isClosed = new AtomicBoolean(false);
+
+ /* thread-pool for scheduling tasks */
+ private final ScheduledExecutorService taskExecutor;
+
+ public NoSQLHandleAsyncImpl(NoSQLHandleConfig config) {
+ configNettyLogging();
+ final Logger logger = getLogger(config);
+ /*
+ * config SslContext first, on-prem authorization provider
+ * will reuse the context in NoSQLHandleConfig
+ */
+ configSslContext(config);
+ taskExecutor = new ScheduledThreadPoolExecutor(cores /* core threads */,
+ new ThreadFactory() {
+ private final AtomicInteger threadNumber = new AtomicInteger(1);
+ @Override
+ public Thread newThread(Runnable r) {
+ final Thread t = Executors.defaultThreadFactory()
+ .newThread(r);
+ t.setName(String.format("nosql-task-executor-%s",
+ threadNumber.getAndIncrement()));
+ t.setDaemon(true);
+ t.setUncaughtExceptionHandler((thread, error) -> {
+ if (ConcurrentUtil.unwrapCompletionException(error)
+ instanceof RejectedExecutionException) {
+ /*
+ * Ignore uncaught error for rejected exception
+ * since that is expected to happen during
+ * executor shut down.
+ */
+ return;
+ }
+ logger.warning(() -> String.format(
+ "Uncaught exception from %s: %s",
+ error, LogUtil.getStackTrace(error)));
+ });
+ return t;
+ }
+ });
+ client = new Client(logger, config, taskExecutor);
+ try {
+ /* configAuthProvider may use client */
+ configAuthProvider(logger, config);
+ } catch (RuntimeException re) {
+ /* cleanup client */
+ client.shutdown();
+ taskExecutor.shutdown();
+ throw re;
+ }
+ }
+
+ /**
+ * Returns the logger used for the driver. If no logger is specified
+ * create one based on this class name.
+ */
+ private Logger getLogger(NoSQLHandleConfig config) {
+ if (config.getLogger() != null) {
+ return config.getLogger();
+ }
+
+ /*
+ * The default logger logs at INFO. If this is too verbose users
+ * must create a logger and pass it in.
+ */
+ Logger logger = Logger.getLogger(getClass().getName());
+ return logger;
+ }
+
+ /**
+ * Configures the logging of Netty library.
+ */
+ private void configNettyLogging() {
+ /*
+ * Configure default Netty logging using Jdk Logger.
+ */
+ InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE);
+ }
+
+ private void configSslContext(NoSQLHandleConfig config) {
+ if (config.getSslContext() != null) {
+ return;
+ }
+ if (config.getServiceURL().getProtocol().equalsIgnoreCase("HTTPS")) {
+ try {
+ SslContextBuilder builder = SslContextBuilder.forClient();
+ if (config.getSSLCipherSuites() != null) {
+ builder.ciphers(config.getSSLCipherSuites());
+ }
+ if (config.getSSLProtocols() != null) {
+ builder.protocols(config.getSSLProtocols());
+ }
+ builder.sessionTimeout(config.getSSLSessionTimeout());
+ builder.sessionCacheSize(config.getSSLSessionCacheSize());
+ config.setSslContext(builder.build());
+ } catch (SSLException se) {
+ throw new IllegalStateException(
+ "Unable to start handle with SSL", se);
+ }
+ }
+ }
+
+ private void configAuthProvider(Logger logger, NoSQLHandleConfig config) {
+ final AuthorizationProvider ap = config.getAuthorizationProvider();
+ if (ap instanceof StoreAccessTokenProvider) {
+ final StoreAccessTokenProvider stProvider =
+ (StoreAccessTokenProvider) ap;
+ if (stProvider.getLogger() == null) {
+ stProvider.setLogger(logger);
+ }
+ if (stProvider.isSecure() &&
+ stProvider.getEndpoint() == null) {
+ String endpoint = config.getServiceURL().toString();
+ if (endpoint.endsWith("/")) {
+ endpoint = endpoint.substring(0, endpoint.length() - 1);
+ }
+ stProvider.setEndpoint(endpoint)
+ .setSslContext(config.getSslContext())
+ .setSslHandshakeTimeout(
+ config.getSSLHandshakeTimeout());
+ /* Check credentials are correct in initial phase only */
+ stProvider.getAuthorizationString(null /* request */);
+ }
+
+ } else if (ap instanceof SignatureProvider) {
+ SignatureProvider sigProvider = (SignatureProvider) ap;
+ if (sigProvider.getLogger() == null) {
+ sigProvider.setLogger(logger);
+ }
+ sigProvider.prepare(config);
+ if (config.getAuthRefresh()) {
+ sigProvider.setOnSignatureRefresh(new SigRefresh());
+ client.createAuthRefreshList();
+ }
+ }
+ }
+
+ @Override
+ public CompletableFuture delete(DeleteRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture get(GetRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture put(PutRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture writeMultiple(
+ WriteMultipleRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture multiDelete(
+ MultiDeleteRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture query(QueryRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public QueryPaginatorResult queryPaginator(QueryRequest request) {
+ return new QueryPaginatorResult(request, this);
+ }
+
+ @Override
+ public CompletableFuture prepare(PrepareRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture tableRequest(TableRequest request) {
+ return executeASync(request).thenApply(tres -> {
+ TableResult res = (TableResult) tres;
+ /* update rate limiters, if table has limits */
+ client.updateRateLimiters(res.getTableName(), res.getTableLimits());
+ return res;
+ });
+ }
+
+ @Override
+ public CompletableFuture getTable(GetTableRequest request) {
+ return executeASync(request).thenApply(tres -> {
+ TableResult res = (TableResult) tres;
+ /* update rate limiters, if table has limits */
+ client.updateRateLimiters(res.getTableName(), res.getTableLimits());
+ return res;
+ });
+ }
+
+ @Override
+ public CompletableFuture systemRequest(
+ SystemRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture systemStatus(
+ SystemStatusRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture getTableUsage(
+ TableUsageRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture listTables(
+ ListTablesRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture getIndexes(
+ GetIndexesRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture addReplica(
+ AddReplicaRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture dropReplica(
+ DropReplicaRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture getReplicaStats(
+ ReplicaStatsRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public void close() {
+ if (isClosed.compareAndSet(false, true)) {
+ client.shutdown();
+ taskExecutor.shutdown();
+ }
+ }
+
+ @Override
+ public CompletableFuture listNamespaces() {
+ return doSystemRequest("show as json namespaces")
+ .thenApply((SystemResult dres )-> {
+ String jsonResult = dres.getResultString();
+ if (jsonResult == null) {
+ return null;
+ }
+ MapValue root = JsonUtils.createValueFromJson(jsonResult, null)
+ .asMap();
+
+ FieldValue namespaces = root.get("namespaces");
+ if (namespaces == null) {
+ return null;
+ }
+
+ ArrayList results = new ArrayList(
+ namespaces.asArray().size());
+ for (FieldValue val : namespaces.asArray()) {
+ results.add(val.getString());
+ }
+ return results.toArray(new String[0]);
+ });
+ }
+
+ @Override
+ public CompletableFuture listUsers() {
+ return doSystemRequest("show as json users")
+ .thenApply((SystemResult dres) -> {
+ String jsonResult = dres.getResultString();
+ if (jsonResult == null) {
+ return null;
+ }
+
+ MapValue root = JsonUtils.createValueFromJson(
+ jsonResult, null).asMap();
+
+ FieldValue users = root.get("users");
+ if (users == null) {
+ return null;
+ }
+
+ ArrayList results = new ArrayList(
+ users.asArray().size());
+
+ for (FieldValue val : users.asArray()) {
+ String id = val.asMap().getString("id");
+ String name = val.asMap().getString("name");
+ results.add(new UserInfo(id, name));
+ }
+ return results.toArray(new UserInfo[0]);
+ });
+ }
+
+ @Override
+ public CompletableFuture listRoles() {
+ return doSystemRequest("show as json roles")
+ .thenApply((SystemResult dres) -> {
+ String jsonResult = dres.getResultString();
+ if (jsonResult == null) {
+ return null;
+ }
+ MapValue root = JsonUtils.createValueFromJson(
+ jsonResult, null).asMap();
+
+ FieldValue roles = root.get("roles");
+ if (roles == null) {
+ return null;
+ }
+
+ ArrayList results = new ArrayList(
+ roles.asArray().size());
+ for (FieldValue val : roles.asArray()) {
+ String role = val.asMap().getString("name");
+ results.add(role);
+ }
+ return results.toArray(new String[0]);
+ });
+ }
+
+ /**
+ * Internal method used by list* methods that defaults timeouts.
+ */
+ private CompletableFuture doSystemRequest(String statement) {
+ return doSystemRequest(statement, 30000, 1000);
+ }
+
+ @Override
+ public CompletableFuture doTableRequest(TableRequest request,
+ int timeoutMs,
+ int pollIntervalMs) {
+
+ return tableRequest(request).thenCompose((TableResult res) ->
+ res.waitForCompletionAsync(this, timeoutMs, pollIntervalMs)
+ .thenApply(v -> res));
+ }
+
+ @Override
+ public CompletableFuture doSystemRequest(String statement,
+ int timeoutMs,
+ int pollIntervalMs) {
+ checkClient();
+ SystemRequest dreq =
+ new SystemRequest().setStatement(statement.toCharArray());
+ return systemRequest(dreq).thenCompose((SystemResult dres) ->
+ dres.waitForCompletionAsync(this, timeoutMs, pollIntervalMs)
+ .thenApply(v -> dres));
+ }
+
+ @Override
+ public StatsControl getStatsControl() {
+ return client.getStatsControl();
+ }
+
+ void checkClient() {
+ if (isClosed.get()) {
+ throw new IllegalStateException("NoSQLHandle has been closed");
+ }
+ }
+
+ /**
+ * @hidden
+ * For testing use
+ */
+ public Client getClient() {
+ return client;
+ }
+
+ /**
+ * @hidden
+ * For testing use
+ */
+ public short getSerialVersion() {
+ return client.getSerialVersion();
+ }
+
+ /**
+ * @hidden
+ *
+ * Testing use only.
+ */
+ public void setDefaultNamespace(String ns) {
+ client.setDefaultNamespace(ns);
+ }
+
+ @SuppressWarnings("unchecked")
+ CompletableFuture executeASync(Request request) {
+ checkClient();
+ return client.execute(request).thenApply(result -> (T) result);
+ }
+
+ public ScheduledExecutorService getTaskExecutor() {
+ return taskExecutor;
+ }
+
+ /**
+ * Cloud service only.
+ * The refresh method of this class is called when a Signature is refreshed
+ * in SignatureProvider. This happens every 4 minutes or so. This mechanism
+ * allows the authentication and authorization information cached by the
+ * server to be refreshed out of band with the normal request path.
+ */
+ private class SigRefresh implements SignatureProvider.OnSignatureRefresh {
+
+ /*
+ * Attempt to refresh the server's authentication and authorization
+ * information for a new signature.
+ */
+ @Override
+ public void refresh(long refreshMs) {
+ client.doRefresh(refreshMs);
+ }
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java
index b29b039f..f8d326b0 100644
--- a/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java
+++ b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java
@@ -7,18 +7,10 @@
package oracle.nosql.driver.http;
-import java.util.ArrayList;
-import java.util.logging.Logger;
-
-import javax.net.ssl.SSLException;
-
-import oracle.nosql.driver.AuthorizationProvider;
import oracle.nosql.driver.NoSQLHandle;
import oracle.nosql.driver.NoSQLHandleConfig;
import oracle.nosql.driver.StatsControl;
import oracle.nosql.driver.UserInfo;
-import oracle.nosql.driver.iam.SignatureProvider;
-import oracle.nosql.driver.kv.StoreAccessTokenProvider;
import oracle.nosql.driver.ops.AddReplicaRequest;
import oracle.nosql.driver.ops.DeleteRequest;
import oracle.nosql.driver.ops.DeleteResult;
@@ -41,6 +33,8 @@
import oracle.nosql.driver.ops.QueryResult;
import oracle.nosql.driver.ops.ReplicaStatsRequest;
import oracle.nosql.driver.ops.ReplicaStatsResult;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.Result;
import oracle.nosql.driver.ops.SystemRequest;
import oracle.nosql.driver.ops.SystemResult;
import oracle.nosql.driver.ops.SystemStatusRequest;
@@ -50,13 +44,7 @@
import oracle.nosql.driver.ops.TableUsageResult;
import oracle.nosql.driver.ops.WriteMultipleRequest;
import oracle.nosql.driver.ops.WriteMultipleResult;
-import oracle.nosql.driver.values.FieldValue;
-import oracle.nosql.driver.values.JsonUtils;
-import oracle.nosql.driver.values.MapValue;
-
-import io.netty.handler.ssl.SslContextBuilder;
-import io.netty.util.internal.logging.InternalLoggerFactory;
-import io.netty.util.internal.logging.JdkLoggerFactory;
+import oracle.nosql.driver.util.ConcurrentUtil;
/**
* The methods in this class require non-null arguments. Because they all
@@ -64,235 +52,110 @@
* single place.
*/
public class NoSQLHandleImpl implements NoSQLHandle {
-
/*
* The HTTP client. This is not final so that it can be nulled upon
* close.
*/
- private Client client;
+ private final NoSQLHandleAsyncImpl asyncHandle;
public NoSQLHandleImpl(NoSQLHandleConfig config) {
-
- configNettyLogging();
- final Logger logger = getLogger(config);
-
- /*
- * config SslContext first, on-prem authorization provider
- * will reuse the context in NoSQLHandleConfig
- */
- configSslContext(config);
- client = new Client(logger, config);
- try {
- /* configAuthProvider may use client */
- configAuthProvider(logger, config);
- } catch (RuntimeException re) {
- /* cleanup client */
- client.shutdown();
- throw re;
- }
- }
-
- /**
- * Returns the logger used for the driver. If no logger is specified
- * create one based on this class name.
- */
- private Logger getLogger(NoSQLHandleConfig config) {
- if (config.getLogger() != null) {
- return config.getLogger();
- }
-
- /*
- * The default logger logs at INFO. If this is too verbose users
- * must create a logger and pass it in.
- */
- Logger logger = Logger.getLogger(getClass().getName());
- return logger;
- }
-
- /**
- * Configures the logging of Netty library.
- */
- private void configNettyLogging() {
- /*
- * Configure default Netty logging using Jdk Logger.
- */
- InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE);
- }
-
- private void configSslContext(NoSQLHandleConfig config) {
- if (config.getSslContext() != null) {
- return;
- }
- if (config.getServiceURL().getProtocol().equalsIgnoreCase("HTTPS")) {
- try {
- SslContextBuilder builder = SslContextBuilder.forClient();
- if (config.getSSLCipherSuites() != null) {
- builder.ciphers(config.getSSLCipherSuites());
- }
- if (config.getSSLProtocols() != null) {
- builder.protocols(config.getSSLProtocols());
- }
- builder.sessionTimeout(config.getSSLSessionTimeout());
- builder.sessionCacheSize(config.getSSLSessionCacheSize());
- config.setSslContext(builder.build());
- } catch (SSLException se) {
- throw new IllegalStateException(
- "Unable to start handle with SSL", se);
- }
- }
- }
-
- private void configAuthProvider(Logger logger, NoSQLHandleConfig config) {
- final AuthorizationProvider ap = config.getAuthorizationProvider();
- if (ap instanceof StoreAccessTokenProvider) {
- final StoreAccessTokenProvider stProvider =
- (StoreAccessTokenProvider) ap;
- if (stProvider.getLogger() == null) {
- stProvider.setLogger(logger);
- }
- if (stProvider.isSecure() &&
- stProvider.getEndpoint() == null) {
- String endpoint = config.getServiceURL().toString();
- if (endpoint.endsWith("/")) {
- endpoint = endpoint.substring(0, endpoint.length() - 1);
- }
- stProvider.setEndpoint(endpoint)
- .setSslContext(config.getSslContext())
- .setSslHandshakeTimeout(
- config.getSSLHandshakeTimeout());
- }
- } else if (ap instanceof SignatureProvider) {
- SignatureProvider sigProvider = (SignatureProvider) ap;
- if (sigProvider.getLogger() == null) {
- sigProvider.setLogger(logger);
- }
- sigProvider.prepare(config);
- if (config.getAuthRefresh()) {
- sigProvider.setOnSignatureRefresh(new SigRefresh());
- client.createAuthRefreshList();
- }
- }
+ asyncHandle = new NoSQLHandleAsyncImpl(config);
}
@Override
public DeleteResult delete(DeleteRequest request) {
- checkClient();
- return (DeleteResult) client.execute(request);
+ return executeSync(request);
}
@Override
public GetResult get(GetRequest request) {
- checkClient();
- return (GetResult) client.execute(request);
+ return executeSync(request);
}
@Override
public PutResult put(PutRequest request) {
- checkClient();
- return (PutResult) client.execute(request);
+ return executeSync(request);
}
@Override
public WriteMultipleResult writeMultiple(WriteMultipleRequest request) {
- checkClient();
- return (WriteMultipleResult) client.execute(request);
+ return executeSync(request);
}
@Override
public MultiDeleteResult multiDelete(MultiDeleteRequest request) {
- checkClient();
- return (MultiDeleteResult) client.execute(request);
+ return executeSync(request);
}
@Override
public QueryResult query(QueryRequest request) {
- checkClient();
- return (QueryResult) client.execute(request);
+ return ConcurrentUtil.awaitFuture(asyncHandle.query(request));
}
@Override
public QueryIterableResult queryIterable(QueryRequest request) {
- checkClient();
+ asyncHandle.checkClient();
return new QueryIterableResult(request, this);
}
@Override
public PrepareResult prepare(PrepareRequest request) {
- checkClient();
- return (PrepareResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableResult tableRequest(TableRequest request) {
- checkClient();
- TableResult res = (TableResult) client.execute(request);
- /* update rate limiters, if table has limits */
- client.updateRateLimiters(res.getTableName(), res.getTableLimits());
- return res;
+ return executeSync(request);
}
@Override
public TableResult getTable(GetTableRequest request) {
- checkClient();
- TableResult res = (TableResult) client.execute(request);
- /* update rate limiters, if table has limits */
- client.updateRateLimiters(res.getTableName(), res.getTableLimits());
- return res;
+ return executeSync(request);
}
@Override
public SystemResult systemRequest(SystemRequest request) {
- checkClient();
- return (SystemResult) client.execute(request);
+ return executeSync(request);
}
@Override
public SystemResult systemStatus(SystemStatusRequest request) {
- checkClient();
- return (SystemResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableUsageResult getTableUsage(TableUsageRequest request) {
- checkClient();
- return (TableUsageResult) client.execute(request);
+ return executeSync(request);
}
@Override
public ListTablesResult listTables(ListTablesRequest request) {
- checkClient();
- return (ListTablesResult) client.execute(request);
+ return executeSync(request);
}
@Override
public GetIndexesResult getIndexes(GetIndexesRequest request) {
- checkClient();
- return (GetIndexesResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableResult addReplica(AddReplicaRequest request) {
- checkClient();
- return (TableResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableResult dropReplica(DropReplicaRequest request) {
- checkClient();
- return (TableResult) client.execute(request);
+ return executeSync(request);
}
@Override
public ReplicaStatsResult getReplicaStats(ReplicaStatsRequest request) {
- checkClient();
- return (ReplicaStatsResult) client.execute(request);
+ return executeSync(request);
}
@Override
- synchronized public void close() {
- checkClient();
- client.shutdown();
- client = null;
+ public void close() {
+ asyncHandle.close();
}
/**
@@ -302,25 +165,7 @@ synchronized public void close() {
*/
@Override
public String[] listNamespaces() {
- SystemResult dres = doSystemRequest("show as json namespaces");
-
- String jsonResult = dres.getResultString();
- if (jsonResult == null) {
- return null;
- }
- MapValue root = JsonUtils.createValueFromJson(jsonResult, null).asMap();
-
- FieldValue namespaces = root.get("namespaces");
- if (namespaces == null) {
- return null;
- }
-
- ArrayList results = new ArrayList(
- namespaces.asArray().size());
- for (FieldValue val : namespaces.asArray()) {
- results.add(val.getString());
- }
- return results.toArray(new String[0]);
+ return ConcurrentUtil.awaitFuture(asyncHandle.listNamespaces());
}
/**
@@ -330,29 +175,7 @@ public String[] listNamespaces() {
*/
@Override
public UserInfo[] listUsers() {
- SystemResult dres = doSystemRequest("show as json users");
-
- String jsonResult = dres.getResultString();
- if (jsonResult == null) {
- return null;
- }
-
- MapValue root = JsonUtils.createValueFromJson(jsonResult, null).asMap();
-
- FieldValue users = root.get("users");
- if (users == null) {
- return null;
- }
-
- ArrayList results = new ArrayList(
- users.asArray().size());
-
- for (FieldValue val : users.asArray()) {
- String id = val.asMap().getString("id");
- String name = val.asMap().getString("name");
- results.add(new UserInfo(id, name));
- }
- return results.toArray(new UserInfo[0]);
+ return ConcurrentUtil.awaitFuture(asyncHandle.listUsers());
}
/**
@@ -362,26 +185,7 @@ public UserInfo[] listUsers() {
*/
@Override
public String[] listRoles() {
- SystemResult dres = doSystemRequest("show as json roles");
-
- String jsonResult = dres.getResultString();
- if (jsonResult == null) {
- return null;
- }
- MapValue root = JsonUtils.createValueFromJson(jsonResult, null).asMap();
-
- FieldValue roles = root.get("roles");
- if (roles == null) {
- return null;
- }
-
- ArrayList results = new ArrayList(
- roles.asArray().size());
- for (FieldValue val : roles.asArray()) {
- String role = val.asMap().getString("name");
- results.add(role);
- }
- return results.toArray(new String[0]);
+ return ConcurrentUtil.awaitFuture(asyncHandle.listRoles());
}
@@ -415,16 +219,14 @@ public SystemResult doSystemRequest(String statement,
@Override
public StatsControl getStatsControl() {
- return client.getStatsControl();
+ return asyncHandle.getStatsControl();
}
/**
* Ensure that the client exists and hasn't been closed;
*/
private void checkClient() {
- if (client == null) {
- throw new IllegalStateException("NoSQLHandle has been closed");
- }
+ asyncHandle.checkClient();
}
/**
@@ -432,7 +234,7 @@ private void checkClient() {
* For testing use
*/
public Client getClient() {
- return client;
+ return asyncHandle.getClient();
}
/**
@@ -440,7 +242,7 @@ public Client getClient() {
* For testing use
*/
public short getSerialVersion() {
- return client.getSerialVersion();
+ return asyncHandle.getSerialVersion();
}
/**
@@ -449,25 +251,11 @@ public short getSerialVersion() {
* Testing use only.
*/
public void setDefaultNamespace(String ns) {
- client.setDefaultNamespace(ns);
+ asyncHandle.setDefaultNamespace(ns);
}
- /**
- * Cloud service only.
- * The refresh method of this class is called when a Signature is refreshed
- * in SignatureProvider. This happens every 4 minutes or so. This mechanism
- * allows the authentication and authorization information cached by the
- * server to be refreshed out of band with the normal request path.
- */
- private class SigRefresh implements SignatureProvider.OnSignatureRefresh {
-
- /*
- * Attempt to refresh the server's authentication and authorization
- * information for a new signature.
- */
- @Override
- public void refresh(long refreshMs) {
- client.doRefresh(refreshMs);
- }
+ @SuppressWarnings("unchecked")
+ private T executeSync(Request request) {
+ return (T) ConcurrentUtil.awaitFuture(asyncHandle.executeASync(request));
}
}
diff --git a/driver/src/main/java/oracle/nosql/driver/http/Stats.java b/driver/src/main/java/oracle/nosql/driver/http/Stats.java
index aed97279..09eaff0a 100644
--- a/driver/src/main/java/oracle/nosql/driver/http/Stats.java
+++ b/driver/src/main/java/oracle/nosql/driver/http/Stats.java
@@ -26,6 +26,7 @@
import oracle.nosql.driver.SecurityInfoNotReadyException;
import oracle.nosql.driver.StatsControl;
import oracle.nosql.driver.ThrottlingException;
+import oracle.nosql.driver.httpclient.ConnectionPool;
import oracle.nosql.driver.kv.AuthenticationException;
import oracle.nosql.driver.ops.PreparedStatement;
import oracle.nosql.driver.ops.QueryRequest;
@@ -239,41 +240,71 @@ synchronized void clear() {
}
/**
- * Stores connection aggregated statistics. Min, max, avg show the number of
- * simultaneously opened connections.
+ * Stores connection aggregated statistics. Min, max, avg for various
+ * connection metrics.
*/
private static class ConnectionStats {
- private long count;
- private int min = Integer.MAX_VALUE;
- private int max;
- private long sum;
-
- synchronized void observe(int connections) {
- if (connections < min) {
- min = connections;
- }
- if (connections > max) {
- max = connections;
- }
- sum += connections;
- count++;
+ private final MetricStats maxConnections = new MetricStats();
+ private final MetricStats acquiredConnections = new MetricStats();
+ private final MetricStats pendingAcquires = new MetricStats();
+ private final MetricStats idleConnections = new MetricStats();
+ private final MetricStats totalConnections = new MetricStats();
+
+ synchronized void observe(ConnectionPool.PoolMetrics poolMetrics) {
+ maxConnections.observe(poolMetrics.maxConnections);
+ acquiredConnections.observe(poolMetrics.acquiredConnections);
+ pendingAcquires.observe(poolMetrics.pendingAcquires);
+ idleConnections.observe(poolMetrics.idleConnections);
+ totalConnections.observe(poolMetrics.totalConnections);
}
synchronized void toJSON(MapValue root) {
- if (count > 0) {
- MapValue connections = new MapValue();
- connections.put("min", min);
- connections.put("max", max);
- connections.put("avg", 1.0 * sum / count);
- root.put("connections", connections);
- }
+ MapValue all = new MapValue();
+ maxConnections.toJSON("maxConnections", all);
+ acquiredConnections.toJSON("acquiredConnections", all);
+ pendingAcquires.toJSON("pendingAcquires", all);
+ idleConnections.toJSON("idleConnections", all);
+ totalConnections.toJSON("totalConnections", all);
+ root.put("connections", all);
}
synchronized void clear() {
- count = 0;
- min = Integer.MAX_VALUE;
- max = 0;
- sum = 0;
+ maxConnections.clear();
+ acquiredConnections.clear();
+ pendingAcquires.clear();
+ idleConnections.clear();
+ totalConnections.clear();
+ }
+
+ private static class MetricStats {
+ long count;
+ int min = Integer.MAX_VALUE;
+ int max;
+ long sum;
+
+ void observe(int value) {
+ if (value < min) min = value;
+ if (value > max) max = value;
+ sum += value;
+ count++;
+ }
+
+ void toJSON(String name, MapValue root) {
+ if (count > 0) {
+ MapValue m = new MapValue();
+ m.put("min", min);
+ m.put("max", max);
+ m.put("avg", (double) sum / count);
+ root.put(name, m);
+ }
+ }
+
+ void clear() {
+ count = 0;
+ min = Integer.MAX_VALUE;
+ max = 0;
+ sum = 0;
+ }
}
}
@@ -545,8 +576,8 @@ private void clearStats() {
* response sizes and latency.
*/
void observeError(Request kvRequest,
- int connections) {
- observe(kvRequest, true, connections, -1, -1, -1);
+ ConnectionPool.PoolMetrics poolMetrics) {
+ observe(kvRequest, true, poolMetrics, -1, -1, -1);
}
/**
@@ -556,14 +587,16 @@ void observeError(Request kvRequest,
*
* @param kvRequest The request object.
* @param error Hard error, ie. return error to user.
- * @param connections The number of active connections in the pool.
+ * @param poolMetrics The connection pool metrics.
* @param reqSize Request size in bytes.
* @param resSize Result size in bytes.
* @param requestLatency Latency on the wire, in milliseconds, it doesn't
* include retry delay or rate limit delay.
*/
void observe(Request kvRequest, boolean error,
- int connections, int reqSize, int resSize, int requestLatency) {
+ ConnectionPool.PoolMetrics poolMetrics, int reqSize,
+ int resSize,
+ int requestLatency) {
int authCount = 0, throttleCount = 0, retries = 0, retryDelay = 0;
RetryStats retryStats = kvRequest.getRetryStats();
@@ -604,7 +637,7 @@ void observe(Request kvRequest, boolean error,
rStat.observe(error, retries, retryDelay, rateLimitDelay, authCount,
throttleCount, reqSize, resSize, requestLatency);
- connectionStats.observe(connections);
+ connectionStats.observe(poolMetrics);
if (extraQueryStats == null &&
statsControl.getProfile().ordinal() >=
diff --git a/driver/src/main/java/oracle/nosql/driver/http/StatsControlImpl.java b/driver/src/main/java/oracle/nosql/driver/http/StatsControlImpl.java
index dcf4961c..f66eb68f 100644
--- a/driver/src/main/java/oracle/nosql/driver/http/StatsControlImpl.java
+++ b/driver/src/main/java/oracle/nosql/driver/http/StatsControlImpl.java
@@ -21,12 +21,12 @@ public class StatsControlImpl
implements StatsControl {
private StatsControl.Profile profile;
- private int interval;
+ private final int interval;
private boolean prettyPrint;
- private Logger logger;
- private HttpClient httpClient; /* required for connections */
- private String id = Integer.toHexString(UUID.randomUUID().hashCode());
+ private final Logger logger;
+ private final HttpClient httpClient; /* required for connections */
+ private final String id = Integer.toHexString(UUID.randomUUID().hashCode());
private StatsHandler statsHandler;
private boolean enableCollection = false;
private Stats stats;
@@ -138,15 +138,14 @@ void observe(Request kvRequest, int networkLatency,
int reqSize, int resSize) {
if (stats != null && enableCollection) {
stats.observe(kvRequest, false,
- httpClient.getAcquiredChannelCount(),
+ httpClient.getPoolMetrics(),
reqSize, resSize, networkLatency);
}
}
void observeError(Request kvRequest) {
if (stats != null && enableCollection) {
- stats.observeError(kvRequest,
- httpClient.getAcquiredChannelCount());
+ stats.observeError(kvRequest, httpClient.getPoolMetrics());
}
}
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java b/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java
index e4f7f800..deccd73a 100644
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java
+++ b/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java
@@ -8,13 +8,16 @@
package oracle.nosql.driver.httpclient;
import static oracle.nosql.driver.util.LogUtil.logFine;
-import static oracle.nosql.driver.util.LogUtil.logInfo;
-import java.io.IOException;
import java.util.Map;
+import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;
import io.netty.bootstrap.Bootstrap;
@@ -33,11 +36,16 @@
* and tracking of Channels.
*
* Configuration:
- * minSize - actively keep this many alive, even after inactivity, by default
- * this is the number of cores
+ * minSize - actively keep this many alive, even after inactivity, by default,
+ * this is set to 2
* inactivityPeriod - remove inactive channels after this many seconds.
* If negative, don't ever remove them
* Logger
+ * maxSize - Maximum number of connections to create. Once this many channels
+ *   are acquired, further channel acquires are put into the pending queue
+ * maxPending - Maximum number of pending acquires. Once the pending queue is
+ *   full, further acquires will fail until channels are released back to the pool
+ *
*
* Usage
* o acquire()
@@ -52,6 +60,9 @@
* release
* o if no Channels are in the queue for acquire a new one is created and
* placed in the queue on release
+ * o During release, if there are pending acquire requests in the pending
+ *   queue, the released channel is used to serve a pending request instead of
+ *   being put back on the queue
*
* Keep-alive and minimum size
* o if a pool is not a minimal pool a refresh task is created on construction.
@@ -67,7 +78,7 @@
* 65s (a default that cannot be modified).
*/
-class ConnectionPool {
+public class ConnectionPool {
/* remove channels that have not been used in this many seconds */
final static int DEFAULT_INACTIVITY_PERIOD_SECS = 30;
@@ -99,7 +110,22 @@ class ConnectionPool {
* closed.
*/
private final Map stats;
- private int acquiredChannelCount;
+ private final AtomicInteger acquiredChannelCount = new AtomicInteger();
+
+ /* Executor to run keep-alive task periodically */
+ private final ScheduledExecutorService keepAlivescheduler;
+
+ private final int maxPoolSize;
+ private final int maxPending;
+
+ /* State to ensure to maxConnections */
+ private final AtomicInteger currentConnectionCount;
+
+ /* State to ensure to maxPending */
+ private final AtomicInteger pendingAcquireCount;
+
+ /* Queue to track pending acquires */
+ private final Queue> pendingAcquires;
/**
* Keepalive callback interface
@@ -118,7 +144,7 @@ interface KeepAlive {
*
* @param bootstrap (netty)
* @param handler the handler, mostly used for event callbacks
- * @param logger
+ * @param logger logger
* @param isMinimalPool set to true if this is a one-time, or minimal time
* use. In this case no refresh task is created
* @param poolMin the minimum size at which the pool should be maintained.
@@ -129,13 +155,17 @@ interface KeepAlive {
* to the minimum (if set). This allows bursty behavior to automatically
* clean up when channels are no longer required. This is more for on-prem
* than the cloud service but applies to both.
+ * @param maxPoolSize maximum number of connections in the pool
+ * @param maxPending maximum number of pending acquires
*/
ConnectionPool(Bootstrap bootstrap,
ChannelPoolHandler handler,
Logger logger,
boolean isMinimalPool,
int poolMin,
- int inactivityPeriodSeconds) {
+ int inactivityPeriodSeconds,
+ int maxPoolSize,
+ int maxPending) {
/* clone bootstrap to set handler */
this.bootstrap = bootstrap.clone();
@@ -162,6 +192,12 @@ protected void initChannel(Channel ch) throws Exception {
queue = new ConcurrentLinkedDeque();
stats = new ConcurrentHashMap();
+ this.maxPoolSize = maxPoolSize;
+ this.maxPending = maxPending;
+ this.currentConnectionCount = new AtomicInteger(0);
+ this.pendingAcquireCount = new AtomicInteger(0);
+ this.pendingAcquires = new ConcurrentLinkedDeque<>();
+
/*
* If not creating a minimal pool run RefreshTask every 30s. A
* minimal pool is short-lived so don't create the overhead.
@@ -177,10 +213,17 @@ protected void initChannel(Channel ch) throws Exception {
DEFAULT_REFRESH_PERIOD_SECS :
Math.min(DEFAULT_REFRESH_PERIOD_SECS,
this.inactivityPeriodSeconds);
- this.bootstrap.config().group().next()
- .scheduleAtFixedRate(new RefreshTask(),
- refreshPeriod, refreshPeriod,
- TimeUnit.SECONDS);
+ this.keepAlivescheduler =
+ Executors.newSingleThreadScheduledExecutor(r -> {
+ Thread t = new Thread(r, "nosql-keep-alive");
+ t.setDaemon(true);
+ return t;
+ });
+ keepAlivescheduler.scheduleAtFixedRate(new RefreshTask(),
+ refreshPeriod, refreshPeriod,
+ TimeUnit.SECONDS);
+ } else {
+ this.keepAlivescheduler = null;
}
}
@@ -204,49 +247,34 @@ final Future acquire() {
* significant time sink in terms of affecting overall latency of this call
*
* Acquired channels are removed from the queue and are "owned" by the
- * caller until released, at which time they are put back on the queue.
+ * caller until released, at which time they are put back on the queue or
+ * serve pending acquires
*/
final Future acquire(final Promise promise) {
try {
+ /* 1. Try to get a free channel from the idle pool (LIFO) */
+ Channel channel = queue.pollFirst();
+ if (channel != null) {
+ activateChannel(channel, promise);
+ return promise;
+ }
+
+ /* 2. Pool is empty.
+ * Try to create a new connection respecting maxPoolSize.
+ */
while (true) {
- /* this *removes* the channel from the queue */
- final Channel channel = queue.pollFirst();
- if (channel == null) {
- /* need a new Channel */
- Bootstrap bs = bootstrap.clone();
- ChannelFuture fut = bs.connect();
- if (fut.isDone()) {
- notifyOnConnect(fut, promise);
- } else {
- fut.addListener(new ChannelFutureListener() {
- @Override
- public void operationComplete(
- ChannelFuture future) throws Exception {
- notifyOnConnect(future, promise);
- }
- });
- }
+ int current = currentConnectionCount.get();
+ if (current >= maxPoolSize) {
+ /* Pool is full. Enqueue the request and return */
+ enqueueRequest(promise);
return promise;
}
- /*
- * This logic must happen in the event loop
- */
- EventLoop loop = channel.eventLoop();
- if (loop.inEventLoop()) {
- if (checkChannel(channel, promise)) {
- /* bad channel, try again */
- continue;
- }
- } else {
- /*
- * Note: run() may be executed some time after this method
- * returns a promise. So the caller may have to wait a
- * few milliseconds for the promise to be completed
- * (successfully or not).
- */
- loop.execute(() -> checkChannel(channel, promise));
+ /* CAS (Compare-And-Swap) to reserve a slot */
+ if (currentConnectionCount.compareAndSet(current, current + 1)) {
+ createConnection(promise);
+ return promise;
}
- break;
+ /* If CAS failed, loop retry */
}
} catch (Throwable t) {
promise.tryFailure(t);
@@ -255,21 +283,77 @@ public void operationComplete(
}
/**
- * Release a channel. This is not async. The channel is added to the
- * front of the queue. This class implements a LIFO algorithm to ensure
- * that the first, or first few channels on the queue remain active and
- * are not subject to inactivity timeouts from the server side.
- * Note that inactive released channels will be closed and not
- * re-added to the queue.
+ * Helper to safely enqueue pending requests.
+ */
+ private void enqueueRequest(Promise promise) {
+ /* Atomic check-then-act */
+ if (pendingAcquireCount.incrementAndGet() > maxPending) {
+ /* Rollback and fail */
+ pendingAcquireCount.decrementAndGet();
+ promise.tryFailure(new IllegalStateException(
+ "Pending acquire queue has reached its maximum size of "
+ + maxPending));
+ } else {
+ pendingAcquires.add(promise);
+ }
+ }
+
+ /**
+ * Helper to create a new connection.
+ */
+ private void createConnection(Promise promise) {
+ Bootstrap bs = bootstrap.clone();
+ ChannelFuture fut = bs.connect();
+ if (fut.isDone()) {
+ notifyOnConnect(fut, promise);
+ } else {
+ fut.addListener((ChannelFutureListener) future ->
+ notifyOnConnect(future, promise));
+ }
+ }
+
+ /**
+ * Release a channel. This is not async.
+ *
+ *
+ * If the released channel is inactive it will be closed and not added
+ * back to the pool. Also, if there is a pending acquire, a new channel is
+ * created to replace the closed channel.
+ *
+ *
+ * If there is a pending acquire, the released channel is assigned to the
+ * pending acquire rather than being released back to the pool.
+ *
+ *
+ * Otherwise, the channel is added to the front of the queue.
+ * This class implements a LIFO algorithm to ensure that the first,
+ * or first few channels on the queue remain active and are not subject to
+ * inactivity timeouts from the server side.
+ *
+ *
*/
void release(Channel channel) {
if (!channel.isActive()) {
logFine(logger,
"Inactive channel on release, closing: " + channel);
removeChannel(channel);
- } else {
- queue.addFirst(channel);
+ return;
+ }
+
+ /* Check for pending waiters */
+ Promise waitingPromise = pendingAcquires.poll();
+ if (waitingPromise != null) {
+ /* Decrement pending count as we pulled one out */
+ int pending = pendingAcquireCount.decrementAndGet();
+ assert pending>=0;
+ updateStats(channel, false);
+ /* Handoff directly to the waiter and skip the queue */
+ activateChannel(channel, waitingPromise);
+ return;
}
+
+ /* No waiters, put back in idle queue (LIFO) */
+ queue.addFirst(channel);
updateStats(channel, false);
try { handler.channelReleased(channel); } catch (Exception e) {}
}
@@ -278,15 +362,29 @@ void release(Channel channel) {
* Close and remove channel from pool.
* The channel may or may not currently be in the queue.
* This will normally only be called on channels that were acquired and
- * found to be inactive or otherwise invalid, but it may also occasionally
- * be called by an async netty callback when netty sees that a channel
- * has been disconnected or become otherwise inactive. In the latter case,
- * the channel is likely still in the queue and will be removed.
+ * found to be inactive.
*/
- public void removeChannel(Channel channel) {
+ private void removeChannel(Channel channel) {
queue.remove(channel);
stats.remove(channel);
channel.close();
+
+ /* Free up the slot */
+ int cur = currentConnectionCount.decrementAndGet();
+ assert cur>=0;
+
+ /* If there are waiters, use this newly freed slot to create a
+ * connection for them
+ */
+ Promise waiter = pendingAcquires.poll();
+ if (waiter != null) {
+ /* We removed a waiter */
+ int pending = pendingAcquireCount.decrementAndGet();
+ assert pending >= 0;
+ /* We are reserving the slot again */
+ currentConnectionCount.incrementAndGet();
+ createConnection(waiter);
+ }
}
/**
@@ -297,6 +395,18 @@ public void removeChannel(Channel channel) {
*/
void close() {
logFine(logger, "Closing pool, stats " + getStats());
+ if (keepAlivescheduler != null) {
+ keepAlivescheduler.shutdown();
+ }
+
+ // Reject pending queue
+ Promise pending;
+ while ((pending = pendingAcquires.poll()) != null) {
+ pending.tryFailure(new RejectedExecutionException(
+ "Connection pool is closed"));
+ pendingAcquireCount.decrementAndGet();
+ }
+
/* TODO: do this cleanly */
validatePool("close1");
Channel ch = queue.pollFirst();
@@ -311,51 +421,89 @@ void close() {
* How many channels have been acquired since this pool was created
*/
int getAcquiredChannelCount() {
- return acquiredChannelCount;
+ return acquiredChannelCount.get();
}
private void notifyOnConnect(ChannelFuture future,
- Promise promise) throws Exception {
- if (future.isSuccess()) {
- Channel channel = future.channel();
- updateStats(channel, true);
- handler.channelAcquired(channel);
- if (!promise.trySuccess(channel)) {
- /* Promise was completed (like cancelled), release channel */
- release(channel);
+ Promise promise) {
+ try {
+ if (future.isSuccess()) {
+ Channel channel = future.channel();
+ updateStats(channel, true);
+ handler.channelAcquired(channel);
+ if (!promise.trySuccess(channel)) {
+ /* Promise was completed (like cancelled), release channel */
+ release(channel);
+ }
+ } else {
+ /* Connect failed, we must free the slot we reserved */
+ int count = currentConnectionCount.decrementAndGet();
+ assert count >= 0;
+ promise.tryFailure(future.cause());
+
+ /* Retry for next pending if any (since this attempt failed) */
+ Promise waiter = pendingAcquires.poll();
+ if (waiter != null) {
+ int pending = pendingAcquireCount.decrementAndGet();
+ assert pending >= 0;
+ currentConnectionCount.incrementAndGet();
+ createConnection(waiter);
+ }
}
- } else {
- promise.tryFailure(future.cause());
+ } catch (Exception e) {
+ promise.tryFailure(e);
}
}
- private boolean checkChannel(final Channel channel,
- final Promise promise) {
-
- /*
- * If channel isn't healthy close it. It's been removed from
- * the queue
- */
- if (!channel.isActive()) {
- logFine(logger,
- "Inactive channel found, closing: " + channel);
- removeChannel(channel);
- promise.tryFailure(new IOException("inactive channel"));
- return true;
+ /**
+ * Helper to verify channel health on the EventLoop
+ */
+ private void activateChannel(final Channel channel, final Promise promise) {
+ EventLoop loop = channel.eventLoop();
+ if (loop.inEventLoop()) {
+ checkChannel(channel, promise);
+ } else {
+ loop.execute(() -> checkChannel(channel, promise));
}
+ }
+
+ private void checkChannel(final Channel channel,
+ final Promise promise) {
try {
- updateStats(channel, true);
- handler.channelAcquired(channel);
- } catch (Exception e) {} /* ignore */
- promise.setSuccess(channel);
- return false;
+ /*
+ * If channel isn't healthy close it. It's been removed from
+ * the queue
+ */
+ if (!channel.isActive()) {
+ logFine(logger,
+ "Inactive channel found, closing: " + channel);
+ removeChannel(channel);
+ /* retry the channel acquire, which might queue the request if the
+ * pool filled up in the background
+ */
+ acquire(promise);
+ } else {
+ try {
+ updateStats(channel, true);
+ handler.channelAcquired(channel);
+ } catch (Exception e) {} /* ignore */
+ if (!promise.trySuccess(channel)) {
+ release(channel);
+ }
+ }
+ } catch (Throwable cause) {
+ if (channel != null) {
+ removeChannel(channel); // Ensure slot is freed
+ }
+ promise.tryFailure(cause);
+ }
}
/**
* Returns the total number of channels, acquired and not, in the pool
*/
int getTotalChannels() {
- return queue.size() + acquiredChannelCount;
+ return queue.size() + acquiredChannelCount.get();
}
/**
@@ -365,6 +513,10 @@ int getFreeChannels() {
return queue.size();
}
+ int getPendingAcquires() {
+ return pendingAcquireCount.get();
+ }
+
/**
* Prune channels
* 1. remove any inactive channels (closed by other side)
@@ -388,7 +540,7 @@ int pruneChannels() {
}
}
- /**
+ /*
* If inactivityPeriodSeconds is negative there is nothing to
* prune
*/
@@ -400,7 +552,10 @@ int pruneChannels() {
* period, remove it
*/
ChannelStats cs = stats.get(ch);
- assert cs != null;
+ /* stats race condition check */
+ if (cs == null) {
+ continue;
+ }
long inactive = (now - cs.getLastAcquired())/1000;
if (inactive > inactivityPeriodSeconds) {
logFine(logger,
@@ -444,7 +599,7 @@ int doKeepAlive(int keepAlivePeriod) {
* This works for poolMin of 0 as well. If HttpClient is null
* there is no way to do this either.
*/
- int numToSend = poolMin - acquiredChannelCount;
+ int numToSend = poolMin - acquiredChannelCount.get();
if (numToSend <= 0) {
return 0;
}
@@ -504,12 +659,15 @@ private void validatePool(final String caller) {
* Some sanity checking. Stats size should include all channels in the
* pool -- acquired plus not-acquired
*/
- if ((queue.size() + acquiredChannelCount) != stats.size()) {
+
+        // This check is not valid under concurrent access, so it is removed
+
+ /*if ((queue.size() + acquiredChannelCount.get()) != stats.size()) {
logInfo(logger,
"Pool count discrepancy, called from " + caller +
" : Queue size, acquired count, stats size :" + queue.size() + ", " +
acquiredChannelCount + ", " + stats.size());
- }
+ }*/
}
/**
@@ -517,16 +675,16 @@ private void validatePool(final String caller) {
*/
private void updateStats(Channel channel, boolean isAcquire) {
ChannelStats cstats = stats.get(channel);
- if (cstats == null) {
+ if (cstats == null && isAcquire) {
cstats = new ChannelStats();
stats.put(channel, cstats);
}
synchronized(this) {
if (isAcquire) {
- acquiredChannelCount++;
+ acquiredChannelCount.incrementAndGet();
cstats.acquired();
} else {
- acquiredChannelCount--;
+ acquiredChannelCount.decrementAndGet();
}
}
}
@@ -541,12 +699,13 @@ void logStats() {
*/
String getStats() {
StringBuilder sb = new StringBuilder();
- sb.append("acquiredCount=" + acquiredChannelCount +
- ", freeChannelCount=" + queue.size() +
- ", totalChannelCount=" + stats.size());
+ sb.append("acquiredCount=").append(acquiredChannelCount)
+ .append(", freeChannelCount=").append(queue.size())
+ .append(", totalChannelCount=").append(stats.size())
+ .append(", pendingRequests=").append(pendingAcquireCount.get());
sb.append(", [");
for (Map.Entry entry : stats.entrySet()) {
- sb.append("channel=" + entry.getKey().id() + "[");
+ sb.append("channel=").append(entry.getKey().id()).append("[");
entry.getValue().toStringBuilder(sb);
sb.append("]");
}
@@ -576,7 +735,7 @@ int getUseCount(Channel ch) {
* An internal class that maintains stats on Channels. Consider exposing
* it beyond tests.
*/
- class ChannelStats {
+ static class ChannelStats {
/* when the channel was last acquired -- timestamp */
private long lastAcquired;
/* how many times the channel has been used */
@@ -596,8 +755,8 @@ int getUseCount() {
}
void toStringBuilder(StringBuilder sb) {
- sb.append("useCount=" + useCount +
- ", lastAcquired=" + java.time.Instant.ofEpochMilli(lastAcquired));
+ sb.append("useCount=").append(useCount).append(", lastAcquired=");
+ sb.append(java.time.Instant.ofEpochMilli(lastAcquired));
}
@Override
@@ -631,4 +790,41 @@ public final void run() {
}
}
}
+
+ /**
+ * DTO for connection pool metrics.
+ */
+ public static class PoolMetrics {
+ public final int maxConnections;
+ public final int acquiredConnections;
+ public final int pendingAcquires;
+ public final int idleConnections;
+ public final int totalConnections; // acquired + idle
+
+ private PoolMetrics(int maxConnections, int acquiredConnections, int pendingAcquires, int idleConnections) {
+ this.maxConnections = maxConnections;
+ this.acquiredConnections = acquiredConnections;
+ this.pendingAcquires = pendingAcquires;
+ this.idleConnections = idleConnections;
+ this.totalConnections = acquiredConnections + idleConnections;
+ }
+
+ @Override
+ public String toString() {
+ return "ConnectionPoolMetrics{" +
+ "max=" + maxConnections +
+ ", acquired=" + acquiredConnections +
+ ", pending=" + pendingAcquires +
+ ", total" + "=" + totalConnections +
+ ", idle=" + idleConnections +
+ "}";
+ }
+ }
+
+ PoolMetrics getMetrics() {
+ return new PoolMetrics(this.maxPoolSize,
+ getAcquiredChannelCount(),
+ getPendingAcquires(),
+ getFreeChannels());
+ }
}
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java
index 492e62b7..82a41779 100644
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java
+++ b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java
@@ -11,13 +11,10 @@
import static io.netty.handler.codec.http.HttpMethod.HEAD;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static oracle.nosql.driver.util.HttpConstants.CONNECTION;
-import static oracle.nosql.driver.util.LogUtil.isFineEnabled;
import static oracle.nosql.driver.util.LogUtil.logFine;
-import static oracle.nosql.driver.util.LogUtil.logInfo;
-import static oracle.nosql.driver.util.LogUtil.logWarning;
-import java.io.IOException;
-import java.util.concurrent.ExecutionException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.logging.Logger;
@@ -29,15 +26,19 @@
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
+import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.ssl.SslContext;
+import io.netty.handler.timeout.ReadTimeoutHandler;
import io.netty.util.AttributeKey;
-import io.netty.util.concurrent.Future;
/*
* If this code is ever made generic, the proxy information obtained
* from this config needs to be abstracted to a generic class.
*/
+import io.netty.util.ReferenceCountUtil;
+import io.netty.util.concurrent.FutureListener;
import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.util.ConcurrentUtil;
/**
* Netty HTTP client. Initialization process:
@@ -56,26 +57,32 @@
* use by requests.
*
*
- * Using the client to send request and get a synchronous response. The
+ * Using the client to send request. The
* request must be an instance of HttpRequest:
+ *
+ * For synchronous calls, wait for a response:
+ *
+ * response.join() or response.get();
+ *
+ *
+ *
+ * For asynchronous calls, consume the response future.
+ *
+ *
+ * If there was a problem with the send or receive, future completes
+ * with exception.
+ *
+ *
*
- * 1. Get a Channel.
- * Channel channel = client.getChannel(timeoutMs);
- * 2. Create a ResponseHandler to handle a response.
- * ResponseHandler rhandler = new ResponseHandler(client, logger, channel);
- * Note that the ResponseHandler will release the Channel.
- * 3. Call runRequest to send the request.
- * client.runRequest(request, rhandler, channel);
- * 4. For synchronous calls, wait for a response:
- * rhandler.await(timeoutMs);
- * If there was a problem with the send or receive this call will throw a
- * Throwable with the relevant information. If successful the response
- * information can be extracted from the ResponseHandler.
- * ResponseHandler instances must be closed using the close() method. This
- * releases resources associated with the request/response dialog such as the
- * channel and the HttpResponse itself.
- *
- * TODO: asynchronous handler
*/
public class HttpClient {
@@ -84,8 +91,11 @@ public class HttpClient {
static final int DEFAULT_HANDSHAKE_TIMEOUT_MS = 3000;
static final int DEFAULT_MIN_POOL_SIZE = 2; // min pool size
- static final AttributeKey STATE_KEY =
- AttributeKey.valueOf("rqstate");
+ /* AttributeKey to attach a CompletableFuture to the Channel,
+     * allowing the HttpClientHandler to signal completion.
+ */
+ public static final AttributeKey>
+ STATE_KEY = AttributeKey.valueOf("rqstate");
//private final FixedChannelPool pool;
private final ConnectionPool pool;
@@ -98,12 +108,6 @@ public class HttpClient {
private final int port;
private final String name;
- /*
- * Amount of time to wait for acquiring a channel before timing
- * out and possibly retrying
- */
- private final int acquireRetryIntervalMs;
-
/*
* Non-null if using SSL
*/
@@ -153,7 +157,9 @@ public static HttpClient createMinimalClient(String host,
true, /* minimal client */
DEFAULT_MAX_CONTENT_LENGTH,
DEFAULT_MAX_CHUNK_SIZE,
- sslCtx, handshakeTimeoutMs, name, logger);
+ sslCtx, handshakeTimeoutMs, name, logger,
+ 1, /* max connections */
+ 1 /* max pending connections */);
}
/**
@@ -197,7 +203,60 @@ public HttpClient(String host,
this(host, port, numThreads, connectionPoolMinSize,
inactivityPeriodSeconds, false /* not minimal */,
- maxContentLength, maxChunkSize, sslCtx, handshakeTimeoutMs, name, logger);
+ maxContentLength, maxChunkSize, sslCtx, handshakeTimeoutMs, name,
+ logger,
+ 100 /* max connections */,
+ 10_000 /* max pending connections */);
+ }
+
+ /**
+ * Creates a new HttpClient class capable of sending Netty HttpRequest
+ * instances and receiving replies. This is a concurrent, asynchronous
+ * interface capable of sending and receiving on multiple HTTP channels
+ * at the same time.
+ *
+ * @param host the hostname for the HTTP server
+ * @param port the port for the HTTP server
+ * @param numThreads the number of async threads to use for Netty
+ * notifications. If 0, a default value is used based on the number of
+ * cores
+ * @param connectionPoolMinSize the number of connections to keep in the
+ * pool and keep alive using a minimal HTTP request. If 0, none are kept
+ * alive
+ * @param inactivityPeriodSeconds the number of seconds to keep an
+ * inactive channel/connection before removing it. 0 means use the default,
+ * a negative number means there is no timeout and channels are not
+ * removed
+ * @param maxContentLength maximum size in bytes of requests/responses.
+ * If 0, a default value is used (32MB).
+ * @param maxChunkSize maximum size in bytes of chunked response messages.
+ * If 0, a default value is used (64KB).
+ * @param sslCtx if non-null, SSL context to use for connections.
+ * @param handshakeTimeoutMs if not zero, timeout to use for SSL handshake
+ * @param name A name to use in logging messages for this client.
+ * @param logger A logger to use for logging messages.
+ * @param maxConnections Maximum size of the connection pool
+ * @param maxPendingConnections The maximum number of pending acquires
+ * for the pool
+ */
+ public HttpClient(String host,
+ int port,
+ int numThreads,
+ int connectionPoolMinSize,
+ int inactivityPeriodSeconds,
+ int maxContentLength,
+ int maxChunkSize,
+ SslContext sslCtx,
+ int handshakeTimeoutMs,
+ String name,
+ Logger logger,
+ int maxConnections,
+ int maxPendingConnections) {
+
+ this(host, port, numThreads, connectionPoolMinSize,
+ inactivityPeriodSeconds, false /* not minimal */,
+ maxContentLength, maxChunkSize, sslCtx, handshakeTimeoutMs, name,
+ logger, maxConnections, maxPendingConnections);
}
/*
@@ -214,7 +273,9 @@ private HttpClient(String host,
SslContext sslCtx,
int handshakeTimeoutMs,
String name,
- Logger logger) {
+ Logger logger,
+ int maxConnections,
+ int maxPendingConnections) {
this.logger = logger;
this.sslCtx = sslCtx;
@@ -257,7 +318,9 @@ private HttpClient(String host,
pool = new ConnectionPool(b, poolHandler, logger,
isMinimalClient,
connectionPoolMinSize,
- inactivityPeriodSeconds);
+ inactivityPeriodSeconds,
+ maxConnections,
+ maxPendingConnections);
/*
* Don't do keepalive if min size is not set. That configuration
@@ -273,11 +336,6 @@ public boolean keepAlive(Channel ch) {
}
});
}
-
- /* TODO: eventually add this to Config? */
- acquireRetryIntervalMs = Integer.getInteger(
- "oracle.nosql.driver.acquire.retryinterval",
- 1000);
}
SslContext getSslContext() {
@@ -366,11 +424,19 @@ public int getFreeChannelCount() {
return pool.getFreeChannels();
}
+ public int getPendingChannelsCount() {
+ return pool.getPendingAcquires();
+ }
+
/* available for testing */
ConnectionPool getConnectionPool() {
return pool;
}
+ public ConnectionPool.PoolMetrics getPoolMetrics() {
+ return pool.getMetrics();
+ }
+
/**
* Cleanly shut down the client.
*/
@@ -388,70 +454,20 @@ public void shutdown() {
syncUninterruptibly();
}
- public Channel getChannel(int timeoutMs)
- throws InterruptedException, ExecutionException, TimeoutException {
-
- long startMs = System.currentTimeMillis();
- long now = startMs;
- int retries = 0;
-
- while (true) {
- long msDiff = now - startMs;
-
- /* retry loop with at most (retryInterval) ms timeouts */
- long thisTimeoutMs = (timeoutMs - msDiff);
- if (thisTimeoutMs <= 0) {
- String msg = "Timed out trying to acquire channel";
- logInfo(logger, "HttpClient " + name + " " + msg);
- throw new TimeoutException(msg);
- }
- if (thisTimeoutMs > acquireRetryIntervalMs) {
- thisTimeoutMs = acquireRetryIntervalMs;
- }
- Future fut = pool.acquire();
- Channel retChan = null;
- try {
- retChan = fut.get(thisTimeoutMs, TimeUnit.MILLISECONDS);
- } catch (TimeoutException e) {
- if (retries == 0) {
- logFine(logger, "Timed out after " +
- (System.currentTimeMillis() - startMs) +
- "ms trying to acquire channel: retrying");
+ private CompletableFuture getChannel() {
+ CompletableFuture acquireFuture = new CompletableFuture<>();
+ pool.acquire().addListener((FutureListener) channelFuture -> {
+ if (channelFuture.isSuccess()) {
+ Channel channel = channelFuture.getNow();
+ if (!acquireFuture.complete(channel)) {
+                /* future already completed; release channel back to pool */
+ pool.release(channel);
}
- /* fall through */
+ } else {
+ acquireFuture.completeExceptionally(channelFuture.cause());
}
-
- /*
- * Ensure that the channel is in good shape. retChan is null
- * on a timeout exception from above; that path will retry.
- */
- if (retChan != null) {
- if (fut.isSuccess() && retChan.isActive()) {
- /*
- * Clear out any previous state. The channel should not
- * have any state associated with it, but this code is here
- * just in case it does.
- */
- if (retChan.attr(STATE_KEY).get() != null) {
- if (isFineEnabled(logger)) {
- logFine(logger,
- "HttpClient acquired a channel with " +
- "a still-active state: clearing.");
- }
- retChan.attr(STATE_KEY).set(null);
- }
- return retChan;
- }
- logFine(logger,
- "HttpClient " + name + ", acquired an inactive " +
- "channel, releasing it and retrying, reason: " +
- fut.cause());
- releaseChannel(retChan);
- }
- /* reset "now" and increment retries */
- now = System.currentTimeMillis();
- retries++;
- }
+ });
+ return acquireFuture;
}
public void releaseChannel(Channel channel) {
@@ -466,58 +482,129 @@ public void releaseChannel(Channel channel) {
}
/**
- * Close and remove channel from client pool.
- */
- public void removeChannel(Channel channel) {
- logFine(logger, "closing and removing channel " + channel);
- pool.removeChannel(channel);
- }
-
-
- /**
- * Sends an HttpRequest, setting up the ResponseHandler as the handler to
- * use for the (asynchronous) response.
+ * Sends an HttpRequest to the server.
*
- * @param request the request
- * @param handler the response handler
- * @param channel the Channel to use for the request/response
- *
- * @throws IOException if there is a network problem (bad channel). Such
- * exceptions can be retried.
+ * @param request HttpRequest
+ * @param timeoutMs Time to wait for the response from the server.
+ * Returned future completes with {@link TimeoutException}
+ * in case of timeout
+ * @return {@link CompletableFuture} holding the response from the server.
+ * @apiNote The caller must release the response by calling
+ * {@link FullHttpResponse#release()} or
+ * {@link ReferenceCountUtil#release(Object)}
*/
- public void runRequest(HttpRequest request,
- ResponseHandler handler,
- Channel channel)
+ public CompletableFuture runRequest(HttpRequest request,
+ int timeoutMs) {
+ CompletableFuture responseFuture =
+ new CompletableFuture<>();
+ long deadlineNs = System.nanoTime() +
+ TimeUnit.MILLISECONDS.toNanos(timeoutMs);
- throws IOException {
+ /* Acquire a channel from the pool */
+ CompletableFuture acuireFuture = getChannel();
- /*
- * If the channel goes bad throw IOE to allow the caller to retry
- */
- if (!channel.isActive()) {
- String msg = "HttpClient " + name + ", runRequest, channel " +
- channel + " is not active: ";
- logWarning(logger, msg);
- throw new IOException(msg);
- }
-
- RequestState state = new RequestState(handler);
- channel.attr(STATE_KEY).set(state);
+ /* setup timeout on channel acquisition */
+ acuireFuture.orTimeout(timeoutMs, TimeUnit.MILLISECONDS);
- /*
- * Send the request. If the operation fails set the exception
- * on the ResponseHandler where it will be thrown synchronously to
- * users of that object. operationComplete will likely be called in
- * another thread.
+ /* when acquire future completes exceptionally, release request bytebuf
+ * and complete the response future
*/
- channel.writeAndFlush(request).
- addListener((ChannelFutureListener) future -> {
- if (!future.isSuccess()) {
- /* handleException logs this exception */
- handler.handleException("HttpClient: send failed",
- future.cause());
+ acuireFuture.whenComplete((ch, err) -> {
+ if (err != null) {
+ ReferenceCountUtil.release(request);
+ /* Unwrap to check the real cause */
+ Throwable cause = err instanceof CompletionException ?
+ err.getCause() : err;
+ if (cause instanceof TimeoutException) {
+ final String msg = "Timed out trying to acquire channel";
+ responseFuture.completeExceptionally(
+ new CompletionException(new TimeoutException(msg)));
+ }
+                /* Complete with original cause (no-op if already completed) */
+ responseFuture.completeExceptionally(cause);
+ }
+ });
+
+ /* send request on acquired channel */
+ acuireFuture.thenAccept(channel -> {
+ long remainingTimeoutNs = deadlineNs - System.nanoTime();
+ long remainingTimeoutMs = Math.max(1,
+ TimeUnit.NANOSECONDS.toMillis(remainingTimeoutNs));
+
+ /* Execute the request on the acquired channel */
+ CompletableFuture requestExecutionFuture =
+ runRequest(request, channel, remainingTimeoutMs);
+
+ /* When the request execution future completes (either
+ * successfully or exceptionally),
+ * complete the public responseFuture and ensure the channel
+ * is released back to the pool.
+ */
+ requestExecutionFuture.whenComplete((response, throwable) -> {
+ /* Always release the channel */
+ releaseChannel(channel);
+ if (throwable != null) {
+ responseFuture.completeExceptionally(throwable);
+ } else {
+ responseFuture.complete(response);
}
});
+ });
+ return responseFuture;
+ }
+
+ /**
+ * Sends an HttpRequest to the server on a given netty channel.
+ *
+ * @param request HttpRequest
+ * @param channel Netty channel
+ * @param timeoutMs Time to wait for the response from the server.
+ * Returned future completes with {@link TimeoutException}
+ * in case of timeout
+ * @return {@link CompletableFuture} holding the response from the server.
+ * @apiNote The caller must release the response by calling
+ * {@link FullHttpResponse#release()} or
+ * {@link ReferenceCountUtil#release(Object)}
+ */
+ private CompletableFuture runRequest(HttpRequest request,
+ Channel channel,
+ long timeoutMs) {
+ CompletableFuture
+ responseFuture = new CompletableFuture<>();
+
+ /* Attach the responseFuture to the channel's attribute.
+ * This responseFuture is completed by HttpClientHandler when response
+ * arrives from the server.
+ */
+ channel.attr(STATE_KEY).set(responseFuture);
+
+ /* Add timeout handler to the pipeline.
+ * Timeout handler makes sure that responseFuture completes within
+ * timeoutMs. This also makes sure that response future always
+ * completes even during network disconnection, and the channel is
+ * released back to the pool. The timeout handler is removed in
+ * HttpClientHandler upon response from the server.
+ */
+ channel.pipeline().addFirst(
+ new ReadTimeoutHandler(timeoutMs, TimeUnit.MILLISECONDS));
+
+ /* Write the request to the channel and flush it */
+ channel.writeAndFlush(request)
+ .addListener((ChannelFutureListener) writeFuture -> {
+ if (!writeFuture.isSuccess()) {
+ /* If write fails, remove channel attr and timeout handler and
+ * complete the response future exceptionally.
+ * When this happens, HttpClientHandler won't be called as
+ * request is not sent to the server.
+ */
+ Throwable err = writeFuture.cause();
+ logFine(logger, "HttpClient: send failed, cause:" + err);
+ channel.attr(STATE_KEY).set(null);
+ channel.pipeline().remove(ReadTimeoutHandler.class);
+ responseFuture.completeExceptionally(err);
+ }
+ });
+ return responseFuture;
}
/**
@@ -525,8 +612,7 @@ public void runRequest(HttpRequest request,
*/
boolean doKeepAlive(Channel ch) {
final int keepAliveTimeout = 3000; /* ms */
- ResponseHandler responseHandler =
- new ResponseHandler(this, logger, ch);
+ FullHttpResponse response = null;
try {
final HttpRequest request =
new DefaultFullHttpRequest(HTTP_1_1, HEAD, "/");
@@ -536,19 +622,14 @@ boolean doKeepAlive(Channel ch) {
* other server may reject them and close the connection
*/
request.headers().add(HOST, host);
- runRequest(request, responseHandler, ch);
- boolean isTimeout = responseHandler.await(keepAliveTimeout);
- if (isTimeout) {
- logFine(logger,
- "Timeout on keepalive HEAD request on channel " + ch);
- return false;
- }
+ response = ConcurrentUtil.awaitFuture(
+ runRequest(request, ch, keepAliveTimeout));
/*
* LBaaS will return a non-200 status but that is expected as the
* path "/" does not map to the service. This is ok because all that
* matters is that the connection remain alive.
*/
- String conn = responseHandler.getHeaders().get(CONNECTION);
+ String conn = response.headers().get(CONNECTION);
if (conn == null || !"keep-alive".equalsIgnoreCase(conn)) {
logFine(logger,
"Keepalive HEAD request did not return keep-alive " +
@@ -556,10 +637,14 @@ boolean doKeepAlive(Channel ch) {
}
return true;
- } catch (Throwable t) {
- logFine(logger, "Exception sending HTTP HEAD: " + t);
+ } catch (Throwable t) {
+ String msg = String.format(
+ "Exception sending keepalive on [channel:%s] error:%s",
+ ch.id(), t.getMessage());
+ logFine(logger, msg);
} finally {
- responseHandler.releaseResponse();
+ /* Release response */
+ ReferenceCountUtil.release(response);
}
/* something went wrong, caller is responsible for disposition */
return false;
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientChannelPoolHandler.java b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientChannelPoolHandler.java
index d58ee102..cb55fb1e 100644
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientChannelPoolHandler.java
+++ b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientChannelPoolHandler.java
@@ -156,7 +156,10 @@ public void channelInactive(ChannelHandlerContext ctx) {
logFine(client.getLogger(),
"HttpClient " + client.getName() +
", channel " + ctx.channel() + " inactive");
- client.removeChannel(ctx.channel());
+ /* Removing a channel from the pool is handled internally.
+ * No need for the below call.
+ */
+ //client.removeChannel(ctx.channel());
ctx.fireChannelInactive();
}
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java
index 649d851a..a41c8087 100644
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java
+++ b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java
@@ -7,18 +7,23 @@
package oracle.nosql.driver.httpclient;
+import static oracle.nosql.driver.httpclient.HttpClient.STATE_KEY;
import static oracle.nosql.driver.util.HttpConstants.REQUEST_ID_HEADER;
import static oracle.nosql.driver.util.LogUtil.isFineEnabled;
import static oracle.nosql.driver.util.LogUtil.logFine;
import static oracle.nosql.driver.util.LogUtil.logWarning;
import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeoutException;
import java.util.logging.Logger;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.timeout.ReadTimeoutException;
+import io.netty.handler.timeout.ReadTimeoutHandler;
/**
*
@@ -34,8 +39,8 @@ public class HttpClientHandler extends ChannelInboundHandlerAdapter {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
- final RequestState state =
- ctx.channel().attr(HttpClient.STATE_KEY).get();
+ final CompletableFuture responseFuture =
+ ctx.channel().attr(STATE_KEY).getAndSet(null);
/*
* TODO/think about:
@@ -44,10 +49,15 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) {
* o redirects
*/
+ /* Remove timeout handler upon response arrival */
+ if (ctx.pipeline().get(ReadTimeoutHandler.class) != null) {
+ ctx.pipeline().remove(ReadTimeoutHandler.class);
+ }
+
if (msg instanceof FullHttpResponse) {
FullHttpResponse fhr = (FullHttpResponse) msg;
- if (state == null) {
+ if (responseFuture == null) {
/*
* This message came in after the client was done processing
* a request in a different thread.
@@ -65,14 +75,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) {
fhr.release();
return;
}
-
- state.setResponse(fhr);
-
- /*
- * Notify the response handler
- */
- state.getHandler().receive(state);
-
+ responseFuture.complete(fhr);
return;
}
logWarning(logger,
@@ -82,24 +85,31 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
- final RequestState state =
- ctx.channel().attr(HttpClient.STATE_KEY).get();
- if (state != null) {
+ final CompletableFuture responseFuture =
+ ctx.channel().attr(STATE_KEY).getAndSet(null);
+ if (responseFuture != null) {
/* handleException logs */
- state.getHandler().handleException("HttpClientHandler read failed",
- cause);
+ logFine(logger, "HttpClientHandler read failed, cause: " + cause);
+ Throwable err = cause;
+ if (err instanceof ReadTimeoutException) {
+ err = new TimeoutException("Request timed out while waiting "
+ + "for the response from the server");
+ }
+ responseFuture.completeExceptionally(err);
}
ctx.close();
}
@Override
- public void channelInactive(ChannelHandlerContext ctx) throws Exception {
- final RequestState state =
- ctx.channel().attr(HttpClient.STATE_KEY).get();
+ public void channelInactive(ChannelHandlerContext ctx) {
+ final CompletableFuture responseFuture =
+ ctx.channel().attr(STATE_KEY).getAndSet(null);
/* handleException logs */
- if (state != null) {
+ if (responseFuture != null && !responseFuture.isDone()) {
String msg = "Channel is inactive: " + ctx.channel();
- state.getHandler().handleException(msg, new IOException(msg));
+ Throwable cause = new IOException(msg);
+ logFine(logger, msg + ", cause: " + cause);
+ responseFuture.completeExceptionally(cause);
}
/* should the context be closed? */
ctx.close();
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/RequestState.java b/driver/src/main/java/oracle/nosql/driver/httpclient/RequestState.java
deleted file mode 100644
index 185399d5..00000000
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/RequestState.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*-
- * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
- *
- * Licensed under the Universal Permissive License v 1.0 as shown at
- * https://oss.oracle.com/licenses/upl/
- */
-
-package oracle.nosql.driver.httpclient;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.handler.codec.http.FullHttpResponse;
-import io.netty.handler.codec.http.HttpHeaders;
-import io.netty.handler.codec.http.HttpResponseStatus;
-
-/**
- * An instance of this class is created when a request is sent and is used to
- * collect response state. The instance is attached to a Channel's attribute
- * map, which means that this will work for HTTP/1.1 where channels are not
- * multiplexed, but will need to change for HTTP/2.
- *
- * This class is not thread-safe but is used in a safe, single-threaded manner
- * mapped 1:1 with a channel associated with a single HTTP request/response
- * cycle.
- *
- * At this time this object does not aggregate chunks of content into a single
- * buffer. It is expected that this is done using an HttpContentAggregator in
- * the pipeline and is only called with a FullHttpResponse. If aggregation is
- * desired here it can be added using a CompositeByteBuf and calls to add
- * content incrementally.
- */
-class RequestState {
-
- private final ResponseHandler handler;
- private FullHttpResponse response;
-
- RequestState(ResponseHandler handler) {
- this.handler = handler;
- }
-
- ResponseHandler getHandler() {
- return handler;
- }
-
- HttpResponseStatus getStatus() {
- if (response != null) {
- return response.status();
- }
- return null;
- }
-
- HttpHeaders getHeaders() {
- if (response != null) {
- return response.headers();
- }
- return null;
- }
-
- int getContentSize() {
- ByteBuf buf = getBuf();
- if (buf != null) {
- return buf.readableBytes();
- }
- return -1;
- }
-
- ByteBuf getBuf() {
- if (response != null) {
- return response.content();
- }
- return null;
- }
-
- void setResponse(FullHttpResponse response) {
- this.response = response;
- }
-
- FullHttpResponse getResponse() {
- return response;
- }
-}
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/ResponseHandler.java b/driver/src/main/java/oracle/nosql/driver/httpclient/ResponseHandler.java
deleted file mode 100644
index 12b38216..00000000
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/ResponseHandler.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*-
- * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
- *
- * Licensed under the Universal Permissive License v 1.0 as shown at
- * https://oss.oracle.com/licenses/upl/
- */
-
-package oracle.nosql.driver.httpclient;
-
-import static oracle.nosql.driver.util.LogUtil.logFine;
-import static oracle.nosql.driver.util.HttpConstants.REQUEST_ID_HEADER;
-
-import java.io.Closeable;
-import java.net.ProtocolException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.logging.Logger;
-import javax.net.ssl.SSLException;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
-import io.netty.handler.codec.http.HttpHeaders;
-import io.netty.handler.codec.http.HttpResponseStatus;
-import io.netty.util.ReferenceCountUtil;
-
-/**
- * This class allows for asynchronous or synchronous request operation.
- * An instance is passed when sending a request. The caller can handle the
- * response asynchronously by overriding the responseReceived() method, or
- * synchronously by using the default implementation and waiting for the
- * response.
- *
- * Instances of this class must be closed using close().
- *
- * TODO: examples of both sync and async usage
- */
-public class ResponseHandler implements Closeable {
-
- private HttpResponseStatus status;
- private HttpHeaders headers;
- private ByteBuf content;
- private RequestState state;
- private final HttpClient httpClient;
- private final Channel channel;
- private final String requestId;
-
- /* logger may be null */
- private final Logger logger;
-
- /* this is set if there is an exception in send or receive */
- private Throwable cause;
-
- /* OK to retry: affects logic when there are specific protocol errors */
- private final boolean allowRetry;
-
- /*
- * the latch counts down when the response is received. It's only needed
- * in synchronous mode
- */
- private final CountDownLatch latch;
-
- public ResponseHandler(final HttpClient httpClient,
- final Logger logger,
- final Channel channel) {
- this(httpClient, logger, channel, null, false);
- }
-
- public ResponseHandler(final HttpClient httpClient,
- final Logger logger,
- final Channel channel,
- final String requestId,
- boolean allowRetry) {
- this.httpClient = httpClient;
- this.logger = logger;
- this.channel = channel;
- this.requestId = requestId;
- this.allowRetry = allowRetry;
-
- /*
- * TODO: this won't be needed for an async client
- */
- latch = new CountDownLatch(1);
- }
-
- /**
- * An exception occurred. Set cause and count down the latch to wake
- * up any waiters. This is synchronized because the call may come from
- * a different thread.
- */
- public void handleException(String msg, Throwable th) {
-
- synchronized(this) {
- this.cause = th;
- if (th instanceof SSLException) {
- /* disconnect channel to re-create channel and engine */
- channel.disconnect();
- }
- latch.countDown();
- }
- logFine(logger, msg + ", cause: " + th);
- }
-
- /**
- * The full response has been received. Users can override this method
- * to do full async operation. Synchronous users will wait for the latch
- * and get the response objects from this class.
- */
- public void responseReceived(HttpResponseStatus rStatus,
- HttpHeaders rHeaders,
- ByteBuf rContent) {
- status = rStatus;
- headers = rHeaders;
- content = rContent;
- }
-
- /**
- * Wait for the latch to count down. This can happen on a successful
- * receive operation or an exception that occurs during send or receive.
- */
- public boolean await(int milliSeconds) throws Throwable {
-
- boolean ret = !latch.await(milliSeconds, TimeUnit.MILLISECONDS);
-
- synchronized(this) {
- if (cause != null) {
- throw cause;
- }
- }
- return ret;
- }
-
- /**
- * Gets the status, or null if the operation has not yet completed
- */
- public HttpResponseStatus getStatus() {
- return status;
- }
-
- /**
- * Gets the headers, or null if the operation has not yet completed
- */
- public HttpHeaders getHeaders() {
- return headers;
- }
-
- /**
- * Gets the content, or null if the operation has not yet completed
- */
- public ByteBuf getContent() {
- return content;
- }
-
- /**
- * Gets the Throwable if an exception has occurred during send or
- * receive
- */
- public Throwable getCause() {
- return cause;
- }
-
- /**
- * Internal close that does not release the channel. This is used
- * by keepalive HEAD requests
- */
- void releaseResponse() {
- if (state != null) {
- if (state.getResponse() != null) {
- ReferenceCountUtil.release(state.getResponse());
- }
- }
- }
-
- @Override
- public void close() {
- if (channel != null) {
- httpClient.releaseChannel(channel);
- }
-
- /*
- * Release the response
- */
- releaseResponse();
- }
-
- /*
- * TODO: error responses with and without status
- */
-
- /*
- * Internal receive that calls the public method and counts down the latch.
- * Use try/finally in case there is a throw in the receive.
- */
- void receive(RequestState requestState) {
- /*
- * Check the request id in response's header, discards this response
- * if it is not for the request.
- */
- if (requestId != null) {
- String resReqId = requestState.getHeaders().get(REQUEST_ID_HEADER);
- if (resReqId == null || !resReqId.equals(requestId)) {
- logFine(logger,
- "Expected response for request " + requestId +
- ", but got response for request " + resReqId +
- ": discarding response");
- if (resReqId == null) {
- logFine(logger, "Headers for discarded response: " +
- requestState.getHeaders());
- if (this.allowRetry) {
- this.cause = new ProtocolException(
- "Received invalid response with no requestId");
- latch.countDown();
- }
- }
- if (requestState.getResponse() != null) {
- ReferenceCountUtil.release(requestState.getResponse());
- }
- return;
- }
- }
-
- /*
- * We got a valid message: don't accept any more for this handler.
- * This logic may change if we enable full async and allow multiple
- * messages to be processed by the same channel for the same client.
- * This clears the response handler from this channel so that any
- * additional messages on this channel will be properly discarded.
- */
- channel.attr(HttpClient.STATE_KEY).set(null);
-
- state = requestState;
- try {
- responseReceived(state.getStatus(),
- state.getHeaders(),
- state.getBuf());
- } finally {
- latch.countDown();
- }
- }
-}
diff --git a/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java b/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java
index bc2f76ec..81817d36 100644
--- a/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java
+++ b/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java
@@ -26,6 +26,8 @@
import java.util.Date;
import java.util.Timer;
import java.util.TimerTask;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -38,6 +40,7 @@
import oracle.nosql.driver.ops.Request;
import io.netty.handler.codec.http.HttpHeaders;
+import oracle.nosql.driver.util.ConcurrentUtil;
/**
* Cloud service only.
@@ -159,6 +162,7 @@ public class SignatureProvider
private String serviceHost;
private Region region;
private Logger logger;
+ private final ReentrantLock lock = new ReentrantLock();
/**
* A callback interface called when the signature is refreshed. This
@@ -858,16 +862,24 @@ public SignatureProvider(AuthenticationProfileProvider profileProvider,
@Override
public String getAuthorizationString(Request request) {
+ return ConcurrentUtil.awaitFuture(getAuthorizationStringAsync(request));
+ }
+
+ @Override
+ public CompletableFuture getAuthorizationStringAsync(
+ Request request) {
if (serviceHost == null) {
- throw new IllegalArgumentException(
- "Unable to find service host, use setServiceHost " +
- "to load from NoSQLHandleConfig");
- }
- SignatureDetails sigDetails = getSignatureDetails(request);
- if (sigDetails != null) {
- return sigDetails.getSignatureHeader();
+ CompletableFuture.failedFuture(new IllegalArgumentException(
+ "Unable to find service host, use setServiceHost " +
+ "to load from NoSQLHandleConfig"));
}
- return null;
+
+ return getSignatureDetails(request).thenApply(sigDetails -> {
+ if (sigDetails != null) {
+ return sigDetails.getSignatureHeader();
+ }
+ return null;
+ });
}
@Override
@@ -876,44 +888,62 @@ public void setRequiredHeaders(String authString,
HttpHeaders headers,
byte[] content) {
- SignatureDetails sigDetails = (content != null) ?
- getSignatureWithContent(request, headers, content):
- getSignatureDetails(request);
- if (sigDetails == null) {
- return;
- }
- headers.add(AUTHORIZATION, sigDetails.getSignatureHeader());
- headers.add(DATE, sigDetails.getDate());
+ ConcurrentUtil.awaitFuture(
+ setRequiredHeadersAsync(authString, request, headers, content));
+ }
- final String token = getDelegationToken(request);
- if (token != null) {
- headers.add(OBO_TOKEN_HEADER, token);
- }
- String compartment = request.getCompartment();
- if (compartment == null) {
- /*
- * If request doesn't has compartment id, set the tenant id as the
- * default compartment, which is the root compartment in IAM if
- * using user principal. If using an instance principal this
- * value is null.
- */
- compartment = getTenantOCID();
- }
+ @Override
+ public CompletableFuture setRequiredHeadersAsync(String authString,
+ Request request,
+ HttpHeaders headers,
+ byte[] content) {
- if (compartment != null) {
- headers.add(REQUEST_COMPARTMENT_ID, compartment);
+ CompletableFuture sigDetailsFuture;
+ if (content != null) {
+ sigDetailsFuture = getSignatureWithContent(
+ request, headers, content);
} else {
- throw new IllegalArgumentException(
- "Compartment is null. When authenticating using an " +
- "Instance Principal the compartment for the operation " +
- "must be specified.");
+ sigDetailsFuture = getSignatureDetails(request);
}
+
+ return sigDetailsFuture.thenAccept(sigDetails -> {
+ if (sigDetails != null) {
+ headers.add(AUTHORIZATION, sigDetails.getSignatureHeader());
+ headers.add(DATE, sigDetails.getDate());
+
+ final String token = getDelegationToken(request);
+ if (token != null) {
+ headers.add(OBO_TOKEN_HEADER, token);
+ }
+ String compartment = request.getCompartment();
+ if (compartment == null) {
+ /*
+                 * If request doesn't have a compartment id, set the tenant id
+ * as the default compartment, which is the root compartment
+ * in IAM if using user principal. If using an instance
+ * principal this value is null.
+ */
+ compartment = getTenantOCID();
+ }
+
+ if (compartment != null) {
+ headers.add(REQUEST_COMPARTMENT_ID, compartment);
+ } else {
+ throw new IllegalArgumentException(
+ "Compartment is null. When authenticating using an " +
+ "Instance Principal the compartment for the operation " +
+ "must be specified.");
+ }
+ }
+ });
}
@Override
- public synchronized void flushCache() {
- currentSigDetails = null;
- refreshSigDetails = null;
+ public void flushCache() {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
+ currentSigDetails = null;
+ refreshSigDetails = null;
+ });
}
/**
@@ -978,7 +1008,10 @@ public SignatureProvider prepare(NoSQLHandleConfig config) {
}
/* creates and caches a signature as warm-up */
- getSignatureDetailsForCache(false);
+ getSignatureDetailsInternal(false, /* isRefresh */
+ null, /* request */
+ null, /* headers */
+ null /* content */);
return this;
}
@@ -1040,115 +1073,129 @@ private void logMessage(Level level, String msg) {
}
}
- private SignatureDetails getSignatureDetails(Request request) {
+ private CompletableFuture
+ getSignatureDetails(Request request) {
SignatureDetails sigDetails =
(request.getIsRefresh() ? refreshSigDetails : currentSigDetails);
if (sigDetails != null) {
- return sigDetails;
+ return CompletableFuture.completedFuture(sigDetails);
}
if (request.getIsRefresh()) {
/* try current details before failing */
sigDetails = currentSigDetails;
if (sigDetails != null) {
- return sigDetails;
+ return CompletableFuture.completedFuture(sigDetails);
}
}
return getSignatureDetailsForCache(false);
}
- private SignatureDetails getSignatureWithContent(Request request,
- HttpHeaders headers,
- byte[] content) {
- return getSignatureDetailsInternal(false, request, headers, content);
+ private CompletableFuture
+ getSignatureWithContent(Request request,
+ HttpHeaders headers,
+ byte[] content) {
+ /* TODO: supplyAsync runs in JVM common fork-join pool.
+ * Do we need a separate executor?
+ */
+ return CompletableFuture.supplyAsync(() ->
+ getSignatureDetailsInternal(false, request, headers, content));
}
- synchronized SignatureDetails
- getSignatureDetailsForCache(boolean isRefresh) {
- return getSignatureDetailsInternal(isRefresh,
- null /* request */,
- null /* headers */,
- null /* content */);
+ private CompletableFuture
+ getSignatureDetailsForCache(boolean isRefresh) {
+ /* TODO: supplyAsync runs in JVM common fork-join pool.
+ * Do we need a separate executor?
+ */
+ return CompletableFuture.supplyAsync(() ->
+ getSignatureDetailsInternal(isRefresh,
+ null /* request */,
+ null /* headers */,
+ null /* content */)
+ );
}
/* visible for testing */
- synchronized SignatureDetails
+ SignatureDetails
getSignatureDetailsInternal(boolean isRefresh,
Request request,
HttpHeaders headers,
byte[] content) {
- /*
- * add one minute to the current time, so that any caching is
- * effective over a more valid time period.
- */
- long nowPlus = System.currentTimeMillis() + 60_000L;
- String date = createFormatter().format(new Date(nowPlus));
- String keyId = provider.getKeyId();
-
- /*
- * Security token based providers may refresh the security token
- * and associated private key in above getKeyId() method, reload
- * private key to PrivateKeyProvider to avoid a mismatch, which
- * will create an invalid signature, cause authentication error.
- */
- if (provider instanceof SecurityTokenBasedProvider) {
- privateKeyProvider.reload(provider.getPrivateKey(),
- provider.getPassphraseCharacters());
- }
- String signature;
- try {
- signature = sign(signingContent(date, request, headers, content),
- privateKeyProvider.getKey());
- } catch (Exception e) {
- logMessage(Level.SEVERE, "Error signing request " + e.getMessage());
- return null;
- }
+ return ConcurrentUtil.synchronizedCall(lock, () -> {
+ /*
+ * add one minute to the current time, so that any caching is
+ * effective over a more valid time period.
+ */
+ long nowPlus = System.currentTimeMillis() + 60_000L;
+ String date = createFormatter().format(new Date(nowPlus));
+ String keyId = provider.getKeyId();
- String token = getDelegationToken(request);
- String signingHeader;
- if (content != null) {
- signingHeader = (token == null)
- ? SIGNING_HEADERS_WITH_CONTENT :
- SIGNING_HEADERS_WITH_CONTENT_OBO;
- } else {
- signingHeader = (token == null)
- ? SIGNING_HEADERS : SIGNING_HEADERS_WITH_OBO;
- }
+ /*
+ * Security token based providers may refresh the security token
+ * and associated private key in above getKeyId() method, reload
+ * private key to PrivateKeyProvider to avoid a mismatch, which
+ * will create an invalid signature, cause authentication error.
+ */
+ if (provider instanceof SecurityTokenBasedProvider) {
+ privateKeyProvider.reload(provider.getPrivateKey(),
+ provider.getPassphraseCharacters());
+ }
+ String signature;
+ try {
+ signature = sign(signingContent(date, request, headers, content),
+ privateKeyProvider.getKey());
+ } catch (Exception e) {
+ logMessage(Level.SEVERE, "Error signing request " +
+ e.getMessage());
+ return null;
+ }
- String sigHeader = String.format(SIGNATURE_HEADER_FORMAT,
- signingHeader,
- keyId,
- RSA,
- signature,
- SINGATURE_VERSION);
- SignatureDetails sigDetails = new SignatureDetails(sigHeader, date);
+ String token = getDelegationToken(request);
+ String signingHeader;
+ if (content != null) {
+ signingHeader = (token == null)
+ ? SIGNING_HEADERS_WITH_CONTENT :
+ SIGNING_HEADERS_WITH_CONTENT_OBO;
+ } else {
+ signingHeader = (token == null)
+ ? SIGNING_HEADERS : SIGNING_HEADERS_WITH_OBO;
+ }
- /*
- * Don't cache the signature generated with content, which
- * needs to be associated with its request
- */
- if (content != null) {
- return sigDetails;
- }
+ String sigHeader = String.format(SIGNATURE_HEADER_FORMAT,
+ signingHeader,
+ keyId,
+ RSA,
+ signature,
+ SINGATURE_VERSION);
+ SignatureDetails sigDetails = new SignatureDetails(sigHeader, date);
- if (!isRefresh) {
- /*
- * if this is not a refresh, use the normal key and schedule a
- * refresh
- */
- currentSigDetails = sigDetails;
- scheduleRefresh();
- } else {
/*
- * If this is a refresh put the object in a temporary key.
- * The caller (the refresh task) will:
- * 1. perform callbacks if needed and when done,
- * 2. move the object to the normal key and schedule a refresh
+ * Don't cache the signature generated with content, which
+ * needs to be associated with its request
*/
- refreshSigDetails = sigDetails;
- }
- return sigDetails;
+ if (content != null) {
+ return sigDetails;
+ }
+
+ if (!isRefresh) {
+ /*
+ * if this is not a refresh, use the normal key and schedule a
+ * refresh
+ */
+ currentSigDetails = sigDetails;
+ scheduleRefresh();
+ } else {
+ /*
+ * If this is a refresh put the object in a temporary key.
+ * The caller (the refresh task) will:
+ * 1. perform callbacks if needed and when done,
+ * 2. move the object to the normal key and schedule a refresh
+ */
+ refreshSigDetails = sigDetails;
+ }
+ return sigDetails;
+ });
}
/*
@@ -1164,11 +1211,13 @@ private String getDelegationToken(Request req) {
req.getOboToken() : delegationToken;
}
- private synchronized void setRefreshKey() {
- if (refreshSigDetails != null) {
- currentSigDetails = refreshSigDetails;
- refreshSigDetails = null;
- }
+ private void setRefreshKey() {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
+ if (refreshSigDetails != null) {
+ currentSigDetails = refreshSigDetails;
+ refreshSigDetails = null;
+ }
+ });
}
private String signingContent(String date,
@@ -1264,7 +1313,10 @@ public void run() {
Exception lastException;
do {
try {
- getSignatureDetailsForCache(true);
+ getSignatureDetailsInternal(true, /* isRefresh */
+ null /* request */,
+ null /* headers */,
+ null /* content */);
handleRefreshCallback(refreshAheadMs);
return;
} catch (SecurityInfoNotReadyException se) {
diff --git a/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java b/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java
index e3f6d0f3..0571071e 100644
--- a/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java
+++ b/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java
@@ -15,7 +15,9 @@
import java.util.Base64;
import java.util.Timer;
import java.util.TimerTask;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Logger;
import oracle.nosql.driver.AuthorizationProvider;
@@ -24,6 +26,7 @@
import oracle.nosql.driver.NoSQLHandleConfig;
import oracle.nosql.driver.httpclient.HttpClient;
import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.util.ConcurrentUtil;
import oracle.nosql.driver.util.HttpRequestUtil;
import oracle.nosql.driver.util.HttpRequestUtil.HttpResponse;
import oracle.nosql.driver.values.JsonUtils;
@@ -98,7 +101,7 @@ public class StoreAccessTokenProvider implements AuthorizationProvider {
/*
* Login token expiration time.
*/
- private long expirationTime;
+ private volatile long expirationTime;
/*
* A timer task used to periodically renew the login token.
@@ -153,7 +156,7 @@ public class StoreAccessTokenProvider implements AuthorizationProvider {
/*
* Whether this provider is closed
*/
- private boolean isClosed = false;
+ private volatile boolean isClosed = false;
/*
* SslContext used by http client
@@ -170,6 +173,7 @@ public class StoreAccessTokenProvider implements AuthorizationProvider {
*/
public static boolean disableSSLHook;
+ private final ReentrantLock lock = new ReentrantLock();
/**
* This method is used for access to a store without security enabled.
@@ -223,8 +227,9 @@ public StoreAccessTokenProvider(String userName,
*
* Bootstrap login using the provided credentials
*/
- public synchronized void bootstrapLogin(Request request) {
+ public void bootstrapLogin(Request request) {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
/* re-check the authString in case of a race */
if (!isSecure || isClosed || authString.get() != null) {
return;
@@ -277,7 +282,7 @@ public synchronized void bootstrapLogin(Request request) {
throw iae;
} catch (Exception e) {
throw new NoSQLException("Bootstrap login fail", e);
- }
+ }});
}
/**
@@ -285,26 +290,33 @@ public synchronized void bootstrapLogin(Request request) {
*/
@Override
public String getAuthorizationString(Request request) {
+ return ConcurrentUtil.awaitFuture(getAuthorizationStringAsync(request));
+ }
+
+ /**
+ * @hidden
+ */
+ @Override
+ public CompletableFuture
+ getAuthorizationStringAsync(Request request) {
- if (!isSecure) {
- return null;
+ if (!isSecure || isClosed) {
+ return CompletableFuture.completedFuture(null);
}
- /*
- * Already close
- */
- if (isClosed) {
- return null;
+ String token = authString.get();
+ if (token != null) {
+ return CompletableFuture.completedFuture(token);
}
- /*
- * If there is no cached auth string, re-authentication to retrieve
- * the login token and generate the auth string.
+ /* Run bootstrap login asynchronously, reusing existing sync logic. */
+ /* TODO: supplyAsync runs in JVM common fork-join pool.
+ * Do we need a separate executor?
*/
- if (authString.get() == null) {
+ return CompletableFuture.supplyAsync(() -> {
bootstrapLogin(request);
- }
- return authString.get();
+ return authString.get();
+ });
}
/**
@@ -319,13 +331,29 @@ public void validateAuthString(String input) {
}
}
+ @Override
+ public void flushCache() {
+ ConcurrentUtil.synchronizedCall(lock,
+ () -> {
+ if (!isSecure || isClosed) {
+ return;
+ }
+ authString.set(null);
+ expirationTime = 0;
+ if (timer != null) {
+ timer.cancel();
+ timer = null;
+ }
+ });
+ }
+
/**
* Closes the provider, releasing resources such as a stored login
* token.
*/
@Override
- public synchronized void close() {
-
+ public void close() {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
/*
* Don't do anything for non-secure case
*/
@@ -336,20 +364,22 @@ public synchronized void close() {
/*
* Send request for logout
*/
- try {
- final HttpResponse response =
- sendRequest(authString.get(), LOGOUT_SERVICE, 0);
- if (response.getStatusCode() != HttpResponseStatus.OK.code()) {
+ if (authString.get() != null) {
+ try {
+ final HttpResponse response =
+ sendRequest(authString.get(), LOGOUT_SERVICE, 0);
+ if (response.getStatusCode() != HttpResponseStatus.OK.code()) {
+ if (logger != null) {
+ logger.info("Failed to logout user " + userName +
+ ": " + response.getOutput());
+ }
+ }
+ } catch (Exception e) {
if (logger != null) {
logger.info("Failed to logout user " + userName +
- ": " + response.getOutput());
+ ": " + e);
}
}
- } catch (Exception e) {
- if (logger != null) {
- logger.info("Failed to logout user " + userName +
- ": " + e);
- }
}
/*
@@ -363,6 +393,7 @@ public synchronized void close() {
timer.cancel();
timer = null;
}
+ });
}
/**
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/QueryPaginatorResult.java b/driver/src/main/java/oracle/nosql/driver/ops/QueryPaginatorResult.java
new file mode 100644
index 00000000..bd09d11e
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/ops/QueryPaginatorResult.java
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.ops;
+
+import oracle.nosql.driver.NoSQLHandleAsync;
+import oracle.nosql.driver.values.MapValue;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Flow;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Result for {@link oracle.nosql.driver.NoSQLHandleAsync#queryPaginator}.
+ *
+ * Pagination is supported through the {@code Flow.Publisher<List<MapValue>>}.
+ * Users can subscribe to the publisher returned by
+ * {@link QueryPaginatorResult#getResults()} to consume the results of the query
+ * operation.
+ *
+ * Note: The read/write KB/Units, rate limit delay and retry stats are summed up
+ * from the beginning of the subscription.
+ */
+public class QueryPaginatorResult extends Result {
+ private final QueryPublisher publisher;
+ final QueryRequest queryRequest;
+ final NoSQLHandleAsync handle;
+
+ final AtomicInteger readKB = new AtomicInteger();
+ final AtomicInteger readUnits = new AtomicInteger();
+ final AtomicInteger writeKB = new AtomicInteger();
+ final AtomicInteger writeUnits = new AtomicInteger();
+
+ public QueryPaginatorResult(QueryRequest queryRequest,
+ NoSQLHandleAsync handle) {
+ Objects.requireNonNull(queryRequest, "queryRequest should not be null");
+ Objects.requireNonNull(handle, "NoSQL handle should not be null");
+ if (queryRequest.getContKey() != null) {
+ throw new IllegalArgumentException(
+ "A new QueryRequest is required for a QueryIterableResult.");
+ }
+ this.queryRequest = queryRequest;
+ this.handle = handle;
+ publisher = new QueryPublisher(this);
+ }
+
+ public Flow.Publisher> getResults() {
+ return publisher;
+ }
+
+ public int getReadKB() {
+ return readKB.get();
+ }
+
+ public int getWriteKB() {
+ return writeKB.get();
+ }
+
+ public int getReadUnits() {
+ return readUnits.get();
+ }
+
+ public int getWriteUnits() {
+ return writeUnits.get();
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/QueryPublisher.java b/driver/src/main/java/oracle/nosql/driver/ops/QueryPublisher.java
new file mode 100644
index 00000000..d998253b
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/ops/QueryPublisher.java
@@ -0,0 +1,90 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.ops;
+
+import oracle.nosql.driver.values.MapValue;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Flow;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * A {@link Flow.Publisher} that wraps a
+ * {@link oracle.nosql.driver.NoSQLHandleAsync#query(QueryRequest)} to
+ * iteratively request pages and stream all items from a paginated query.
+ *
+ * <p>Key properties:
+ *
+ * <ul>
+ * <li>Single subscription</li>
+ * <li>Backpressure-aware: items are emitted only up to the downstream demand.</li>
+ * <li>At most one remote call in-flight at a time.</li>
+ * <li>Buffers up to a single page beyond current demand.</li>
+ * <li>Terminates with {@code onComplete()} after all pages are consumed and
+ * the buffer is drained, or with {@code onError(Throwable)} if the remote
+ * call fails.</li>
+ * </ul>
+ *
+ * <p>This publisher supports concurrent calls to
+ * {@link Flow.Subscription#request(long)} and
+ * {@link Flow.Subscription#cancel()}.
+ *
+ * <p>Signals to the subscriber are serialized
+ * (no concurrent {@code onNext} calls).
+ *
+ * @implSpec The publisher:
+ *
+ * <ul>
+ * <li>Starts fetching only when there is outstanding demand and no buffered items.</li>
+ * <li>Never emits more than requested items; any overage from a page is buffered.</li>
+ * <li>Checks completion both while emitting and after emission even if demand becomes zero,
+ * ensuring {@code onComplete()} can be delivered without additional demand.</li>
+ * <li>Attempts to cancel any in-flight future on {@code cancel()}.</li>
+ * </ul>
+ */
+final class QueryPublisher implements Flow.Publisher> {
+
+ final QueryPaginatorResult queryPaginatorResult;
+ private final AtomicBoolean subscribed = new AtomicBoolean(false);
+
+ public QueryPublisher(QueryPaginatorResult queryPaginatorResult) {
+ this.queryPaginatorResult = queryPaginatorResult;
+ }
+
+ @Override
+ public void subscribe(Flow.Subscriber super List> subscriber) {
+ Objects.requireNonNull(subscriber, "subscriber should not be null");
+ /* only allow one subscriber */
+ if (!subscribed.compareAndSet(false, true)) {
+ subscriber.onSubscribe(new Flow.Subscription() {
+ @Override
+ public void request(long n) {
+ }
+ @Override
+ public void cancel() {
+ }
+ });
+ subscriber.onError(new IllegalStateException("already subscribed"));
+ return;
+ }
+
+ QuerySubscription subscription =
+ new QuerySubscription(queryPaginatorResult, subscriber);
+ try {
+ subscriber.onSubscribe(subscription);
+ } catch (Throwable t) {
+ IllegalStateException err =
+ new IllegalStateException(subscriber +
+ " violated the Reactive Streams rule 2.13 by throwing an" +
+ " exception from onSubscribe.", t);
+ subscription.signalError(err);
+ }
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/QuerySubscription.java b/driver/src/main/java/oracle/nosql/driver/ops/QuerySubscription.java
new file mode 100644
index 00000000..23dd031c
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/ops/QuerySubscription.java
@@ -0,0 +1,353 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.ops;
+
+import oracle.nosql.driver.NoSQLHandleAsync;
+import oracle.nosql.driver.values.MapValue;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Flow;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Subscription that manages backpressure, buffering, and page fetching for a
+ * single subscriber.
+ *
All subscriber signals are serialized. At most one page-fetch is in flight
+ * at any time.
+ */
+class QuerySubscription implements Flow.Subscription {
+ private static final Logger logger =
+ Logger.getLogger(QuerySubscription.class.getName());
+ private final QueryPaginatorResult queryPaginatorResult;
+ private final QueryRequest queryRequest;
+ private final NoSQLHandleAsync handle;
+ /* Backpressure and state */
+ private final AtomicLong demand;
+ private final AtomicInteger wip;
+ private final AtomicBoolean started;
+ private final Flow.Subscriber super List> subscriber;
+ private volatile boolean cancelled;
+
+ /* Buffer stores at most a single page beyond demand */
+ private final Queue> queue;
+ private final int limit;
+ private final List partialBatch;
+
+ /* set to true when request.isDone() is true after a fetch */
+ private volatile boolean done;
+ private volatile Throwable error; /* set on failure */
+
+ /* Ensure only one in-flight query at a time */
+ private volatile CompletableFuture inFlight;
+ private final AtomicBoolean requestClosed = new AtomicBoolean();
+
+ public QuerySubscription(QueryPaginatorResult queryPaginatorResult,
+ Flow.Subscriber super List> subscriber) {
+ this.queryPaginatorResult = queryPaginatorResult;
+ this.subscriber = subscriber;
+ this.queryRequest = queryPaginatorResult.queryRequest;
+ this.handle = queryPaginatorResult.handle;
+
+ demand = new AtomicLong();
+ wip = new AtomicInteger();
+ started = new AtomicBoolean();
+ queue = new ConcurrentLinkedQueue<>();
+ limit = queryRequest.getLimit();
+ partialBatch = (limit > 0) ? new ArrayList<>(limit) : null;
+ }
+
+ @Override
+ public void request(long n) {
+ if (cancelled) {
+ return;
+ }
+ if (n <= 0) {
+ /* Spec: negative or zero demand is illegal -> onError and cancel */
+ onErrorOnce(new IllegalArgumentException(subscriber +
+ " violated the Reactive Streams rule 3.9 by requesting a " +
+ "non-positive number of elements."));
+ signalError(error);
+ return;
+ }
+ Backpressure.addCap(demand, n);
+ drain();
+ }
+
+ @Override
+ public void cancel() {
+ cancelled = true;
+ closeQueryRequest();
+ }
+
+ /* Core loop: serialize all emission/fetch transitions */
+ private void drain() {
+ if (wip.getAndIncrement() != 0) {
+ return;
+ }
+ int missed = 1;
+ while (true) {
+ if (cancelled) {
+ queue.clear();
+ return;
+ }
+ long r = demand.get();
+ long e = 0L;
+
+ /* Emit from buffer up to r(demand) */
+ while (e != r) {
+ if (cancelled) {
+ queue.clear();
+ return;
+ }
+ boolean d = done;
+ List v = queue.poll();
+ boolean empty = (v == null);
+ if (d && empty) {
+ Throwable ex = error;
+ if (ex != null) {
+ signalError(ex);
+ } else {
+ signalComplete();
+ }
+ return;
+ }
+ if (empty) {
+ break;
+ }
+ signaNext(v);
+ e++;
+ }
+
+ if (e != 0L) {
+ Backpressure.produced(demand, e);
+ }
+
+ /* terminal check must also run when r == 0 (no demand) */
+ if (done && queue.isEmpty()) {
+ Throwable ex = error;
+ if (ex != null) {
+ signalError(ex);
+ } else {
+ signalComplete();
+ }
+ return;
+ }
+
+ // If buffer is empty, not done, and there is outstanding demand, fetch next page
+ if (!cancelled && queue.isEmpty() && !done && demand.get() > 0) {
+ maybeFetchNext();
+ }
+
+ int w = wip.get();
+ if (missed == w) {
+ missed = wip.addAndGet(-missed);
+ if (missed == 0) {
+ break;
+ }
+ } else {
+ missed = w;
+ }
+ }
+ }
+
+ private void maybeFetchNext() {
+ /* Only start a new fetch if no in-flight */
+ if (inFlight != null) {
+ return;
+ }
+
+ CompletableFuture f;
+ try {
+ f = handle.query(queryRequest);
+ } catch (Throwable ex) {
+ onErrorOnce(ex);
+ done = true;
+ /* Ensure terminal signal is delivered */
+ drain();
+ return;
+ }
+ inFlight = f;
+ f.whenCompleteAsync((res, err) -> {
+ inFlight = null;
+ if (cancelled)
+ return;
+ if (err != null) {
+ onErrorOnce(err);
+ done = true;
+ } else {
+ try {
+ setStats(res);
+ List page = res.getResults();
+ if (!page.isEmpty()) {
+ bufferPage(page, false);
+ }
+ if (queryRequest.isDone()) {
+ if (limit > 0) {
+ flushPartialBatch();
+ }
+ done = true;
+ }
+ } catch (Throwable ex) {
+ onErrorOnce(ex);
+ done = true;
+ }
+ }
+ drain();
+ });
+ }
+
+ private void onErrorOnce(Throwable ex) {
+ if (error == null) {
+ error = ex;
+ }
+ }
+
+ private void bufferPage(List page, boolean terminal) {
+ if (limit == 0) {
+ queue.add(page);
+ return;
+ }
+
+ for (MapValue value : page) {
+ partialBatch.add(value);
+ if (partialBatch.size() == limit) {
+ queue.add(new ArrayList<>(partialBatch));
+ partialBatch.clear();
+ }
+ }
+
+ if (terminal && !partialBatch.isEmpty()) {
+ queue.add(new ArrayList<>(partialBatch));
+ partialBatch.clear();
+ }
+ }
+
+ private void flushPartialBatch() {
+ if (limit > 0 && !partialBatch.isEmpty()) {
+ queue.add(new ArrayList<>(partialBatch));
+ partialBatch.clear();
+ }
+ }
+ private void closeQueryRequest() {
+ if (requestClosed.compareAndSet(false, true)) {
+ try {
+ queryRequest.close();
+ } catch (Throwable ignored) {
+
+ }
+ }
+ }
+
+ void signaNext(List v) {
+ try {
+ subscriber.onNext(v);
+ } catch (Throwable ex) {
+ /* downstream threw error. Make sure that we are
+ * cancelled, since we cannot do anything else since the
+ `* Subscriber` is faulty.
+ */
+ cancel();
+ onErrorOnce(ex);
+ logger.log(Level.WARNING,
+ subscriber +
+ " violated the Reactive Streams rule 2.13 by " +
+ "throwing an exception from onNext." +
+ ex);
+ }
+ }
+
+ void signalError(Throwable ex) {
+ if (cancelled)
+ return;
+ cancelled = true; // ensure terminal
+ try {
+ closeQueryRequest();
+ subscriber.onError(ex);
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, subscriber +
+ " violated the Reactive Streams rule 2.13 by " +
+ "throwing an exception from onError.", t);
+ }
+ }
+
+ void signalComplete() {
+ if (cancelled)
+ return;
+ cancelled = true; // ensure terminal
+ try {
+ closeQueryRequest();
+ subscriber.onComplete();
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, subscriber +
+ " violated the Reactive Streams rule 2.13 by " +
+ "throwing an exception from onComplete.", t);
+ }
+ }
+
+ /* Update query metrics on result object */
+ private void setStats(QueryResult internalResult) {
+ queryPaginatorResult.readKB.addAndGet(internalResult.getReadKB());
+ queryPaginatorResult.readUnits.addAndGet(internalResult.getReadUnits());
+ queryPaginatorResult.writeKB.addAndGet(internalResult.getWriteKB());
+ queryPaginatorResult.writeUnits.addAndGet(
+ internalResult.getWriteUnits());
+ queryPaginatorResult.setRateLimitDelayedMs(
+ queryPaginatorResult.getRateLimitDelayedMs() +
+ internalResult.getRateLimitDelayedMs());
+ queryPaginatorResult.setReadKB(
+ queryPaginatorResult.getReadKB() + internalResult.getReadKB());
+ queryPaginatorResult.setReadUnits(
+ queryPaginatorResult.getReadUnits() + internalResult.getReadUnits());
+ queryPaginatorResult.setWriteKB(
+ queryPaginatorResult.getWriteKB() + internalResult.getWriteKB());
+ if (internalResult.getRetryStats() != null) {
+ if (queryPaginatorResult.getRetryStats() == null) {
+ queryPaginatorResult.setRetryStats(new RetryStats());
+ }
+ queryPaginatorResult.getRetryStats().addStats(
+ internalResult.getRetryStats());
+ }
+ }
+
+ /* Small utility to handle requested arithmetic safely */
+ static final class Backpressure {
+ static void addCap(AtomicLong requested, long n) {
+ for (; ; ) {
+ long r = requested.get();
+ long u = r + n;
+ if (u < 0L) { // overflow -> cap
+ u = Long.MAX_VALUE;
+ }
+ if (requested.compareAndSet(r, u)) {
+ return;
+ }
+ }
+ }
+
+ static void produced(AtomicLong requested, long n) {
+ for (; ; ) {
+ long r = requested.get();
+ long u = r - n;
+ if (u < 0L) {
+ u = 0L;
+ }
+ if (requested.compareAndSet(r, u)) {
+ return;
+ }
+ }
+ }
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/Request.java b/driver/src/main/java/oracle/nosql/driver/ops/Request.java
index 565036c8..e4908dd0 100644
--- a/driver/src/main/java/oracle/nosql/driver/ops/Request.java
+++ b/driver/src/main/java/oracle/nosql/driver/ops/Request.java
@@ -61,7 +61,7 @@ public abstract class Request {
/**
* @hidden
*/
- private long startNanos;
+ private volatile long startNanos;
/**
* @hidden
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java b/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java
index 45bf9361..ea332c14 100644
--- a/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java
+++ b/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java
@@ -10,6 +10,11 @@
import oracle.nosql.driver.NoSQLException;
import oracle.nosql.driver.NoSQLHandle;
import oracle.nosql.driver.RequestTimeoutException;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
/**
* On-premises only.
@@ -233,4 +238,84 @@ public void waitForCompletion(NoSQLHandle handle,
}
} while (!state.equals(State.COMPLETE));
}
+
+    /**
+     * Asynchronously waits for the operation to be complete.
+     * This is a polling style wait that delays for the specified number of
+     * milliseconds between each polling operation.
+     *
+     * <p>This instance is modified with any changes in state.
+     *
+     * @param handle the Async NoSQLHandle to use
+     * @param waitMillis the total amount of time to wait, in milliseconds.
+     * This value must be non-zero and greater than delayMillis
+     * @param delayMillis the amount of time to wait between polling attempts,
+     * in milliseconds. If 0 it will default to 500.
+     *
+     * @return a {@link CompletableFuture} which completes successfully when
+     * the operation completes within waitMillis. It completes exceptionally
+     * with {@link IllegalArgumentException} if the parameters are not valid,
+     * with {@link RequestTimeoutException} if the operation times out, and
+     * with {@link NoSQLException} if the operation id used is unknown or the
+     * operation has failed.
+     */
+    public CompletableFuture<Void> waitForCompletionAsync(
+        NoSQLHandleAsyncImpl handle, int waitMillis, int delayMillis) {
+
+        if (state.equals(State.COMPLETE)) {
+            return CompletableFuture.completedFuture(null);
+        }
+
+        final int DELAY_MS = 500;
+
+        final int delayMS = (delayMillis != 0 ? delayMillis : DELAY_MS);
+        /*
+         * Validate against the effective delay (delayMS), not the raw
+         * parameter: with delayMillis == 0 the raw comparison could never
+         * fail even when waitMillis is below the default delay.
+         */
+        if (waitMillis < delayMS) {
+            Throwable t = new IllegalArgumentException(
+                "Wait milliseconds must be a minimum of " +
+                DELAY_MS + " and greater than delay milliseconds");
+            return CompletableFuture.failedFuture(t);
+        }
+        final long startTime = System.currentTimeMillis();
+        SystemStatusRequest ds = new SystemStatusRequest()
+            .setOperationId(operationId);
+
+        final CompletableFuture<Void> resultFuture = new CompletableFuture<>();
+        final ScheduledExecutorService taskExecutor = handle.getTaskExecutor();
+
+        Runnable poll = new Runnable() {
+            @Override
+            public void run() {
+                final long curTime = System.currentTimeMillis();
+                if ((curTime - startTime) > waitMillis) {
+                    Throwable t = new RequestTimeoutException(
+                        waitMillis,
+                        "Operation not completed within timeout: " +
+                        statement);
+                    resultFuture.completeExceptionally(t);
+                    return;
+                }
+                handle.systemStatus(ds)
+                    .whenComplete((res, ex) -> {
+                        if (ex != null) {
+                            resultFuture.completeExceptionally(ex);
+                            return;
+                        }
+                        /* Update state visible to the caller */
+                        resultString = res.resultString;
+                        state = res.state;
+
+                        if (state.equals(State.COMPLETE)) {
+                            resultFuture.complete(null);
+                        } else {
+                            /* Schedule next poll; 'this' is this Runnable */
+                            taskExecutor.schedule(this, delayMS,
+                                TimeUnit.MILLISECONDS);
+                        }
+                    });
+            }
+        };
+        /* Kick off the first poll immediately */
+        taskExecutor.execute(poll);
+        return resultFuture;
+    }
}
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java b/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java
index 428fec61..78fdf6b2 100644
--- a/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java
+++ b/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java
@@ -12,8 +12,13 @@
import oracle.nosql.driver.NoSQLException;
import oracle.nosql.driver.NoSQLHandle;
import oracle.nosql.driver.RequestTimeoutException;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
import oracle.nosql.driver.ops.TableLimits.CapacityMode;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
/**
* TableResult is returned from {@link NoSQLHandle#getTable} and
* {@link NoSQLHandle#tableRequest} operations. It encapsulates the
@@ -784,6 +789,111 @@ public void waitForCompletion(NoSQLHandle handle,
}
}
+    /**
+     * Asynchronously waits for a table operation to complete. Table
+     * operations are asynchronous. This is a polling style wait that delays
+     * for the specified number of milliseconds between each polling
+     * operation. The returned future completes when the table reaches a
+     * terminal state,
+     * which is either {@link State#ACTIVE} or {@link State#DROPPED}.
+     *
+     * <p>This instance must be the return value of a previous
+     * {@link NoSQLHandle#tableRequest} and contain a non-null operation id
+     * representing the in-progress operation unless the operation has
+     * already completed.
+     *
+     * <p>This instance is modified with any change in table state or
+     * metadata.
+     *
+     * @param handle the Async NoSQLHandle to use
+     * @param waitMillis the total amount of time to wait, in milliseconds.
+     * This value must be non-zero and greater than delayMillis
+     * @param delayMillis the amount of time to wait between polling attempts,
+     * in milliseconds. If 0 it will default to 500.
+     *
+     * @return a {@link CompletableFuture} which completes successfully when
+     * the operation completes within waitMillis. It completes exceptionally
+     * with {@link IllegalArgumentException} if the parameters are not valid,
+     * and with {@link RequestTimeoutException} if the operation times out.
+     */
+    public CompletableFuture<Void> waitForCompletionAsync
+        (NoSQLHandleAsyncImpl handle, int waitMillis, int delayMillis) {
+
+        if (isTerminal()) {
+            return CompletableFuture.completedFuture(null);
+        }
+
+        if (operationId == null) {
+            Throwable t = new IllegalArgumentException(
+                "Operation state must not be null");
+            return CompletableFuture.failedFuture(t);
+        }
+
+        /* TODO: try to share code with waitForState? */
+        final int DELAY_MS = 500;
+
+        final int delayMS = (delayMillis != 0 ? delayMillis : DELAY_MS);
+        /*
+         * Validate against the effective delay (delayMS), not the raw
+         * parameter: with delayMillis == 0 the raw comparison could never
+         * fail even when waitMillis is below the default delay.
+         */
+        if (waitMillis < delayMS) {
+            Throwable t = new IllegalArgumentException(
+                "Wait milliseconds must be a minimum of " +
+                DELAY_MS + " and greater than delay milliseconds");
+            return CompletableFuture.failedFuture(t);
+        }
+
+        final long startTime = System.currentTimeMillis();
+        final CompletableFuture<Void> resultFuture = new CompletableFuture<>();
+        final ScheduledExecutorService taskExecutor = handle.getTaskExecutor();
+
+        GetTableRequest getTable =
+            new GetTableRequest().setTableName(tableName).
+            setOperationId(operationId).setCompartment(
+                compartmentOrNamespace);
+
+        Runnable poll = new Runnable() {
+            @Override
+            public void run() {
+                long curTime = System.currentTimeMillis();
+                if ((curTime - startTime) > waitMillis) {
+                    Throwable t = new RequestTimeoutException(
+                        waitMillis,
+                        "Operation not completed in expected time");
+                    resultFuture.completeExceptionally(t);
+                    return;
+                }
+                handle.getTable(getTable).whenComplete((res, ex) -> {
+                    if (ex != null) {
+                        resultFuture.completeExceptionally(ex);
+                        return;
+                    }
+                    /*
+                     * partial "copy" of possibly modified state. Don't modify
+                     * operationId as that is what we are waiting to complete
+                     */
+                    state = res.getTableState();
+                    limits = res.getTableLimits();
+                    schema = res.getSchema();
+                    matchETag = res.getMatchETag();
+                    ddl = res.getDdl();
+                    isFrozen = res.isFrozen();
+                    isLocalReplicaInitialized = res.isLocalReplicaInitialized();
+                    replicas = res.getReplicas();
+
+                    if (isTerminal()) {
+                        resultFuture.complete(null);
+                    } else {
+                        /* Schedule next poll; 'this' is this Runnable */
+                        taskExecutor.schedule(this, delayMS,
+                            TimeUnit.MILLISECONDS);
+                    }
+                });
+            }
+        };
+        /* Kick off the first poll immediately */
+        taskExecutor.execute(poll);
+        return resultFuture;
+    }
+
private boolean isTerminal() {
return state == State.ACTIVE || state == State.DROPPED;
}
diff --git a/driver/src/main/java/oracle/nosql/driver/package-info.java b/driver/src/main/java/oracle/nosql/driver/package-info.java
index 27557cb0..a3dff30b 100644
--- a/driver/src/main/java/oracle/nosql/driver/package-info.java
+++ b/driver/src/main/java/oracle/nosql/driver/package-info.java
@@ -4,6 +4,7 @@
* Licensed under the Universal Permissive License v 1.0 as shown at
* https://oss.oracle.com/licenses/upl/
*/
+ /* TODO: need add NoSQLHandleAsync? */
/**
* Contains the public API for using the Oracle NoSQL Database
* as well as configuration and common parameter classes used in
diff --git a/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java b/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java
index 615e0ac5..4fe29f62 100644
--- a/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java
+++ b/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java
@@ -12,11 +12,14 @@
import java.util.HashSet;
import java.util.List;
import java.util.TreeSet;
+import java.util.concurrent.CompletableFuture;
import oracle.nosql.driver.NoSQLException;
import oracle.nosql.driver.RetryableException;
import oracle.nosql.driver.ops.QueryRequest;
import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.util.ConcurrentUtil;
import oracle.nosql.driver.values.BinaryValue;
import oracle.nosql.driver.values.FieldValue;
import oracle.nosql.driver.values.MapValue;
@@ -592,7 +595,8 @@ private QueryResult execute(RuntimeControlBlock rcb,
NoSQLException e = null;
QueryResult result = null;
try {
- result = (QueryResult)rcb.getClient().execute(reqCopy);
+ CompletableFuture fut = rcb.getClient().execute(reqCopy);
+ result = (QueryResult) ConcurrentUtil.awaitFuture(fut);
} catch (NoSQLException qe) {
e = qe;
}
diff --git a/driver/src/main/java/oracle/nosql/driver/util/ConcurrentUtil.java b/driver/src/main/java/oracle/nosql/driver/util/ConcurrentUtil.java
new file mode 100644
index 00000000..03ef15d6
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/util/ConcurrentUtil.java
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.util;
+
+import oracle.nosql.driver.NoSQLException;
+
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Supplier;
+
+public class ConcurrentUtil {
+ /**
+ * A convenient function to hold the lock and run.
+ */
+ public static T synchronizedCall(ReentrantLock lock,
+ Supplier s) {
+ lock.lock();
+ try {
+ return s.get();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * A convenient function to hold the lock and run.
+ */
+ public static void synchronizedCall(ReentrantLock lock,
+ Runnable r) {
+ lock.lock();
+ try {
+ r.run();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * A helper function to wait for the future to complete.
+ */
+ public static T awaitFuture(CompletableFuture future) {
+ try {
+ return future.get();
+ } catch (ExecutionException e) {
+ final Throwable cause = e.getCause();
+ appendCurrentStack(cause);
+ if (cause instanceof RuntimeException) {
+ throw ((RuntimeException) cause);
+ }
+ throw new NoSQLException("ExecutionException: "
+ + e.getMessage(), e.getCause());
+ } catch (InterruptedException ie) {
+ throw new NoSQLException("Request interrupted: "
+ + ie.getMessage(), ie);
+ }
+ }
+
+ /**
+ * Returns the cause if the exception is a CompletionException, otherwise
+ * returns the exception.
+ */
+ public static Throwable unwrapCompletionException(Throwable t) {
+ Throwable actual = t;
+ while (true) {
+ if (!(actual instanceof CompletionException)
+ || (actual.getCause() == null)) {
+ return actual;
+ }
+ actual = actual.getCause();
+ }
+ }
+
+ private static void appendCurrentStack(Throwable exception) {
+ Objects.requireNonNull(exception, "exception");
+ final StackTraceElement[] existing = exception.getStackTrace();
+ final StackTraceElement[] current = new Throwable().getStackTrace();
+ final StackTraceElement[] updated =
+ new StackTraceElement[existing.length + current.length];
+ System.arraycopy(existing, 0, updated, 0, existing.length);
+ System.arraycopy(current, 0, updated, existing.length, current.length);
+ exception.setStackTrace(updated);
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java b/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java
index d0ea97bb..a5c03fd3 100644
--- a/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java
+++ b/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java
@@ -23,23 +23,22 @@
import java.net.URI;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import java.util.logging.Logger;
import javax.net.ssl.SSLException;
+import io.netty.buffer.Unpooled;
import oracle.nosql.driver.RequestTimeoutException;
import oracle.nosql.driver.httpclient.HttpClient;
-import oracle.nosql.driver.httpclient.ResponseHandler;
import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
-import io.netty.handler.codec.http.HttpResponseStatus;
/**
* Utility to issue HTTP request using {@link HttpClient}.
@@ -209,41 +208,36 @@ private static HttpResponse doRequest(HttpClient httpClient,
final long startTime = System.currentTimeMillis();
int numRetries = 0;
Throwable exception = null;
- HttpResponse res = null;
do {
if (numRetries > 0) {
logInfo(logger, "Client, doing retry: " + numRetries +
- (exception != null ? ", exception: " + exception : ""));
+ (exception != null ? ", exception: " + exception : ""));
}
- Channel channel = null;
- ResponseHandler responseHandler = null;
try {
- channel = httpClient.getChannel(timeoutMs);
- responseHandler =
- new ResponseHandler(httpClient, logger, channel);
-
FullHttpRequest request;
if (payload == null) {
request = buildRequest(uri, method, headers);
} else {
- request = buildRequest(
- uri, headers, method, payload, channel);
+ request = buildRequest(uri, headers, method, payload);
}
addRequiredHeaders(request);
logFine(logger, request.headers().toString());
- httpClient.runRequest(request, responseHandler, channel);
- if (responseHandler.await(timeoutMs)) {
- throw new TimeoutException("Request timed out after " +
- timeoutMs + " milliseconds");
- }
-
- final HttpResponseStatus status = responseHandler.getStatus();
- if (status == null) {
- throw new IllegalStateException("Invalid null response");
- }
- res = processResponse(status.code(),
- responseHandler.getContent());
+ CompletableFuture httpResponse =
+ httpClient.runRequest(request, timeoutMs)
+ .thenApply(fhr -> {
+ if (fhr.status() == null) {
+ throw new IllegalStateException(
+ "Invalid null response");
+ }
+ try {
+ final int code = fhr.status().code();
+ return processResponse(code, fhr.content());
+ } finally {
+ fhr.release();
+ }
+ });
+ HttpResponse res = httpResponse.get();
/*
* Retry upon status code larger than 500, in general,
@@ -251,50 +245,43 @@ private static HttpResponse doRequest(HttpClient httpClient,
*/
if (res.getStatusCode() >= 500) {
logFine(logger,
- "Remote server temporarily unavailable," +
- " status code " + res.getStatusCode() +
- " , response " + res.getOutput());
+ "Remote server temporarily unavailable," +
+ " status code " + res.getStatusCode() +
+ " , response " + res.getOutput());
delay();
++numRetries;
continue;
}
return res;
+ } catch (ExecutionException ee) {
+ Throwable cause = ee.getCause();
+ if (cause instanceof IOException) {
+ IOException ioe = (IOException) cause;
+ String name = ioe.getClass().getName();
+ logFine(logger, "Client execute IOException, name: " +
+ name + ", message: " + ioe.getMessage());
+ /*
+ * An exception in the channel, e.g. the server may have
+ * disconnected. Retry.
+ */
+ exception = ioe;
+ ++numRetries;
+ delay();
+ continue;
+ } else if (cause instanceof TimeoutException) {
+ throw new RuntimeException("Timeout exception: host=" +
+ httpClient.getHost() + " port=" +
+ httpClient.getPort() + " uri=" +
+ uri, cause);
+ }
+ throw new RuntimeException("Unable to execute request: ", ee);
+
} catch (RuntimeException e) {
logFine(logger, "Client execute runtime exception: " +
- e.getMessage());
+ e.getMessage());
throw e;
- } catch (IOException ioe) {
- String name = ioe.getClass().getName();
- logFine(logger, "Client execute IOException, name: " +
- name + ", message: " + ioe.getMessage());
- /*
- * An exception in the channel, e.g. the server may have
- * disconnected. Retry.
- */
- exception = ioe;
- ++numRetries;
- if (ioe instanceof SSLException) {
- /* disconnect the channel to force a new one */
- if (channel != null) {
- logFine(logger,
- "Client disconnecting channel due to: " + ioe);
- channel.disconnect();
- }
- } else {
- delay();
- }
- continue;
} catch (InterruptedException ie) {
- throw new RuntimeException(
- "Client interrupted exception: ", ie);
- } catch (ExecutionException ee) {
- throw new RuntimeException(
- "Unable to execute request: ", ee);
- } catch (TimeoutException te) {
- throw new RuntimeException("Timeout exception: host=" +
- httpClient.getHost() + " port=" +
- httpClient.getPort() + " uri=" +
- uri, te);
+ throw new RuntimeException("Client interrupted exception: ", ie);
} catch (Throwable t) {
/*
* this is likely an exception from Netty, perhaps a bad
@@ -308,10 +295,6 @@ private static HttpResponse doRequest(HttpClient httpClient,
delay();
++numRetries;
continue;
- } finally {
- if (responseHandler != null) {
- responseHandler.close();
- }
}
} while ((System.currentTimeMillis()- startTime) < timeoutMs);
@@ -333,10 +316,8 @@ private static FullHttpRequest buildRequest(String requestURI,
private static FullHttpRequest buildRequest(String requestURI,
HttpHeaders headers,
HttpMethod method,
- byte[] payload,
- Channel channel) {
- final ByteBuf buffer = channel.alloc().directBuffer();
- buffer.writeBytes(payload);
+ byte[] payload) {
+ final ByteBuf buffer = Unpooled.wrappedBuffer(payload);
final FullHttpRequest request =
new DefaultFullHttpRequest(HTTP_1_1, method, requestURI,
diff --git a/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java b/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java
index 25710c02..5aff93ae 100644
--- a/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java
+++ b/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java
@@ -7,6 +7,8 @@
package oracle.nosql.driver.util;
+import java.io.PrintWriter;
+import java.io.StringWriter;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -67,4 +69,19 @@ public static void logTrace(Logger logger, String msg) {
public static boolean isLoggable(Logger logger, Level level) {
return (logger != null && logger.isLoggable(level));
}
+
+    /**
+     * Renders a throwable's full stack trace as a string.
+     *
+     * @param t the exception; may be null
+     * @return the formatted stack trace, or null if {@code t} is null
+     */
+    public static String getStackTrace(Throwable t) {
+        if (t == null) {
+            return null;
+        }
+        final StringWriter buffer = new StringWriter();
+        try (PrintWriter writer = new PrintWriter(buffer)) {
+            t.printStackTrace(writer);
+        }
+        return buffer.toString();
+    }
}
diff --git a/driver/src/test/java/oracle/nosql/driver/AsyncQueryTest.java b/driver/src/test/java/oracle/nosql/driver/AsyncQueryTest.java
new file mode 100644
index 00000000..04358b25
--- /dev/null
+++ b/driver/src/test/java/oracle/nosql/driver/AsyncQueryTest.java
@@ -0,0 +1,2420 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Flow;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.atomic.LongAdder;
+
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PreparedStatement;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryPaginatorResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.RetryStats;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.util.ConcurrentUtil;
+import oracle.nosql.driver.values.ArrayValue;
+import oracle.nosql.driver.values.DoubleValue;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.JsonNullValue;
+import oracle.nosql.driver.values.JsonUtils;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.NullValue;
+import oracle.nosql.driver.values.StringValue;
+
+import org.junit.Assert;
+import org.junit.Test;
+import reactor.adapter.JdkFlowAdapter;
+
+/**
+ * Test queries in async mode
+ */
+public class AsyncQueryTest extends ProxyTestBase {
+
+ private static final boolean showResults = Boolean.getBoolean("test.showresults");
+ private static final int traceLevel = Integer.getInteger("test.tracelevel", 0);
+
+ private final static int MIN_QUERY_COST = 2;
+
+ final static String tableName = "testTable";
+ final static String indexName = "idxName";
+ final static String jsonTable = "jsonTable";
+ /* timeout for all table operations */
+ final static int timeout = 20000;
+
+ /* Create a table */
+ final static String createTableDDL =
+ "CREATE TABLE IF NOT EXISTS testTable (" +
+ "sid INTEGER, " +
+ "id INTEGER, " +
+ "name STRING, " +
+ "age INTEGER, " +
+ "state STRING, " +
+ "salary LONG, " +
+ "array ARRAY(INTEGER), " +
+ "longString STRING," +
+ "PRIMARY KEY(SHARD(sid), id))";
+
+ final boolean multishard = false; /* TBD */
+
+ /* Create an index on testTable(name) */
+ final String createIdxNameDDL =
+ "CREATE INDEX IF NOT EXISTS idxName on testTable(name)";
+
+ /* Create an index on testTable(sid, age)*/
+ final String createIdxSidAgeDDL =
+ "CREATE INDEX IF NOT EXISTS idxSidAge ON testTable(sid, age)";
+
+ /* Create an index on testTable(state, age)*/
+ final String createIdxStateAgeDDL =
+ "CREATE INDEX IF NOT EXISTS idxStateAge ON testTable(state, age)";
+
+    /* Create an index on testTable(array[]) */
+ final String createIdxArrayDDL =
+ "CREATE INDEX IF NOT EXISTS idxArray ON testTable(array[])";
+
+ /* Create a table with Json field */
+ final static String createJsonTableDDL =
+ "CREATE TABLE IF NOT EXISTS jsonTable (id INTEGER, info JSON, " +
+ "PRIMARY KEY(id))";
+
+ /* Create a table with 2 major keys, used in testIllegalQuery() */
+ final static String createTestTableDDL =
+ "CREATE TABLE IF NOT EXISTS test (" +
+ "sid1 INTEGER, " +
+ "sid2 INTEGER, " +
+ "id INTEGER, " +
+ "name STRING, " +
+ "PRIMARY KEY(SHARD(sid1, sid2), id))";
+
+ final static String createIdxSid1NameDDL =
+ "CREATE INDEX IF NOT EXISTS idxSid1Name ON test(sid1, name)";
+
+ final static String createIdxNameSid1Sid2DDL =
+ "CREATE INDEX IF NOT EXISTS idxNameSid1Sid2 ON test(name, sid1, sid2)";
+
+ @Override
+ public void beforeTest() throws Exception {
+ super.beforeTest();
+
+ tableOperationAsync(asyncHandle, createTableDDL,
+ new TableLimits(45000, 15000, 50)).join();
+
+ tableOperationAsync(asyncHandle, createIdxNameDDL, null).join();
+ }
+
+ @Override
+ public void afterTest() throws Exception {
+ tableOperationAsync(asyncHandle, "DROP TABLE IF EXISTS " + tableName,
+ null).join();
+ super.afterTest();
+ }
+
+ @Test
+ public void testQuery() {
+
+ final String fullQuery = "select * from testTable";
+ final String predQuery = "select * from testTable where sid > 7";
+ final String updateQuery =
+ "update testTable f set f.name = 'joe' where sid = 9 and id = 9 ";
+ final String getQuery =
+ "select name from testTable where sid = 9 and id = 9 ";
+ final String queryWithVariables =
+ "declare $sid integer; $id integer;" +
+ "select name from testTable where sid = $sid and id >= $id";
+ final String queryWithSort =
+ "select * from testTable where sid = 0 order by sid, id";
+
+ final int numMajor = 10;
+ final int numPerMajor = 10;
+ final int numRows = numMajor * numPerMajor;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, 1);
+
+ /*
+ * Perform a simple query
+ */
+ executeQuery(predQuery, null, 20, 0, false);
+
+ /*
+ * Perform an update query
+ */
+ try (QueryRequest queryRequest = newQueryRequest()) {
+ queryRequest.setStatement(updateQuery);
+ asyncHandle.query(queryRequest).join();
+ }
+
+ /*
+ * Use a simple get query to validate the update
+ */
+ try (QueryRequest queryRequest = newQueryRequest()) {
+ queryRequest.setStatement(getQuery);
+ QueryResult queryRes = asyncHandle.query(queryRequest).join();
+ assertEquals(1, queryRes.getResults().size());
+ assertEquals("joe",
+ queryRes.getResults().get(0).get("name").getString());
+
+ /* full scan to count rows */
+ executeQuery(fullQuery, null, numRows, 0, false /* usePrepStmt */);
+ executeQuery(fullQuery, null, numRows, 0, true /* usePrepStmt */);
+ }
+ /*
+ * Query with external variables
+ */
+ Map variables = new HashMap<>();
+ variables.put("$sid", new IntegerValue(9));
+ variables.put("$id", new StringValue("3"));
+ executeQuery(queryWithVariables, variables, 7, 0, true);
+
+ /* Query with sort */
+ executeQuery(queryWithSort, null, numPerMajor, 0,
+ false /* usePrepStmt */);
+ executeQuery(queryWithSort, null, numPerMajor, 0,
+ true /* usePrepStmt */);
+ }
+
+ /**
+     * Test query with number-based and size-based limits
+ */
+ @Test
+ public void testLimits() {
+ final int numMajor = 10;
+ final int numPerMajor = 101;
+ final int numRows = numMajor * numPerMajor;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ /*
+ * number-based limit
+ */
+
+ /* Read rows from all partitions with number-based limit. */
+ String query = "select * from testTable";
+ int expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numRows /* numReadRows */,
+ numRows /* numReadKeys */);
+ int expCnt = numRows;
+ int[] limits = new int[] {0, 20, 100, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, limit, 0, recordKB);
+ }
+
+ /* Read rows from single partition with number-based limit. */
+ query = "select * from testTable where sid = 5";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numPerMajor /* numReadRows */,
+ numPerMajor /* numReadKeys */);
+ expCnt = numPerMajor;
+ limits = new int[] {0, 20, 100, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, false /* keyOnly */, false /* indexScan */,
+ expCnt, expReadKB, limit, 0, recordKB);
+ }
+
+ /* Read rows from all shards with number-based limit. */
+ query = "select * from testTable where name = 'name_1'";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numMajor /* numReadRows */,
+ numMajor /* numReadKeys */);
+ expCnt = numMajor;
+ limits = new int[] {0, 5, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, false /* keyOnly */, true /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */, recordKB);
+ }
+
+ /*
+ * Size-based limit
+ */
+
+ /* Read rows from all partitions with size limit. */
+ query = "select * from testTable";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numRows /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = numRows;
+ int[] maxReadKBs = new int[] {0, 500, 1000, 2000};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ /* Read rows from single partition with size limit. */
+ query = "select * from testTable where sid = 5";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numPerMajor /* numReadRows */,
+ numPerMajor /* numReadKeys */);
+ expCnt = numPerMajor;
+ maxReadKBs = new int[] {0, 50, 100, 250};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false /* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ /* Read rows from all shards with size limit. */
+ query = "select * from testTable where name = \"name_1\"";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numMajor /* numReadRows */,
+ numMajor /* numReadKeys */);
+ expCnt = numMajor;
+ maxReadKBs = new int[] {0, 5, 10, 25};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true /* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ /*
+ * Number-based and size-based limit
+ */
+
+ /* Read rows from all partitions with number and size limit. */
+ query = "select * from testTable";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numRows /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = numRows;
+ executeQuery(query, false /* keyOnly */, false/* indexScan */, expCnt,
+ expReadKB, 50 /* numLimit */, 100 /* sizeLimit */,
+ recordKB);
+
+ /* Read rows from single partition with number and size limit. */
+ query = "select * from testTable where sid = 5";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numPerMajor /* numReadRows */,
+ numPerMajor /* numReadKeys */);
+ expCnt = numPerMajor;
+ executeQuery(query, false /* keyOnly */, false/* indexScan */, expCnt,
+ expReadKB, 10 /* numLimit */, 20 /* sizeLimit */, recordKB);
+
+ /* Read rows from all shards with number and size limit. */
+ query = "select * from testTable where name = \"name_1\"";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numMajor /* numReadRows */,
+ numMajor /* numReadKeys */);
+ expCnt = numMajor;
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 5 /* numLimit */, 10 /* sizeLimit */,
+ recordKB);
+ }
+
+ @Test
+ public void testDupElim() {
+ final int numMajor = 10;
+ final int numPerMajor = 40;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ tableOperationAsync(asyncHandle, createIdxArrayDDL, null).join();
+
+ String query =
+ "select sid, id, t.array[size($)-2:] " +
+ "from testTable t " +
+ "where t.array[] >any 11";
+
+ /* Prepare first, then execute */
+ executeQuery(query, null, 200, 20, true);
+ }
+
+ @Test
+ public void testOrderByPartitions() {
+ final int numMajor = 5;
+ final int numPerMajor = 10;
+ final int numRows = numMajor * numPerMajor;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ tableOperationAsync(asyncHandle, createIdxStateAgeDDL, null).join();
+
+ String query;
+ int expReadKB, expCnt;
+ int[] maxReadKBs;
+
+ /*
+ * Case 1: partial key
+ */
+ query = "select sid, id, name, state " +
+ "from testTable " +
+ "order by sid ";
+
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numRows /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = numRows;
+ maxReadKBs = new int[] {0, 4, 25, 37, 66};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.EVENTUAL, false, null);
+ }
+
+ /*
+ * Case 2: partial key offset limit
+ */
+ query = "select sid, id, name, state " +
+ "from testTable " +
+ "order by sid " +
+ "limit 10 offset 4";
+
+ expCnt = 10;
+ maxReadKBs = new int[] {0, 5, 6, 7, 8, 9, 20, 44, 81};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, -1 /*expReadKB*/, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.EVENTUAL, false, null);
+ }
+
+ /*
+ * Case 3: partial key offset limit
+ */
+ query = "select sid, id, name, state " +
+ "from testTable " +
+ "order by sid " +
+ "limit 5 offset 44";
+
+ expCnt = 5;
+ maxReadKBs = new int[] {0, 5, 14, 51, 88};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, -1 /*expReadKB*/, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.EVENTUAL, false, null);
+ }
+ }
+
+ @Test
+ public void testGroupByPartitions() {
+
+ final int numMajor = 5;
+ final int numPerMajor = 10;
+ final int numRows = numMajor * numPerMajor;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ tableOperationAsync(asyncHandle, createIdxStateAgeDDL, null).join();
+
+ String query;
+ int expReadKB, expCnt;
+ int[] maxReadKBs;
+
+
+ /*
+ * Case 1
+ */
+ query = "select sid, count(*) as cnt, sum(salary) as sum " +
+ "from testTable " +
+ "group by sid";
+
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numRows /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = 5;
+ /* maxReadKBs = new int[] {0, 4, 25, 37, 66}; */
+ maxReadKBs = new int[] {0};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.EVENTUAL, false, null);
+ }
+ }
+
+ @Test
+ public void testOrderByShards() {
+
+ final int numMajor = 10;
+ final int numPerMajor = 40;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ tableOperationAsync(asyncHandle, createIdxStateAgeDDL, null).join();
+
+ String query;
+ int expReadKB, expCnt;
+ int[] maxReadKBs;
+
+ /*
+ * Case 1: multi-shard, covering index
+ */
+ query = "select sid, id, state " +
+ "from testTable " +
+ "order by state " +
+ "limit 20 offset 4";
+
+ if (multishard) {
+ /*
+ * readKBs are not deterministic with multishard
+ * See KVSTORE-649
+ */
+ expReadKB = -1;
+ } else {
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ 24 /* numReadKeys */);
+ }
+
+ expCnt = 20;
+ maxReadKBs = new int[] {0, 5, 7, 11};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.EVENTUAL, false, null);
+ }
+
+ /*
+ * Case 2: multi-shard, non-covering index
+ */
+ query = "select sid, id, state, salary " +
+ "from testTable " +
+ "order by state " +
+ "limit 27 offset 5";
+
+ if (multishard) {
+ /*
+ * readKBs are not deterministic with multishard
+ * See KVSTORE-649
+ */
+ expReadKB = -1;
+ } else {
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ 32 /* numReadRows */,
+ 32 /* numReadKeys */);
+ }
+
+ expCnt = 27;
+ maxReadKBs = new int[] {6, 7, 8};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.EVENTUAL, false, null);
+ }
+
+ /*
+ * Case 3: single-partition, non-covering index
+ */
+ query = "select sid, id, state, salary " +
+ "from testTable " +
+ "where sid = 3 " +
+ "order by sid, id " +
+ "limit 27 offset 5";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ 32 /* numReadRows */,
+ 32 /* numReadKeys */);
+ expCnt = 27;
+ maxReadKBs = new int[] {4, 5, 12};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.EVENTUAL, false, null);
+ }
+ }
+
+ @Test
+ public void testGroupByShards() {
+ final int numMajor = 10;
+ final int numPerMajor = 101;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ String query;
+ int expReadKB, expCnt;
+ int[] maxReadKBs;
+
+ tableOperationAsync(asyncHandle, createIdxStateAgeDDL, null).join();
+ /*
+ * Case 1.
+ */
+ query = "select count(*) from testTable where state = \"CA\"";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ 210);
+ expCnt = 1;
+ /* size-based limit */
+ maxReadKBs = new int[] {10, 17, 23, 37, 209, 210, 500};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ /*
+ * Case 2.
+ * sum(salary) = 165000
+ */
+ query = "select count(*), sum(salary) from testTable " +
+ "where state = \"VT\"";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ 200 /* numReadRows */,
+ 200 /* numReadKeys */);
+ expCnt = 1;
+ /* size-based limit */
+ maxReadKBs = new int[] {9, 19, 31, 44, 200, 500};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ /* Prepare first, then execute */
+ executeQuery(query, null, 1, 22, true);
+
+ /*
+ * Case 3.
+ */
+ query = "select state, count(*) from testTable group by state";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ 1010);
+ expCnt = 5;
+ /* size-based limit */
+ maxReadKBs = new int[] {30};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ /*
+ * Case 4.
+ */
+ query =
+ "select state, " +
+ " count(*) as cnt, " +
+ " sum(salary) as sum, " +
+ " avg(salary) as avg " +
+ "from testTable "+
+ "group by state";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ 1010 /* numReadRows */,
+ 1010);
+ expCnt = 5;
+ /* size-based limit */
+ maxReadKBs = new int[] {34};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ }
+
+ /**
+     * Test group-by query with number-based limit and/or size-based limits
+ *
+ * 1. Single partition scan, key-only
+ * select count(*) from testTable where sid = 1
+ *
+ * 2. Single partition scan, key + row
+ * select min(name), min(age) from testTable where sid = 1
+ *
+ * 3. All partitions scan, key only
+ * select count(*) from testTable group by sid
+ *
+ * 4. All partitions scan, key + row
+ * select min(name) from testTable group by sid
+ *
+ * 5. All shards scan, key only
+ * select count(*) from testTable group by sid, name
+ *
+ * 6. All shards scan, key + row
+ * select max(name) from testTable group by sid, name
+ *
+ * 7. All partitions scan, key only, single row returned.
+ * select count(*) from testTable
+ *
+ * 8. All shards scan, key only, single row returned.
+ * select min(name) from testTable
+ */
+ @Test
+ public void testGroupByWithLimits() {
+ final int numMajor = 10;
+ final int numPerMajor = 101;
+ final int numRows = numMajor * numPerMajor;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ String query;
+ int expReadKB, expCnt;
+ int[] limits, maxReadKBs;
+
+ tableOperationAsync(asyncHandle, createIdxSidAgeDDL, null).join();
+
+ /*
+ * Case: Single partition scan, key only
+ */
+ query = "select count(*) from testTable where sid = 1";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ numPerMajor /* numReadKeys */);
+ expCnt = 1;
+ /* number-based limit */
+ limits = new int[] {0, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, true /* keyOnly */, false /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 50, 100, 101};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+ /* number-based and size-based limit */
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 1 /* numLimit */, 50 /* maxReadKB */,
+ recordKB);
+
+ /*
+ * Case 2: Single partition scan, key + row
+ */
+ query = "select min(salary), min(age) from testTable where sid = 1";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numPerMajor /* numReadRows */,
+ numPerMajor /* numReadKeys */);
+ expCnt = 1;
+ /* number-based limit */
+ limits = new int[] {0, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, false /* keyOnly */, false /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 10, 100, 300, 303};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+ /* number-based limit + size-based limit */
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 1 /* numLimit */, 200 /* maxReadKB */,
+ recordKB);
+
+ /*
+ * Case 3: All partitions scan, key only
+ */
+ query = "select count(*) from testTable group by sid";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = numMajor;
+ /* number-based limit */
+ limits = new int[] {0, 5, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, true /* keyOnly */, false /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+ /* number-based limit + size-based limit */
+ executeQuery(query, true /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 1 /* numLimit */, 200 /* maxReadKB */,
+ recordKB);
+ executeQuery(query, true /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 2 /* numLimit */, 200 /* maxReadKB */,
+ recordKB);
+ executeQuery(query, true /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 5 /* numLimit */, 200 /* maxReadKB */,
+ recordKB);
+
+ /*
+ * Case 4: All partitions scan, key + row
+ */
+ query = "select min(salary) from testTable group by sid";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numRows /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = numMajor;
+ /* number-based limit */
+ limits = new int[] {0, 5, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, false /* keyOnly */, false /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 10, 100, 500, 1000, 2047};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+ /* number-based limit + size-based limit */
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 1 /* numLimit */, 400 /* maxReadKB */,
+ recordKB);
+ executeQuery(query, false /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 3 /* numLimit */, 400 /* maxReadKB */,
+ recordKB);
+
+ /*
+         * Case 5: All shards scan, key only
+ */
+ query = "select count(*) from testTable group by sid, age";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = numMajor * 10;
+
+ /* number-based limit */
+ limits = new int[] {0, 5, 50, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, true /* keyOnly */, true /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ /* number-based and size-based limit */
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 10 /* numLimit */, 100, recordKB);
+
+ /*
+         * Case 6: All shards scan, key + row
+ */
+ query = "select max(salary) from testTable group by sid, age";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ numRows /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = numMajor * 10;
+
+ /* number-based limit */
+ limits = new int[] {0, 5, 50, expCnt, expCnt + 1};
+ for (int limit : limits) {
+ executeQuery(query, false /* keyOnly */, true /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 10, 100, 500, 1000, 2047};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 10 /* numLimit */, 300 /* maxReadKB */,
+ recordKB);
+
+ /*
+ * Case 7: All partitions scan, key only. Single row returned.
+ */
+ query = "select count(*) from testTable";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = 1;
+ /* number-based limits */
+ limits = new int[] {0, 1};
+ for (int limit : limits) {
+ executeQuery(query, true /* keyOnly */, false /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010 };
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+ /* number-based limit + size-based limit */
+ executeQuery(query, true /* keyOnly */, false/* indexScan */,
+ expCnt, expReadKB, 1 /* numLimit */, 500 /* maxReadKB */,
+ recordKB);
+
+ /*
+ * Case 8: All shards scan, key only. Single row returned.
+ */
+ query = "select min(name) from testTable";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ numRows /* numReadKeys */);
+ expCnt = 1;
+ /* number-based limits */
+ limits = new int[] {0, 1};
+ for (int limit : limits) {
+ executeQuery(query, true /* keyOnly */, true /* indexScan */,
+ expCnt, expReadKB, limit, 0 /* maxReadKB */,
+ recordKB);
+ }
+ /* size-based limit */
+ maxReadKBs = new int[] {0, 10, 100, 500, 1000, 1010 };
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB);
+ }
+ /* number-based limit + size-based limit */
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 1 /* numLimit */, 500 /* maxReadKB */,
+ recordKB);
+ }
+
+ @Test
+ public void testDelete() {
+ final int numMajor = 5;
+ final int numPerMajor = 100;
+ final int recordKB = 4;
+
+ tableOperationAsync(asyncHandle, createIdxStateAgeDDL, null).join();
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ int expReadKB, expCnt;
+ int[] maxReadKBs;
+ String query;
+
+ /*
+ * Case 1. ALL_SHARDS delete, without RETURNING, covering index
+ * 100 rows will be deleted. 200 key-reads will be performed
+ */
+ query = "delete from testTable where state = \"CA\"";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ 200/*numReadKeys*/);
+
+ expCnt = 1;
+ maxReadKBs = new int[] {10};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.ABSOLUTE, false, null);
+ }
+
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ /*
+ * Case 2. ALL_SHARDS delete, with RETURNING, covering index
+ * 100 rows will be deleted. 200 key-reads will be performed
+ */
+ query = "delete from testTable where state = \"CA\" returning id";
+ expReadKB = getExpReadKB(true /* keyOnly */, recordKB,
+ 0 /* numReadRows */,
+ 200/*numReadKeys*/);
+ expCnt = 100;
+ maxReadKBs = new int[] {10};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, true /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.ABSOLUTE, false, null);
+ }
+
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ /*
+ * Case 3 ALL_SHARDS delete, with RETURNING, non-covering index
+ * 100 rows will be deleted. 200 key-reads will be performed
+ */
+ query = "delete from testTable where state = \"CA\" " +
+ "returning sid, id, name";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ 100 /* numReadRows */,
+ 200/*numReadKeys*/);
+ expCnt = 100;
+ maxReadKBs = new int[] {10};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.ABSOLUTE, false, null);
+ }
+
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ /*
+ * Case 4. ALL_SHARDS delete, without RETURNING, non-covering index
+ * 100 rows will be deleted. 200 key-reads will be performed
+ */
+ query = "delete from testTable where state = \"CA\" and name != \"abc\"";
+ expReadKB = getExpReadKB(false /* keyOnly */, recordKB,
+ 100 /* numReadRows */,
+ 200/*numReadKeys*/);
+ expCnt = 1;
+ maxReadKBs = new int[] {13};
+ for (int maxReadKB : maxReadKBs) {
+ executeQuery(query, false /* keyOnly */, true/* indexScan */,
+ expCnt, expReadKB, 0 /* numLimit */, maxReadKB,
+ recordKB, Consistency.ABSOLUTE, false, null);
+ }
+ }
+
+ @Test
+ public void testInsert() {
+ final int numMajor = 1;
+ final int numPerMajor = 10;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+
+ QueryRequest req;
+ QueryResult ret;
+
+ /* Insert a new row */
+ int newRecordKB = 8;
+ String longString = genString(newRecordKB * 1024);
+ String query =
+ "insert into testTable values " +
+ "(1, 15, \"myname\", 23, \"WI\", 2500, [], \"" +
+ longString + "\")";
+
+ req = newQueryRequest();
+ req.setStatement(query);
+ ret = asyncHandle.query(req).join();
+
+ assertEquals(1, ret.getResults().size());
+
+ query = "select sid, id, name from testTable where id = 15";
+ req = newQueryRequest();
+ req.setStatement(query);
+ ret = asyncHandle.query(req).join();
+ assertEquals(1, ret.getResults().size());
+ MapValue res = ret.getResults().get(0);
+ FieldValue name = res.get("name");
+ assertEquals("myname", name.getString());
+ }
+
+ @Test
+ public void testUpdatePrepared() throws Throwable {
+ assumeKVVersion("testUpdatePrepared", 21, 3, 1);
+ final int numMajor = 1;
+ final int numPerMajor = 10;
+ final int recordKB = 2;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+ String longString = genString(1024);
+ /* Update using preparedStatement */
+ String query = "declare $sval string; $sid integer; $id integer;" +
+ "update testTable set longString = $sval " +
+ "where sid = $sid and id = $id returning sid";
+ PrepareRequest prepReq = new PrepareRequest()
+ .setStatement(query);
+
+ final AtomicReference req = new AtomicReference<>();
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicReference ex = new AtomicReference<>();
+ asyncHandle.prepare(prepReq)
+ .thenCompose(prepRet -> {
+ assertNotNull(prepRet.getPreparedStatement());
+ prepRet.getPreparedStatement()
+ .setVariable("$sval", new StringValue(longString))
+ .setVariable("$sid", new IntegerValue(0))
+ .setVariable("$id", new IntegerValue(1));
+ req.set(newQueryRequest());
+ req.get().setPreparedStatement(prepRet);
+ return asyncHandle.query(req.get());
+ })
+ .thenApply(res -> {
+ assertNotNull(res.getResults());
+ return res;
+ })
+ .whenComplete((res, err) -> {
+ if (err != null) {
+ ex.set(err);
+ }
+ req.get().close();
+ latch.countDown();
+ });
+ latch.await();
+ if (ex.get() != null) {
+ throw ex.get();
+ }
+ }
+
+ @Test
+ public void testPreparedLongRunning() {
+ final int numMajor = 1;
+ final int numPerMajor = 10;
+ final int recordKB = 2;
+
+ /* This test is only run in specific configurations */
+ assumeTrue(Boolean.getBoolean("test.longrunning"));
+
+ /* Load rows to table */
+ verbose("Loading rows into table...");
+ loadRowsToScanTable(numMajor, numPerMajor, recordKB);
+ verbose("Loaded all rows");
+ String longString = genString(1024);
+
+ /* Update using preparedStatement */
+ String query = "declare $sval string; $sid integer; $id integer;" +
+ "update testTable set longString = $sval " +
+ "where sid = $sid and id = $id returning sid";
+ PrepareRequest prepReq = new PrepareRequest()
+ .setStatement(query);
+ PrepareResult prepRet = asyncHandle.prepare(prepReq).join();
+
+ PreparedStatement ps = prepRet.getPreparedStatement();
+ assertNotNull(ps);
+
+ int total=0;
+ int passed=0;
+ int exceptions=0;
+ int timeouts=0;
+ int nullResults=0;
+ boolean lastPassed = false;
+
+ long runMs = Long.getLong("test.runms", 100000);
+ long delayMs = Long.getLong("test.delayms", 100);
+
+ /* run for N milliseconds, with M milliseconds delay between queries */
+ long startMs = System.currentTimeMillis();
+ while (true) {
+ lastPassed = false;
+ ps.setVariable("$sval", new StringValue(longString))
+ .setVariable("$sid", new IntegerValue(0))
+ .setVariable("$id", new IntegerValue(1));
+
+ try (QueryRequest req = newQueryRequest()) {
+ req.setPreparedStatement(prepRet);
+ total++;
+ verbose("Running query #" + total + "...");
+ QueryResult res = ConcurrentUtil.awaitFuture(
+ asyncHandle.query(req));
+ if (res == null) {
+ verbose(" got null result");
+ nullResults++;
+ } else {
+ passed++;
+ lastPassed = true;
+ }
+ } catch (RequestTimeoutException rte) {
+ /* timeouts are (possibly) expected */
+ timeouts++;
+ verbose(" got request timeout");
+ } catch (Exception e) {
+ exceptions++;
+ verbose(" got exception: " + e);
+ }
+ if ((System.currentTimeMillis() - startMs) > runMs) {
+ break;
+ }
+ try {
+ verbose("Sleeping for " + delayMs + "ms...");
+ Thread.sleep(delayMs);
+ } catch (Exception unused) {}
+ }
+ verbose("Finished: total=" + total + ", pass=" + passed +
+ ", timeouts=" + timeouts + ", exceptions=" + exceptions +
+ ", nullResults=" + nullResults);
+ assertEquals("Unexpected number of exceptions. Expected zero, got " +
+ exceptions, 0, exceptions);
+ assertEquals("Unexpected number of null results. Expected zero, got " +
+ nullResults, 0, nullResults);
+ assertTrue("Expected last request to pass, but it failed", lastPassed);
+ }
+
+ /**
+ * Returns the estimated readKB.
+ */
+ private int getExpReadKB(boolean keyOnly,
+ int recordKB,
+ int numReadRows,
+ int numReadKeys) {
+ final int minRead = 1;
+ int readKB = numReadKeys * minRead;
+ if (!keyOnly) {
+ readKB += numReadRows * recordKB;
+ }
+ return readKB == 0 ? minRead : readKB;
+ }
+
+ /*
+ * Test illegal cases -- both prepared statement and string
+ */
+ @Test
+ public void testIllegalQuery() {
+
+ PrepareRequest prepReq;
+ QueryRequest queryReq;
+ String query;
+
+ final String queryWithVariables =
+ "declare $sid integer; $id integer;" +
+ "select name from testTable where sid = $sid and id >= $id";
+
+ /* Syntax error */
+ prepReq = new PrepareRequest().setStatement("random string");
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.prepare(prepReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException iae) {}
+
+ queryReq = newQueryRequest();
+ queryReq.setStatement("random string");
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException iae) {}
+
+ /* Try a query that requires external variables that are missing */
+ queryReq = newQueryRequest();
+ queryReq.setStatement(queryWithVariables);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ prepReq = new PrepareRequest().setStatement(queryWithVariables);
+ PrepareResult prepRes = handle.prepare(prepReq);
+ queryReq = newQueryRequest();
+ queryReq.setPreparedStatement(prepRes);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /* Wrong name of variables */
+ prepReq = new PrepareRequest().setStatement(queryWithVariables);
+ prepRes = handle.prepare(prepReq);
+ PreparedStatement prepStmt = prepRes.getPreparedStatement();
+ prepStmt.setVariable("sid", new IntegerValue(9));
+ prepStmt.setVariable("id", new IntegerValue(3));
+ queryReq = newQueryRequest();
+ queryReq.setPreparedStatement(prepRes);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException ex) {
+ }
+
+ /* Wrong type for variables */
+ prepReq = new PrepareRequest().setStatement(queryWithVariables);
+ prepRes = handle.prepare(prepReq);
+ prepStmt = prepRes.getPreparedStatement();
+ prepStmt.setVariable("$sid", new DoubleValue(9.1d));
+ prepStmt.setVariable("$id", new IntegerValue(3));
+ queryReq = newQueryRequest();
+ queryReq.setPreparedStatement(prepRes);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /* Table not found */
+ query = "select * from invalidTable";
+ prepReq = new PrepareRequest().setStatement(query);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.prepare(prepReq));
+ fail("prepare should have failed");
+ } catch (TableNotFoundException tnfe) {
+ }
+
+ queryReq = newQueryRequest();
+ queryReq.setStatement(query);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (TableNotFoundException tnfe) {
+ }
+
+ /* Invalid column */
+ query = "select * from testTable where invalidColumn = 1";
+ prepReq = new PrepareRequest().setStatement(query);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.prepare(prepReq));
+ fail("prepare should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ queryReq = newQueryRequest();
+ queryReq.setStatement(query);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException tnfe) {
+ }
+
+ /* Prepare or execute Ddl statement */
+ query = "create table t1(id integer, name string, primary key(id))";
+ prepReq = new PrepareRequest().setStatement(query);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.prepare(prepReq));
+ fail("prepare should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ queryReq = newQueryRequest();
+ queryReq.setStatement(query);
+ try {
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("query should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ queryReq = newQueryRequest();
+ queryReq.setStatement(query);
+ try {
+ queryReq.setLimit(-1);
+ ConcurrentUtil.awaitFuture(asyncHandle.query(queryReq));
+ fail("QueryRequest.setLimit() should fail with IAE");
+ } catch (IllegalArgumentException iae) {
+ }
+ queryReq.setLimit(0);
+
+ try {
+ queryReq.setMaxReadKB(-1);
+ fail("QueryRequest.setMaxReadKB() should fail with IAE");
+ } catch (IllegalArgumentException iae) {
+ }
+
+
+ /*
+ * Namespaces, child tables and identity columns are not
+ * yet supported
+ */
+ String statement =
+ "create table ns:foo(id integer, primary key(id))";
+ try {
+ tableOperationAsync(asyncHandle, statement,
+ new TableLimits(10, 10, 10)).join();
+ fail("Namespaces not supported in table names");
+ } catch (Exception e) {
+ assertTrue(e.getMessage().toLowerCase().contains("namespace"));
+ }
+
+ statement = "drop table ns:foo";
+ try {
+ tableOperationAsync(asyncHandle, statement,
+ new TableLimits(10, 10, 10)).join();
+ fail("Namespaces not supported in table names");
+ } catch (Exception e) {
+ if (onprem) {
+ assertTrue(e.getCause() instanceof TableNotFoundException);
+ } else {
+ assertTrue(e.getMessage().toLowerCase()
+ .contains("namespace"));
+ }
+ }
+
+ statement = "select * from ns:foo";
+ try {
+ executeQuery(statement, null, 0, 0, false);
+ fail("Query with namespaced table not supported");
+ } catch (Throwable t) {
+ if (onprem) {
+ assertTrue(t instanceof TableNotFoundException);
+ } else {
+ assertTrue(t.getMessage().toLowerCase()
+ .contains("namespace"));
+ }
+ }
+
+ statement = "create namespace myns";
+ try {
+ tableOperationAsync(asyncHandle, statement,
+ new TableLimits(10, 10, 10)).join();
+ if (!onprem) {
+ fail("Creating namespaces not supported");
+ }
+ } catch (Exception e) {
+ assertTrue(e.getMessage().toLowerCase().contains("namespace"));
+ }
+
+ statement = "drop namespace myns";
+ try {
+ tableOperationAsync(asyncHandle, statement,
+ new TableLimits(10, 10, 10)).join();
+ if (!onprem) {
+ fail("Dropping namespaces not supported");
+ }
+ } catch (Exception e) {
+ assertTrue(e.getMessage().toLowerCase().contains("namespace"));
+ }
+
+ statement = "create table a.foo(id integer, primary key(id))";
+ try {
+ tableOperationAsync(asyncHandle, statement,
+ new TableLimits(10, 10, 10)).join();
+ fail("Child tables not supported in table names");
+ } catch (Exception e) {
+ assertTrue((e.getCause() instanceof TableNotFoundException) ||
+ (e.getCause() instanceof IllegalArgumentException));
+ }
+ }
+
+ @Test
+ public void testJson() {
+ final String[] jsonRecords = {
+ "{" +
+ " \"id\":0," +
+ " \"info\":" +
+ " {" +
+ " \"firstName\":\"first0\", \"lastName\":\"last0\",\"age\":10," +
+ " \"address\":" +
+ " {" +
+ " \"city\": \"San Fransisco\"," +
+ " \"state\" : \"CA\"," +
+ " \"phones\" : [" +
+ " { \"areacode\" : 408, \"number\" : 50," +
+ " \"kind\" : \"home\" }," +
+ " { \"areacode\" : 650, \"number\" : 51," +
+ " \"kind\" : \"work\" }," +
+ " \"650-234-4556\"," +
+ " 650234455" +
+ " ]" +
+ " }," +
+ " \"children\":" +
+ " {" +
+ " \"Anna\" : { \"age\" : 10, \"school\" : \"sch_1\"," +
+ " \"friends\" : [\"Anna\", \"John\", \"Maria\"]}," +
+ " \"Lisa\" : { \"age\" : 12, \"friends\" : [\"Ada\"]}" +
+ " }" +
+ " }" +
+ "}",
+
+ "{" +
+ " \"id\":1," +
+ " \"info\":" +
+ " {" +
+ " \"firstName\":\"first1\", \"lastName\":\"last1\",\"age\":11," +
+ " \"address\":" +
+ " {" +
+ " \"city\" : \"Boston\"," +
+ " \"state\" : \"MA\"," +
+ " \"phones\" : [ { \"areacode\" : 304, \"number\" : 30," +
+ " \"kind\" : \"work\" }," +
+ " { \"areacode\" : 318, \"number\" : 31," +
+ " \"kind\" : \"work\" }," +
+ " { \"areacode\" : 400, \"number\" : 41," +
+ " \"kind\" : \"home\" }]" +
+ " }," +
+ " \"children\":" +
+ " {" +
+ " \"Anna\" : { \"age\" : 9, \"school\" : \"sch_1\"," +
+ " \"friends\" : [\"Bobby\", \"John\", null]}," +
+ " \"Mark\" : { \"age\" : 4, \"school\" : \"sch_1\"," +
+ " \"friends\" : [\"George\"]}," +
+ " \"Dave\" : { \"age\" : 15, \"school\" : \"sch_3\"," +
+ " \"friends\" : [\"Bill\", \"Sam\"]}" +
+ " }" +
+ " }" +
+ "}",
+
+ "{" +
+ " \"id\":2," +
+ " \"info\":" +
+ " {" +
+ " \"firstName\":\"first2\", \"lastName\":\"last2\",\"age\":12," +
+ " \"address\":" +
+ " {" +
+ " \"city\" : \"Portland\"," +
+ " \"state\" : \"OR\"," +
+ " \"phones\" : [ { \"areacode\" : 104, \"number\" : 10," +
+ " \"kind\" : \"home\" }," +
+ " { \"areacode\" : 118, \"number\" : 11," +
+ " \"kind\" : \"work\" } ]" +
+ " }," +
+ " \"children\":" +
+ " {" +
+ " }" +
+ " }" +
+ "}",
+
+ "{ " +
+ " \"id\":3," +
+ " \"info\":" +
+ " {" +
+ " \"firstName\":\"first3\", \"lastName\":\"last3\",\"age\":13," +
+ " \"address\":" +
+ " {" +
+ " \"city\" : \"Seattle\"," +
+ " \"state\" : \"WA\"," +
+ " \"phones\" : null" +
+ " }," +
+ " \"children\":" +
+ " {" +
+ " \"George\" : { \"age\" : 7, \"school\" : \"sch_2\"," +
+ " \"friends\" : [\"Bill\", \"Mark\"]}," +
+ " \"Matt\" : { \"age\" : 14, \"school\" : \"sch_2\"," +
+ " \"friends\" : [\"Bill\"]}" +
+ " }" +
+ " }" +
+ "}"
+ };
+
+ String query;
+ Map bindValues = new HashMap();
+
+ tableOperationAsync(asyncHandle, createJsonTableDDL,
+ new TableLimits(15000, 15000, 50)).join();
+
+ loadRowsToTable(jsonTable, jsonRecords);
+
+ /* Basic query on a table with JSON field */
+ query = "select id, f.info from jsonTable f";
+ executeQuery(query, null, 4, 0, false /* usePrepStmt */);
+
+ /* Test JsonNull */
+ query = "select id from jsonTable f where f.info.address.phones = null";
+ executeQuery(query, null, 1, 0, false /* usePrepStmt */);
+
+ /* Bind JsonNull value */
+ query = "declare $phones json;" +
+ "select id, f.info.address.phones " +
+ "from jsonTable f " +
+ "where f.info.address.phones != $phones";
+ bindValues.put("$phones", JsonNullValue.getInstance());
+ executeQuery(query, bindValues, 3, 0, true /* usePrepStmt */);
+
+ /* Bind 2 String values */
+ query = "declare $city string;$name string;" +
+ "select id, f.info.address.city, f.info.children.keys() " +
+ "from jsonTable f " +
+ "where f.info.address.city = $city and " +
+ " not f.info.children.keys() =any $name";
+ bindValues.clear();
+ bindValues.put("$city", new StringValue("Portland"));
+ bindValues.put("$name", new StringValue("John"));
+ executeQuery(query, bindValues, 1, 0, true /* usePrepStmt */);
+
+ /* Bind MapValue */
+ query = "declare $child json;" +
+ "select id, f.info.children.values() " +
+ "from jsonTable f " +
+ "where f.info.children.values() =any $child";
+ String json = "{\"age\":14, \"school\":\"sch_2\", " +
+ " \"friends\":[\"Bill\"]}";
+ bindValues.clear();
+ bindValues.put("$child", JsonUtils.createValueFromJson(json, null));
+ executeQuery(query, bindValues, 1, 0, true /* usePrepStmt */);
+
+ /* Bind ArrayValue */
+ query = "declare $friends json;" +
+ "select id, f.info.children.values() " +
+ "from jsonTable f " +
+ "where f.info.children.values().friends =any $friends";
+
+ ArrayValue friends = new ArrayValue();
+ friends.add("Bill");
+ friends.add("Mark");
+ bindValues.clear();
+ bindValues.put("$friends", friends);
+ executeQuery(query, bindValues, 1, 0, true /* usePrepStmt */);
+ }
+
+ @Test
+ public void testPrepare() {
+ String query;
+ PrepareRequest req;
+ PrepareResult ret;
+
+ query = "select * from testTable";
+ req = new PrepareRequest().setStatement(query);
+ ret = asyncHandle.prepare(req).join();
+ if (!onprem) {
+ assertEquals(ret.getReadKB(), getMinQueryCost());
+ assertEquals(0, ret.getWriteKB());
+ }
+
+ query = "declare $sval string; $sid integer; $id integer;" +
+ "update testTable set longString = $sval " +
+ "where sid = $sid and id = $id";
+ req = new PrepareRequest().setStatement(query);
+ ret = asyncHandle.prepare(req).join();
+ if (!onprem) {
+ assertEquals(ret.getReadKB(), getMinQueryCost());
+ assertEquals(0, ret.getWriteKB());
+ }
+ }
+
    /**
     * Prepare a query, use it, evolve table, try again.
     *
     * After the schema change the stale prepared statement must fail;
     * the exact exception type depends on the KV client/server version
     * (see the inline comment below).
     */
    @Test
    public void testEvolution() {

        /* Load rows to table */
        loadRowsToScanTable(1, 10, 2);
        String query = "select age from testTable";
        PrepareRequest prepReq = new PrepareRequest().setStatement(query);
        PrepareResult prepRet = asyncHandle.prepare(prepReq).join();
        assertNotNull(prepRet.getPreparedStatement());

        try(QueryRequest qreq = newQueryRequest()) {
            qreq.setPreparedStatement(prepRet);
            QueryResult qres = asyncHandle.query(qreq).join();
            /* 1 major x 10 rows per major were loaded above */
            assertEquals(10, qres.getResults().size());

            /*
             * Evolve and try the query again. It will fail because table schema
             * has been changed, the query need to be prepared again.
             *
             * The exception caught from query may vary depending on the
             * different proxy and KV version. PrepareQueryException will be
             * thrown for proxy(KVClient 25.1.1) + KV server 25.1.1 or higher.
             * Otherwise, IllegalArgumentException will be thrown.
             */
            tableOperationAsync(asyncHandle,
                                "alter table testTable(drop age)", null).join();

            try {
                ConcurrentUtil.awaitFuture(asyncHandle.query(qreq));
                fail("Query should have failed");
            } catch (PrepareQueryException | IllegalArgumentException ex) {
                /*
                 * If can't determine the versions of KV client and server, skip
                 * checking the specific exception.
                 */
                if (getMinimumKVVersion() > 0) {
                    if (checkKVVersion(25, 1, 1)) {
                        assertTrue(ex instanceof PrepareQueryException);
                    } else {
                        assertTrue(ex instanceof IllegalArgumentException);
                    }
                }
            }
        }
    }
+
+ @Test
+ public void testIdentityAndUUID() {
+ String idName = "testSG";
+ String uuidName = "testUUID";
+ String createTableId =
+ "CREATE TABLE " + idName +
+ "(id INTEGER GENERATED ALWAYS AS IDENTITY, " +
+ "name STRING, " +
+ "PRIMARY KEY(id))";
+ String createTableUUID =
+ "CREATE TABLE " + uuidName +
+ "(id STRING AS UUID GENERATED BY DEFAULT, " +
+ "name STRING, " +
+ "PRIMARY KEY(id))";
+
+ tableOperationAsync(asyncHandle, createTableId,
+ new TableLimits(100, 100, 1)).join();
+
+ /*
+ * Putting a row with a value for "id" should fail because always
+ * generated identity column should not has value.
+ */
+ MapValue value = new MapValue().put("id", 100).put("name", "abc");
+ PutRequest putReq = new PutRequest().setTableName(idName);
+ try {
+ putReq.setValue(value);
+ ConcurrentUtil.awaitFuture(asyncHandle.put(putReq));
+ fail("Expected IAE; a generated always identity " +
+ "column should not have a value");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /*
+ * Putting a row without "id" field should succeed.
+ */
+ value = new MapValue().put("name", "abc");
+ putReq.setValue(value);
+ PutResult putRet = asyncHandle.put(putReq).join();
+ assertNotNull(putRet.getVersion());
+ assertNotNull(putRet.getGeneratedValue());
+
+ if (!checkKVVersion(20, 3, 1)) {
+ return;
+ }
+
+ tableOperationAsync(asyncHandle, createTableUUID,
+ new TableLimits(100, 100, 1)).join();
+
+ /*
+ * Now the UUID table
+ */
+ value = new MapValue().put("id", "abcde").put("name", "abc");
+ putReq = new PutRequest().setTableName(uuidName);
+ try {
+ putReq.setValue(value);
+ ConcurrentUtil.awaitFuture(asyncHandle.put(putReq));
+ fail("Expected IAE; the uuid value set was not a uuid");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /*
+ * Putting a row without "id" field should succeed.
+ */
+ value = new MapValue().put("name", "abc");
+ putReq.setValue(value);
+ putRet = asyncHandle.put(putReq).join();
+ assertNotNull(putRet.getVersion());
+ assertNotNull(putRet.getGeneratedValue());
+ }
+
+ @Test
+ public void testQueryOrder() {
+
+ final String[] declOrder = {
+ "sid", "id", "name", "age", "state","salary", "array", "longString"
+ };
+
+ /* Load rows to table */
+ loadRowsToScanTable(10, 10, 1);
+
+ try (QueryRequest queryReq = newQueryRequest()) {
+ queryReq.setStatement(
+ "select * from testTable where id = 1 and sid = 1");
+
+ QueryResult queryRes = asyncHandle.query(queryReq).join();
+
+ /*
+ * For each result, assert that the fields are all there and in the
+ * expected order.
+ */
+ for (MapValue v : queryRes.getResults()) {
+ assertEquals(declOrder.length, v.size());
+ int i = 0;
+ for (Map.Entry entry : v.entrySet()) {
+ assertEquals(declOrder[i++], entry.getKey());
+ }
+
+ /* perform a get and validate that it also is in decl order */
+ GetRequest getReq = new GetRequest()
+ .setTableName(tableName)
+ .setKey(v);
+ GetResult getRes = asyncHandle.get(getReq).join();
+ i = 0;
+ for (Map.Entry entry :
+ getRes.getValue().entrySet()) {
+ assertEquals(declOrder[i++], entry.getKey());
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testLowThroughput() {
+ if (onprem == false) {
+ assumeKVVersion("testLowThroughput", 21, 3, 1);
+ }
+ final int numRows = 30;
+ String name = "testThroughput";
+ String createTableDdl =
+ "CREATE TABLE " + name +
+ "(id INTEGER, bin binary, json json, primary key(id))";
+
+ tableOperationAsync(asyncHandle, createTableDdl,
+ new TableLimits(2, 20000, 1)).join();
+
+ MapValue value = new MapValue()
+ .put("bin", new byte[3000])
+ .put("json", "abc");
+ PutRequest putReq = new PutRequest().setTableName(name);
+
+ /* add rows */
+ for (int i = 0; i < numRows; i++) {
+ value.put("id", i);
+ putReq.setValue(value);
+ PutResult putRet = asyncHandle.put(putReq).join();
+ assertNotNull(putRet.getVersion());
+ }
+
+ /*
+ * Ensure that this query completes.
+ * 30 rows of 3K+ each = ~90KB.
+ * at 2RUs/sec, that's about 45 seconds.
+ */
+ final String stmt = "select * from " + name;
+ executeQuery(stmt, null, numRows, 0, false);
+ }
+
+ /*
+ * Tests that a query with a V2 sort (geo_near) can operate against
+ * query versions 2 and 3
+ */
+ @Test
+ public void testQueryCompat() {
+ final String geoTable = "create table points (id integer, " +
+ "info json, primary key(id))";
+ final String geoIndex =
+ "create index idx_ptn on points(info.point as point)";
+ final String geoQuery =
+ "select id from points p " +
+ "where geo_near(p.info.point, " +
+ "{ \"type\" : \"point\", \"coordinates\" : [24.0175, 35.5156 ]}," +
+ "5000)";
+
+ TableResult tres = tableOperationAsync(asyncHandle, geoTable,
+ new TableLimits(4, 1, 1)).join();
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ tres = tableOperationAsync(asyncHandle, geoIndex, null).join();
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ PrepareRequest prepReq = new PrepareRequest().setStatement(geoQuery);
+ PrepareResult prepRet = asyncHandle.prepare(prepReq).join();
+ assertNotNull(prepRet.getPreparedStatement());
+ }
+
+ /*
+ * Test use of large query strings for insert/update/upsert
+ */
+ @Test
+ public void testLargeQueryStrings() {
+ if (onprem) {
+ assumeKVVersion("testLargeQueryStrings", 20, 1, 1);
+ } else {
+ assumeKVVersion("testLargeQueryStrings", 21, 3, 1);
+ }
+ final String tableName = "LargeQuery";
+ final String createTable = "create table " + tableName +
+ "(id integer, data json, primary key(id))";
+ final int[] stringSizes = {10, 500, 5000, 20000, 500000};
+
+ tableOperationAsync(asyncHandle, createTable,
+ new TableLimits(4, 1000, 1000)).join();
+ /* create a large JSON data string */
+ for (int size : stringSizes) {
+ String data = createLargeJson(size);
+ String iquery = "insert into " + tableName + " values(1," +
+ data + ") returning id";
+ String uquery = "update " + tableName + " t " +
+ "set t.data = " + data + "where id = 1 returning id";
+
+ /* insert, then update */
+ QueryRequest req = newQueryRequest();
+ req.setStatement(iquery);
+ QueryResult res = asyncHandle.query(req).join();
+ assertEquals(1, res.getResults().get(0).get("id").getInt());
+ req = newQueryRequest();
+ req.setStatement(uquery);
+ res = asyncHandle.query(req).join();
+ assertEquals(1, res.getResults().get(0).get("id").getInt());
+ }
+
+ /* validate that select fails */
+ final String squery = "select * from " + tableName +
+ " t where t.data.data = " + genString(15000);
+
+ try (QueryRequest req = newQueryRequest()) {
+ req.setStatement(squery);
+ ConcurrentUtil.awaitFuture(asyncHandle.query(req));
+ fail("Query should have failed");
+ } catch (IllegalArgumentException iae) {
+ /* success */
+ }
+ }
+
+ @Test
+ public void testBindArrayValue() {
+ if (!arrayAsRecordSupported) {
+ return;
+ }
+ assumeKVVersion("testBindArrayValue", 20, 3, 1);
+ final String tableName = "testBindArrayValue";
+ final String createTable = "create table if not exists " + tableName +
+ "(id integer, " +
+ "info record(name string, age integer, " +
+ "address record(street string, room integer)), " +
+ "primary key(id))";
+
+ tableOperationAsync(asyncHandle, createTable,
+ new TableLimits(100, 100, 1)).join();
+
+ String stmt = "declare $id integer;" +
+ "$info record(name string, age integer, " +
+ "address record(street string, " +
+ "room integer));" +
+ "upsert into " + tableName + " values($id, $info)";
+ PrepareRequest prepReq = new PrepareRequest().setStatement(stmt);
+ PrepareResult prepRet = asyncHandle.prepare(prepReq).join();
+ PreparedStatement pstmt = prepRet.getPreparedStatement();
+
+ MapValue mapVal;
+ int id = 0;
+
+ /* Case1: all fields are specified with non-null value */
+ ArrayValue adVal = new ArrayValue()
+ .add("35 Network drive")
+ .add(203);
+ ArrayValue arrVal = new ArrayValue()
+ .add("Jack Wang")
+ .add(40)
+ .add(adVal);
+ mapVal = new MapValue()
+ .put("name", arrVal.get(0))
+ .put("age", arrVal.get(1))
+ .put("address",
+ new MapValue().put("street", adVal.get(0))
+ .put("room", adVal.get(1)));
+ execInsertAndCheckInfo(pstmt, ++id, arrVal, tableName, mapVal);
+
+ /* Case2: address = NULL*/
+ arrVal = new ArrayValue()
+ .add("Jack Wang")
+ .add(40)
+ .add(NullValue.getInstance());
+ mapVal = new MapValue()
+ .put("name", arrVal.get(0))
+ .put("age", arrVal.get(1))
+ .put("address", NullValue.getInstance());
+ execInsertAndCheckInfo(pstmt, ++id, arrVal, tableName, mapVal);
+
+ /*
+ * Case3: age = "40" and address.room = "203" which are castable to
+ * integer
+ */
+ adVal = new ArrayValue()
+ .add("35 Network drive")
+ .add("203");
+ arrVal = new ArrayValue()
+ .add("Jack Wang")
+ .add("40")
+ .add(adVal);
+ mapVal = new MapValue()
+ .put("name", arrVal.get(0))
+ .put("age", 40)
+ .put("address",
+ new MapValue().put("street", adVal.get(0))
+ .put("room", 203));
+ execInsertAndCheckInfo(pstmt, ++id, arrVal, tableName, mapVal);
+
+ /*
+ * Negative cases
+ */
+ /* info.name: Type mismatch on input. Expected STRING, got INTEGER */
+ arrVal = new ArrayValue()
+ .add(40)
+ .add("Jack Wang")
+ .add(NullValue.getInstance());
+ pstmt.setVariable("$id", new IntegerValue(id));
+ pstmt.setVariable("$info", arrVal);
+
+ try (QueryRequest req = newQueryRequest()) {
+ req.setPreparedStatement(pstmt);
+ handle.query(req);
+ fail("Expected IAE");
+ } catch (IllegalArgumentException ex) {
+ }
+
+ /*
+ * Invalid Array value for Record Value, it has 1 element but
+ * the Record Value contains 3 fields
+ */
+ arrVal = new ArrayValue()
+ .add("Jack Wang");
+ pstmt.setVariable("$id", new IntegerValue(id));
+ pstmt.setVariable("$info", arrVal);
+
+ try (QueryRequest req = newQueryRequest()) {
+ req.setPreparedStatement(pstmt);
+ ConcurrentUtil.awaitFuture(asyncHandle.query(req));
+ fail("Expected IAE");
+ } catch(IllegalArgumentException ex) {
+ }
+ }
+
+ @Test
+ public void testUsabilityQueryPaginator() throws Exception {
+ /* Load rows to table */
+ loadRowsToScanTable(3, 2, 1);
+
+ QueryRequest qreq = newQueryRequest();
+ qreq.setStatement("select * from testTable").setLimit(3);
+
+ final QueryPaginatorResult pageResult = asyncHandle.queryPaginator(qreq);
+ Flow.Publisher> publisher = pageResult.getResults();
+ TestSubscriber subscriber = new TestSubscriber();
+ publisher.subscribe(subscriber);
+ List items = subscriber.awaitItems(1, TimeUnit.SECONDS);
+ assertEquals(6, items.size());
+ items.forEach(Assert::assertNotNull);
+ assertTrue(qreq.isDone());
+ }
+
+ @Test
+ public void testQueryPaginator() throws Exception {
+ final int numMajor = 10;
+ final int numPerMajor = 1;
+
+ /* Load rows to table */
+ loadRowsToScanTable(numMajor, numPerMajor, 1);
+
+ /* check simple queries */
+ checkQueryPaginatorUnordered("select * from testTable");
+
+ /* check non-simple queries, ie. group by, order by */
+ checkQueryPaginatorUnordered("select sid, count(*) from testTable " +
+ "group by sid");
+
+ checkQueryPaginatorOrdered("select * from testTable order by sid, " +
+ "id");
+ checkQueryPaginatorOrdered("select sid, count(*) from testTable group" +
+ " by sid order by sid");
+ }
+
+ /**
+ * Runs query twice and checks if results from queryIterable iterator are
+ * the same as regular query() results.
+ */
+ private void checkQueryPaginatorUnordered(String query) throws Exception {
+
+ QueryResultDTO qiDTO = new QueryResultDTO();
+
+ try (QueryRequest qreq = newQueryRequest()) {
+ qreq.setStatement(query).setLimit(3);
+ processQuery(qreq, 0 /* limit */, qiDTO).join();
+ }
+
+ QueryRequest qreq = newQueryRequest();
+ qreq.setStatement(query).setLimit(3);
+
+ QueryPaginatorResult qipage = asyncHandle.queryPaginator(qreq);
+ TestSubscriber subscriber = new TestSubscriber();
+ qipage.getResults().subscribe(subscriber);
+ subscriber.awaitItems(5, TimeUnit.SECONDS);
+
+ Set expectedSet = Set.copyOf(qiDTO.items);
+ Set actualSet = Set.copyOf(subscriber.items);
+
+ try {
+ assertEquals(expectedSet.size(), actualSet.size());
+ } catch (Throwable ex) {
+ for (MapValue r : actualSet) {
+ if (!expectedSet.contains(r)) {
+ System.out.println("Fail: actual row not " +
+ "expected: " + r);
+ }
+ }
+ for (MapValue r : expectedSet) {
+ if (!actualSet.contains(r)) {
+ System.out.println("Fail: expected row not found: " + r);
+ }
+ }
+ fail("rows not matching: expected size: " +
+ expectedSet.size() + " actual size: " + actualSet.size());
+ }
+
+ assertEquals(expectedSet, actualSet);
+
+ assertEquals(qiDTO.totalRateLimitDelay.intValue(),
+ qipage.getRateLimitDelayedMs());
+ assertEquals(qiDTO.readKB.intValue(), qipage.getReadKB());
+ assertEquals(qiDTO.writeKB.intValue(), qipage.getWriteKB());
+ assertEquals(qiDTO.readUnits.intValue(), qipage.getReadUnits());
+ assertEquals(qiDTO.writeUnits.intValue(), qipage.getWriteUnits());
+ assertEquals(qiDTO.totalRetryStats, qipage.getRetryStats());
+ }
+
+ /**
+ * Runs query twice and checks if results from queryIterable iterator are
+ * the same as regular query() results.
+ */
+ private void checkQueryPaginatorOrdered(String query) throws Exception {
+ QueryRequest qipageReq = newQueryRequest();
+ qipageReq.setStatement(query);
+ QueryResultDTO dto = new QueryResultDTO();
+
+ try (QueryRequest qreq = newQueryRequest()) {
+ qreq.setStatement(query);
+ processQuery(qreq, 0 /* limit */, dto).join();
+ }
+
+ QueryPaginatorResult qipage = asyncHandle.queryPaginator(qipageReq);
+ TestSubscriber subscriber = new TestSubscriber();
+ qipage.getResults().subscribe(subscriber);
+ subscriber.awaitItems(5, TimeUnit.SECONDS);
+
+ List expectedRows = dto.items;
+ List actualRows = subscriber.items;
+
+ try {
+ assertEquals(expectedRows.size(), actualRows.size());
+ } catch (Throwable ex) {
+ for (MapValue r : actualRows) {
+ if (!expectedRows.contains(r)) {
+ System.out.println("Fail: actual row not " +
+ "expected: " + r);
+ }
+ }
+ for (MapValue r : expectedRows) {
+ if (!actualRows.contains(r)) {
+ System.out.println("Fail: expected row not found: " + r);
+ }
+ }
+ fail("rows not matching: expected size: " +
+ expectedRows.size() + " actual size: " + actualRows.size());
+ }
+ assertEquals(dto.readKB.intValue(), qipage.getReadKB());
+ assertEquals(dto.writeKB.intValue(), qipage.getWriteKB());
+ assertEquals(dto.readUnits.intValue(), qipage.getReadUnits());
+ assertEquals(dto.writeUnits.intValue(), qipage.getWriteUnits());
+ assertEquals(dto.totalRetryStats, qipage.getRetryStats());
+ }
+
+ private void execInsertAndCheckInfo(PreparedStatement pstmt,
+ int id,
+ FieldValue info,
+ String tableName,
+ MapValue expInfo) {
+
+ pstmt.setVariable("$id", new IntegerValue(id));
+ pstmt.setVariable("$info", info);
+
+ try (QueryRequest req = newQueryRequest()) {
+ req.setPreparedStatement(pstmt);
+ QueryResult ret = handle.query(req);
+ assertEquals(1, ret.getResults().get(0).asMap()
+ .get("NumRowsInserted").getInt());
+ }
+
+ String stmt = "select info from " + tableName + " where id = " + id;
+ try (QueryRequest req = newQueryRequest()) {
+ req.setStatement(stmt);
+ QueryResult ret = handle.query(req);
+ assertEquals(1, ret.getResults().size());
+ assertEquals(expInfo, ret.getResults().get(0).get("info"));
+ }
+ }
+
+ private String createLargeJson(int size) {
+ MapValue map = new MapValue();
+ map.put("data", genString(size));
+ return map.toString();
+ }
+
+ private void executeQuery(String statement,
+ boolean keyOnly,
+ boolean indexScan,
+ int expNumRows,
+ int expReadKB,
+ int numLimit,
+ int sizeLimit,
+ int recordKB) {
+ executeQuery(statement, keyOnly, indexScan, expNumRows, expReadKB,
+ numLimit, sizeLimit, recordKB, Consistency.EVENTUAL,
+ false, null);
+ executeQuery(statement, keyOnly, indexScan, expNumRows, expReadKB,
+ numLimit, sizeLimit, recordKB, Consistency.ABSOLUTE,
+ false, null);
+ }
+
+ private void executeQuery(String query,
+                              Map<String, FieldValue> bindValues,
+ int expNumRows,
+ int maxReadKB,
+ boolean usePrepStmt) {
+ executeQuery(query, false, false, expNumRows, 0, /* expReadKB */
+ 0 /* numLimits */, maxReadKB, 0 /* recordKB */,
+ null /* consistency */, usePrepStmt, bindValues);
+ }
+ private void executeQuery(String statement,
+ boolean keyOnly,
+ boolean indexScan,
+ int expNumRows,
+ int expReadKB,
+ int numLimit,
+ int sizeLimit,
+ int recordKB,
+ Consistency consistency,
+ boolean usePrepStmt,
+                              Map<String, FieldValue> bindValues) {
+
+ try (final QueryRequest queryReq = newQueryRequest()) {
+ if (bindValues == null || !usePrepStmt) {
+ queryReq
+ .setStatement(statement).setLimit(numLimit)
+ .setConsistency(consistency)
+ .setMaxReadKB(sizeLimit);
+ } else {
+ PrepareRequest prepReq =
+ new PrepareRequest().setStatement(statement);
+ PrepareResult prepRes = asyncHandle.prepare(prepReq).join();
+ PreparedStatement prepStmt = prepRes.getPreparedStatement();
+ if (bindValues != null) {
+                    for (Entry<String, FieldValue> entry : bindValues
+                        .entrySet()) {
+ prepStmt.setVariable(entry.getKey(), entry.getValue());
+ }
+ }
+ queryReq
+ .setPreparedStatement(prepStmt)
+ .setMaxReadKB(sizeLimit)
+ .setLimit(numLimit)
+ .setConsistency(consistency);
+ }
+
+ final QueryResultDTO dto = new QueryResultDTO();
+            final AtomicReference<Throwable> ex = new AtomicReference<>();
+ CountDownLatch latch = new CountDownLatch(1);
+ processQuery(queryReq, numLimit, dto)
+ .thenAccept( ignored -> {
+ if (showResults) {
+ System.out.println(
+ "Total ReadKB = " +dto.readKB.intValue() +
+ " Total ReadUnits = " + dto.readUnits.intValue() +
+ " Total WriteKB = " + dto.writeKB.intValue());
+ }
+
+ assertEquals("Wrong number of rows returned, expect "
+ + expNumRows + ", but get " + dto.numRows.intValue(),
+ dto.numRows.intValue(), expNumRows);
+ })
+ .whenComplete((v, err) -> {
+ if (err != null) {
+ ex.set(err.getCause());
+ }
+ latch.countDown();
+ });
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ if (ex.get() != null) {
+ throw new RuntimeException(ex.get());
+ }
+ }
+ }
+
+ private void loadRowsToScanTable(int numMajor, int numPerMajor, int nKB) {
+
+ MapValue value = new MapValue();
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+
+ String[] states = { "CA", "OR", "WA", "VT", "NY" };
+ int[] salaries = { 1000, 15000, 8000, 9000 };
+ ArrayValue[] arrays = new ArrayValue[4];
+
+ for (int i = 0; i < 4; ++i) {
+ arrays[i] = new ArrayValue(4);
+ }
+ arrays[0].add(1).add(5).add(7).add(10);
+ arrays[1].add(4).add(7).add(7).add(11);
+ arrays[2].add(3).add(8).add(17).add(21);
+ arrays[3].add(3).add(8).add(12).add(14);
+
+ int slen = (nKB - 1) * 1024;
+ /* Load rows */
+ for (int i = 0; i < numMajor; i++) {
+ value.put("sid", i);
+ for (int j = 0; j < numPerMajor; j++) {
+ value.put("id", j);
+ value.put("name", "name_" + j);
+ value.put("age", j % 10);
+ value.put("state", states[j % 5]);
+ value.put("salary", salaries[j % 4]);
+ value.put("array", arrays[j % 4]);
+ value.put("longString", genString(slen));
+ PutResult res = asyncHandle.put(putRequest).join();
+ assertNotNull("Put failed", res.getVersion());
+ }
+ }
+ }
+
+ private void loadRowsToTable(String tabName, String[] jsons) {
+
+ for (String json : jsons) {
+ MapValue value = (MapValue)JsonUtils.createValueFromJson(json, null);
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName(tabName);
+ PutResult res = asyncHandle.put(putRequest).join();
+ assertNotNull("Put failed", res.getVersion());
+ }
+ }
+
+ private static int getMinQueryCost() {
+ return MIN_QUERY_COST;
+ }
+
+ @SuppressWarnings("resource")
+ private QueryRequest newQueryRequest() {
+ return new QueryRequest().setTraceLevel(traceLevel);
+ }
+
+    CompletableFuture<Void> processQuery(QueryRequest queryRequest,
+ int numLimit,
+ QueryResultDTO dto) {
+ return asyncHandle.query(queryRequest)
+ .thenComposeAsync(queryRes -> {
+            final List<MapValue> results = queryRes.getResults();
+ final int cnt = results.size();
+ if (numLimit > 0) {
+ assertTrue(
+ "Unexpected number of rows returned, expect <= " +
+ numLimit + ", but get " + cnt + " rows",
+ cnt <= numLimit);
+ }
+ final int rkb = queryRes.getReadKB();
+ final int runits = queryRes.getReadUnits();
+ final int wkb = queryRes.getWriteKB();
+ final int wunits = queryRes.getWriteUnits();
+
+ if (showResults) {
+ for (int i = 0; i < results.size(); ++i) {
+ System.out.println("Result "
+ + (dto.numRows.intValue() + i) + " :");
+ System.out.println(results.get(i));
+ }
+ System.out.println("Batch " + dto.numBatches.intValue() +
+ " ReadKB=" + rkb +
+ " ReadUnits=" + runits +
+ " WriteKB=" + wkb +
+ " WriteUnits=" + wunits);
+ }
+ dto.items.addAll(results);
+ dto.numRows.add(cnt);
+ dto.readKB.add(rkb);
+ dto.readUnits.add(runits);
+ dto.writeKB.add(wkb);
+ dto.writeUnits.add(wunits);
+ dto.numBatches.add(1);
+ dto.totalRateLimitDelay.add(queryRes.getRateLimitDelayedMs());
+
+ if (queryRes.getRetryStats() != null) {
+ RetryStats stats = queryRes.getRetryStats();
+ if (dto.totalRetryStats == null) {
+ dto.totalRetryStats = stats;
+ } else {
+ dto.totalRetryStats.addStats(stats);
+ }
+ }
+ if (!queryRequest.isDone()) {
+ return processQuery(queryRequest, numLimit, dto);
+ }
+ return CompletableFuture.completedFuture(null);
+ });
+ }
+ private static class QueryResultDTO {
+ private final LongAdder numRows = new LongAdder();
+ private final LongAdder readKB = new LongAdder();
+ private final LongAdder readUnits = new LongAdder();
+ private final LongAdder writeKB = new LongAdder();
+ private final LongAdder writeUnits = new LongAdder();
+ private final LongAdder numBatches = new LongAdder();
+        private final List<MapValue> items = new ArrayList<>();
+ private final LongAdder totalRateLimitDelay = new LongAdder();
+ private RetryStats totalRetryStats = null;
+ }
+
+    public static class TestSubscriber implements Flow.Subscriber<List<MapValue>> {
+
+        private final List<MapValue> items =
+ Collections.synchronizedList(new ArrayList<>());
+        private final CompletableFuture<List<MapValue>> future =
+ new CompletableFuture<>();
+ private Flow.Subscription subscription;
+
+ // Configurable request amount (defaults to unbounded)
+ private final long initialRequest;
+
+ public TestSubscriber() {
+ this(Long.MAX_VALUE);
+ }
+
+ public TestSubscriber(long initialRequest) {
+ this.initialRequest = initialRequest;
+ }
+
+ @Override
+ public void onSubscribe(Flow.Subscription subscription) {
+ this.subscription = subscription;
+ if (initialRequest > 0) {
+ subscription.request(initialRequest);
+ }
+ }
+
+ @Override
+        public void onNext(List<MapValue> item) {
+ items.addAll(item);
+ }
+
+ @Override
+ public void onError(Throwable throwable) {
+ // Fail the future so the test throws the exception
+ future.completeExceptionally(throwable);
+ }
+
+ @Override
+ public void onComplete() {
+ // complete the future with the final list of items
+ future.complete(items);
+ }
+
+ /**
+ * Blocks until the stream completes and returns the list of items received.
+ * Throws an exception if the stream signaled onError.
+ */
+        List<MapValue> awaitItems(long timeout, TimeUnit unit)
+ throws InterruptedException, ExecutionException, TimeoutException {
+ return future.get(timeout, unit);
+ }
+
+ /**
+ * Helper to request more items manually if you didn't request Long.MAX_VALUE initially
+ */
+ void request(long n) {
+ if (subscription != null) {
+ subscription.request(n);
+ }
+ }
+ }
+}
diff --git a/driver/src/test/java/oracle/nosql/driver/BasicAsyncTest.java b/driver/src/test/java/oracle/nosql/driver/BasicAsyncTest.java
new file mode 100644
index 00000000..7da08411
--- /dev/null
+++ b/driver/src/test/java/oracle/nosql/driver/BasicAsyncTest.java
@@ -0,0 +1,202 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver;
+
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.values.MapValue;
+import org.junit.Test;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+
+import static oracle.nosql.driver.util.BinaryProtocol.V4;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class BasicAsyncTest extends ProxyTestBase {
+
+ @Test
+ public void smokeTest() {
+
+ try {
+ MapValue key = new MapValue().put("id", 10);
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+
+ /* drop a table */
+ tableOperationAsync(asyncHandle,
+ "drop table if exists testusers",
+ null)
+ .whenComplete((tres, err) -> {
+ assertNotNull(tres.getTableName());
+ assertNull(tres.getTableLimits());
+ })
+ .thenCompose(ignored -> {
+ /* drop again without if exists -- should throw */
+ return tableOperationAsync(asyncHandle,
+ "drop table testusers",
+ null)
+ .handle((tres, err) -> {
+ assertNotNull("operation should have thrown", err);
+ assertTrue(err instanceof CompletionException);
+ assertTrue("Expecting TableNotFoundException",
+ err.getCause() instanceof TableNotFoundException);
+ return null;
+ });
+ })
+ .thenCompose(ignored -> {
+ /* Create a table */
+ return tableOperationAsync(
+ asyncHandle,
+ "create table if not exists testusers(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(500, 500, 50))
+ .thenAccept(tres -> {
+ assertNotNull(tres);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ });
+ })
+ .thenCompose(ignored -> {
+ /* Create an index */
+ return tableOperationAsync(
+ asyncHandle,
+ "create index if not exists Name on testusers(name)",
+ null)
+ .thenAccept(tres -> {
+ assertNotNull(tres);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ });
+ })
+ .thenCompose(ignored -> {
+ /* list tables */
+ ListTablesRequest listTables = new ListTablesRequest();
+ return asyncHandle.listTables(listTables)
+ .thenApply(lres -> {
+ assertNotNull(lres);
+ /*
+ * the test cases don't yet clean up so there
+ * may be additional tables present, be
+ * flexible in this assertion.
+ */
+ assertTrue(lres.getTables().length >= 1);
+ assertNotNull(lres.toString());
+ return lres;
+ });
+ })
+ .thenCompose(ignored -> {
+ /* getTableUsage. It won't return much in test mode */
+ if (!onprem) {
+ TableUsageRequest gtu = new TableUsageRequest()
+ .setTableName("testusers").setLimit(2)
+ .setEndTime(System.currentTimeMillis());
+ return asyncHandle.getTableUsage(gtu)
+ .thenAccept(gtuRes -> {
+ assertNotNull(gtuRes);
+ assertNotNull(gtuRes.getUsageRecords());
+ });
+ }
+ return CompletableFuture.completedFuture(null);
+ })
+ .thenCompose(ignored -> {
+ /* PUT */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusers");
+ return asyncHandle.put(putRequest)
+ .thenAccept(res -> {
+ assertNotNull(res.getVersion());
+ assertWriteKB(res);
+ });
+ })
+ .thenCompose(ignored -> {
+ /* GET */
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName("testusers");
+
+ return asyncHandle.get(getRequest)
+ .whenComplete((gres, err) -> {
+ assertNotNull(gres);
+ assertNotNull(gres.getJsonValue());
+ assertEquals("jane",
+ gres.getValue().getString("name"));
+ assertReadKB(gres);
+ });
+ })
+ .thenCompose(ignored -> {
+ /* DELETE */
+ DeleteRequest deleteRequest = new DeleteRequest()
+ .setKey(key)
+ .setTableName("testusers")
+ .setReturnRow(true);
+ return asyncHandle.delete(deleteRequest)
+ .whenComplete((dres, err) -> {
+ assertNotNull(dres);
+ assertTrue(dres.getSuccess());
+ assertWriteKB(dres);
+ if (proxySerialVersion <= V4) {
+ assertNull(dres.getExistingVersion());
+ } else {
+ assertEquals(value, dres.getExistingValue());
+ }
+ });
+ })
+ .thenCompose(ignored -> {
+ /* GET -- no row, it was removed above */
+ GetRequest getRequest = new GetRequest()
+ .setTableName("testusers")
+ .setKey(key);
+ return asyncHandle.get(getRequest)
+ .whenComplete((gres, err) -> {
+ assertNotNull(gres);
+ assertNull(gres.getValue());
+ });
+ }).join();
+
+ /* GET -- no table */
+ GetRequest getRequest = new GetRequest()
+ .setTableName("not_a_table")
+ .setKey(key);
+ asyncHandle.get(getRequest)
+ .handle((gres, err) -> {
+ assertTrue(err instanceof CompletionException);
+ assertTrue(
+ "Attempt to access missing table should "
+ + "have thrown",
+ err.getCause() instanceof TableNotFoundException);
+ return null;
+ }).join();
+
+ /* PUT -- invalid row -- this will throw */
+ value.remove("id");
+ value.put("not_a_field", 1);
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusers");
+ asyncHandle.put(putRequest)
+ .handle((pres, err) -> {
+ assertTrue(err instanceof CompletionException);
+ assertTrue(
+ "Attempt to put invalid row should have thrown",
+ err.getCause() instanceof IllegalArgumentException);
+ return null;
+ }).join();
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail("Exception in test");
+ }
+ }
+}
diff --git a/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java b/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java
index 3499cb6f..2eb09df8 100644
--- a/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java
+++ b/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java
@@ -9,6 +9,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -107,6 +108,47 @@ public void testSdkVersion() {
arr.length > 2);
}
+ @Test
+ public void testConnectionPoolConfig() {
+ NoSQLHandleConfig cfg = new NoSQLHandleConfig("http://foo.com");
+ /* verify default connection pool values */
+ assertEquals(NoSQLHandleConfig.DEFAULT_CONNECTION_POOL_SIZE,
+ cfg.getConnectionPoolSize());
+ assertEquals(NoSQLHandleConfig.DEFAULT_CONNECTION_PENDING_SIZE,
+ cfg.getPoolMaxPending());
+ try {
+ /* set connection pool properties and verify */
+ System.setProperty(NoSQLHandleConfig.CONNECTION_SIZE_PROPERTY,
+ "50");
+ System.setProperty(NoSQLHandleConfig.CONNECTION_PENDING_PROPERTY,
+ "100");
+
+ cfg = new NoSQLHandleConfig("http://foo.com");
+ assertEquals(50, cfg.getConnectionPoolSize());
+ assertEquals(100, cfg.getPoolMaxPending());
+
+ /* manually set connection pool values and verify */
+ cfg = new NoSQLHandleConfig("http://foo.com");
+ cfg.setConnectionPoolSize(5);
+ cfg.setPoolMaxPending(2);
+ assertEquals(5, cfg.getConnectionPoolSize());
+ assertEquals(2, cfg.getPoolMaxPending());
+
+ /* set invalid value for properties and verify */
+ System.setProperty(NoSQLHandleConfig.CONNECTION_SIZE_PROPERTY, "0");
+ System.setProperty(NoSQLHandleConfig.CONNECTION_PENDING_PROPERTY,
+ "0");
+ IllegalArgumentException iae =
+ assertThrows(IllegalArgumentException.class,
+ () -> new NoSQLHandleConfig("http://foo.com"));
+ assertTrue(iae.getMessage().contains("must be larger than zero"));
+
+ } finally {
+ System.clearProperty(NoSQLHandleConfig.CONNECTION_SIZE_PROPERTY);
+ System.clearProperty(NoSQLHandleConfig.CONNECTION_PENDING_PROPERTY);
+ }
+ }
+
private void expectIllegalArg(String endpoint) {
try {
new NoSQLHandleConfig(endpoint);
diff --git a/driver/src/test/java/oracle/nosql/driver/PerformanceTest.java b/driver/src/test/java/oracle/nosql/driver/PerformanceTest.java
new file mode 100644
index 00000000..56d76031
--- /dev/null
+++ b/driver/src/test/java/oracle/nosql/driver/PerformanceTest.java
@@ -0,0 +1,241 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver;
+
+import oracle.nosql.driver.http.Client;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.MapValue;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performance test for async APIs.
+ * The test has two phases: warm-up and load.
+ * The warm-up phase warms up the Netty connections;
+ * the load phase randomly runs one of put, get, delete and query.
+ */
+@Ignore("Performance test is too heavy to run as unit test")
+public class PerformanceTest extends ProxyTestBase {
+ private static final String table = "perf_test";
+ private static final String ddl = "create table if not exists " + table +
+ "(id long, name string, primary key(id))";
+ private static final String dropDdl = "drop table if exists "+ table;
+ private static final int WARMUP_OPS = 100;
+ private static final int TOTAL_OPS = 100000;
+ private static final int THREADS = 100;
+ private static ExecutorService executor;
+
+ private static final int pipelineDepth = 100;
+
+ @BeforeClass
+ public static void setupTest() {
+ executor = Executors.newFixedThreadPool(THREADS);
+ }
+ @Before
+ public void setup() {
+ TableResult tres = tableOperationAsync(asyncHandle, ddl,
+ new TableLimits(1000, 1000,1)).join();
+ assertNotNull(tres.getTableName());
+ }
+
+ @After
+ public void teardown() {
+ TableResult tres =
+ tableOperationAsync(asyncHandle, dropDdl, null).join();
+ assertNotNull(tres.getTableName());
+ }
+
+ @Test
+ public void test() throws Exception {
+ Client client = ((NoSQLHandleAsyncImpl) asyncHandle).getClient();
+ client.enableRateLimiting(true, 100);
+
+ System.out.println("Warm-up phase");
+ //runOpsAsync(WARMUP_OPS, pipelineDepth);
+ runOpsAsync(WARMUP_OPS, pipelineDepth);
+
+ StatsControl statsControl = asyncHandle.getStatsControl();
+ statsControl.setProfile(StatsControl.Profile.ALL).setPrettyPrint(true);
+ statsControl.start();
+
+
+ System.out.println("Load phase");
+ long start = System.nanoTime();
+ //runOpsAsync(TOTAL_OPS, pipelineDepth);
+ runOpsAsync(TOTAL_OPS, pipelineDepth);
+ long end = System.nanoTime();
+
+
+ Duration duration = Duration.ofNanos(end - start);
+ double throughput = TOTAL_OPS / (duration.toMillis() / 1000.0);
+
+ System.out.println("Completed " + TOTAL_OPS + " operations");
+ System.out.println("Time = " + duration);
+ System.out.println("Throughput = " + throughput + " ops/sec");
+ statsControl.stop();
+ }
+
+ private void runOps(int count) throws Exception {
+        List<CompletableFuture<Void>> futures = new ArrayList<>(count);
+ Random random = new Random();
+ AtomicInteger failures = new AtomicInteger();
+ MapValue row = new MapValue()
+ .put("id", 1)
+ .put("name", "oracle");
+ MapValue key = new MapValue().put("id", 1);
+
+ for (int i = 0; i < count; i++) {
+ futures.add(CompletableFuture.runAsync(() -> {
+ try {
+ int op = random.nextInt(4);
+ switch (op) {
+ case 0 : {
+ //put op
+ PutRequest pr = new PutRequest()
+ .setTableName(table)
+ .setValue(row);
+ Result res = asyncHandle.put(pr).join();
+ assertNotNull(res);
+ break;
+ }
+ case 1 : {
+ GetRequest gr = new GetRequest()
+ .setTableName(table)
+ .setKey(key);
+ Result res = asyncHandle.get(gr).join();
+ assertNotNull(res);
+ break;
+ }
+ case 2 : {
+ DeleteRequest dr = new DeleteRequest()
+ .setTableName(table)
+ .setKey(key);
+ Result res = asyncHandle.delete(dr).join();
+ assertNotNull(res);
+ break;
+ } default : {
+ try(QueryRequest qr =
+ new QueryRequest()
+ .setStatement(
+ "select * from " + table + " where id=1")) {
+ Result res = asyncHandle.query(qr).join();
+ assertNotNull(res);
+ }
+ }
+ }
+ } catch (Exception e) {
+ failures.incrementAndGet();
+ }
+ }, executor));
+ }
+ CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
+ System.out.println("Failures = " + failures.get());
+ }
+
+ private void runOpsAsync(int count, int pipelineDepth) throws Exception {
+ final Semaphore semaphore = new Semaphore(pipelineDepth);
+        final List<CompletableFuture<Void>> futures = new ArrayList<>(count);
+ Random random = new Random();
+ AtomicInteger failures = new AtomicInteger();
+ MapValue row = new MapValue()
+ .put("id", 1)
+ .put("name", "oracle");
+ MapValue key = new MapValue().put("id", 1);
+
+ for (int i = 0; i < count; i++) {
+ try {
+ semaphore.acquire();
+ int op = random.nextInt(4);
+ switch (op) {
+ case 0 : {
+ //put op
+ PutRequest pr = new PutRequest()
+ .setTableName(table)
+ .setValue(row);
+                        CompletableFuture<PutResult> fut =
+ asyncHandle.put(pr).whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
+ futures.add(fut.thenRun(() -> {}));
+ break;
+ }
+ case 1 : {
+ GetRequest gr = new GetRequest()
+ .setTableName(table)
+ .setKey(key);
+                        CompletableFuture<GetResult> fut =
+ asyncHandle.get(gr).whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
+ futures.add(fut.thenRun(() -> {}));
+ break;
+ }
+ case 2 : {
+ DeleteRequest dr = new DeleteRequest()
+ .setTableName(table)
+ .setKey(key);
+                        CompletableFuture<DeleteResult> fut =
+ asyncHandle.delete(dr).whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
+ futures.add(fut.thenRun(() -> {}));
+ break;
+ } default : {
+ try(QueryRequest qr =
+ new QueryRequest()
+ .setStatement(
+ "select * from " + table + " where id=1")) {
+                        CompletableFuture<QueryResult> fut =
+ asyncHandle.query(qr)
+ .whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
+ futures.add(fut.thenRun(() -> {}));
+ }
+ }
+ }
+ } catch (Exception e) {
+ failures.incrementAndGet();
+ }
+ }
+ CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
+ System.out.println("Failures = " + failures.get());
+ }
+}
diff --git a/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java b/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java
index dda592ef..80fd72b5 100644
--- a/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java
+++ b/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java
@@ -30,6 +30,7 @@
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -79,6 +80,7 @@ public class ProxyTestBase {
protected static String TRACE = "test.trace";
protected static int DEFAULT_DDL_TIMEOUT = 15000;
protected static int DEFAULT_DML_TIMEOUT = 5000;
+ protected static int DEFAULT_REQUEST_TIMEOUT = 5000;
protected static String TEST_TABLE_NAME = "drivertest";
protected static int INACTIVITY_PERIOD_SECS = 2;
protected static String NETTY_LEAK_PROP="test.detectleaks";
@@ -122,6 +124,8 @@ public class ProxyTestBase {
protected NoSQLHandle handle;
+ protected NoSQLHandleAsync asyncHandle;
+
/* serial version used at the proxy server */
protected int proxySerialVersion;
@@ -248,6 +252,35 @@ protected static TableResult tableOperation(NoSQLHandle handle,
handle.doTableRequest(tableRequest, waitMillis, 1000);
return tres;
}
+
+    protected static CompletableFuture<TableResult>
+ tableOperationAsync(NoSQLHandleAsync handle,
+ String statement,
+ TableLimits limits) {
+ return tableOperationAsync(handle,
+ statement,
+ limits,
+ DEFAULT_DDL_TIMEOUT);
+ }
+
+ /**
+ * run the statement, assumes success, exception is thrown on error
+ */
+    protected static CompletableFuture<TableResult>
+ tableOperationAsync(NoSQLHandleAsync handle,
+ String statement,
+ TableLimits limits,
+ int waitMillis) {
+ assertTrue(waitMillis > 500);
+ TableRequest tableRequest = new TableRequest()
+ .setStatement(statement)
+ .setTableLimits(limits)
+ .setTimeout(DEFAULT_DDL_TIMEOUT);
+
+        CompletableFuture<TableResult> tres =
+ handle.doTableRequest(tableRequest, waitMillis, 1000);
+ return tres;
+ }
/**
* run the statement, assumes success, exception is thrown on error
*/
@@ -271,6 +304,7 @@ public void beforeTest() throws Exception {
* Configure and get the handle
*/
handle = getHandle(endpoint);
+ asyncHandle = getAsyncHandle(endpoint);
/* track existing tables and don't drop them */
existingTables = new HashSet();
@@ -309,6 +343,9 @@ public void afterTest() throws Exception {
}
handle.close();
}
+ if (asyncHandle != null) {
+ asyncHandle.close();
+ }
}
protected static void dropAllTables(NoSQLHandle nosqlHandle,
@@ -424,13 +461,19 @@ protected NoSQLHandle getHandle(String ep) {
return setupHandle(config);
}
+ protected NoSQLHandleAsync getAsyncHandle(String ep) {
+ NoSQLHandleConfig config = new NoSQLHandleConfig(ep);
+ serviceURL = config.getServiceURL();
+ return setupAsyncHandle(config);
+ }
+
/* Set configuration values for the handle */
protected NoSQLHandle setupHandle(NoSQLHandleConfig config) {
/*
* 5 retries, default retry algorithm
*/
config.configureDefaultRetryHandler(5, 0);
- config.setRequestTimeout(30000);
+ config.setRequestTimeout(DEFAULT_REQUEST_TIMEOUT);
/* remove idle connections after this many seconds */
config.setConnectionPoolInactivityPeriod(INACTIVITY_PERIOD_SECS);
@@ -446,6 +489,23 @@ protected NoSQLHandle setupHandle(NoSQLHandleConfig config) {
return h;
}
+    /* Set configuration values for the async handle */
+ protected NoSQLHandleAsync setupAsyncHandle(NoSQLHandleConfig config) {
+ /*
+ * 5 retries, default retry algorithm
+ */
+ config.configureDefaultRetryHandler(5, 0);
+ config.setRequestTimeout(DEFAULT_REQUEST_TIMEOUT);
+
+ /* remove idle connections after this many seconds */
+ config.setConnectionPoolInactivityPeriod(INACTIVITY_PERIOD_SECS);
+ configAuth(config);
+
+ /* allow test cases to add/modify handle config */
+ perTestHandleConfig(config);
+ return getAsyncHandle(config);
+ }
+
/**
* sub classes can override this to affect the handle config
*/
@@ -474,6 +534,27 @@ protected NoSQLHandle getHandle(NoSQLHandleConfig config) {
return NoSQLHandleFactory.createNoSQLHandle(config);
}
+ /**
+     * get an async handle based on the config
+ */
+ protected NoSQLHandleAsync getAsyncHandle(NoSQLHandleConfig config) {
+ /*
+ * Create a Logger, set to WARNING by default.
+ */
+ Logger logger = Logger.getLogger(getClass().getName());
+ String level = System.getProperty("test.loglevel");
+ if (level == null) {
+ level = "WARNING";
+ }
+ logger.setLevel(Level.parse(level));
+ config.setLogger(logger);
+
+ /*
+ * Open the handle
+ */
+ return NoSQLHandleFactory.createNoSQLHandleAsync(config);
+ }
+
void assertReadKB(Result res) {
if (onprem) {
return;
@@ -518,6 +599,13 @@ public String getAuthorizationString(Request request) {
@Override
public void close() {
}
+
+ @Override
+            public CompletableFuture<String>
+ getAuthorizationStringAsync(Request request) {
+ return CompletableFuture.completedFuture(
+ "Bearer cloudsim");
+ }
});
}
}
diff --git a/driver/src/test/java/oracle/nosql/driver/StatsTest.java b/driver/src/test/java/oracle/nosql/driver/StatsTest.java
index 3f56a525..262710eb 100644
--- a/driver/src/test/java/oracle/nosql/driver/StatsTest.java
+++ b/driver/src/test/java/oracle/nosql/driver/StatsTest.java
@@ -280,9 +280,14 @@ public void testStatsHandle()
statsList.stream()
.filter(s -> s.get("connections") != null &&
s.get("connections").isMap() &&
- s.get("connections").asMap().get("min") != null &&
- s.get("connections").asMap().get("max") != null &&
- s.get("connections").asMap().get("avg") != null )
+ s.get("connections").asMap()
+ .get("totalConnections") != null &&
+ s.get("connections").asMap().get("totalConnections").asMap()
+ .get("min") != null &&
+ s.get("connections").asMap().get("totalConnections").asMap()
+ .get("max") != null &&
+ s.get("connections").asMap().get("totalConnections").asMap()
+ .get("avg") != null)
.count();
assertTrue(count >=1);
@@ -492,7 +497,7 @@ public void testStopStart()
statsList.stream()
.filter(s -> s.get("connections") != null)
.count();
- assertEquals(count, 0);
+ assertTrue(count > 0);
// - All entries should not have any queries
count =
diff --git a/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java b/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java
index 48295fb4..b84eeede 100644
--- a/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java
+++ b/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java
@@ -8,14 +8,37 @@
package oracle.nosql.driver.httpclient;
import static org.junit.Assert.assertEquals;
-
-import java.util.concurrent.Future;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.net.ssl.SSLException;
import java.net.URL;
+import io.netty.bootstrap.Bootstrap;
+import io.netty.bootstrap.ServerBootstrap;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.local.LocalAddress;
+import io.netty.channel.local.LocalChannel;
+import io.netty.channel.local.LocalServerChannel;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.pool.ChannelPoolHandler;
+import io.netty.util.concurrent.Future;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
@@ -26,36 +49,59 @@
import oracle.nosql.driver.NoSQLHandleConfig;
/**
- * This test is excluded from the test profiles and must be run standalone.
- * This is because of the need to use a cloud endpoint for complete
- * testing. See header comment on testCloudTimeout().
- * It can be run explicitly using either the test-onprem or test-cloudsim
- * profile with a -Dtest directive, e.g.:
- * mvn -Ptest-cloudsim test \
- * -Dtest=oracle.nosql.driver.httpclient.ConnectionPoolTest \
- * -DargLine="-Dtest.endpoint=http://localhost:8080 \
- * -Dtest.cloudendpoint=some_cloud_endpoint"
+ * Tests for ConnectionPool
*/
public class ConnectionPoolTest {
private static String endpoint = System.getProperty("test.endpoint");
private static Logger logger = getLogger();
private URL serviceURL;
+ private EventLoopGroup group;
+ private Channel serverChannel;
+ private LocalAddress address;
@Before
- public void beforeTest() {
- if (endpoint == null) {
- throw new IllegalArgumentException(
- "Test requires test.endpoint system property");
- }
+ public void beforeTest() throws InterruptedException {
+ group = new NioEventLoopGroup();
+ address = new LocalAddress("test-port");
+ /* Start a fake Local Server so the pool can connect */
+        ServerBootstrap sb = new ServerBootstrap()
+            .group(group)
+            .channel(LocalServerChannel.class)
+            .childHandler(new ChannelInitializer<Channel>() {
+                @Override
+                protected void initChannel(Channel ch) {
+                    ch.pipeline().addLast(new ChannelInboundHandlerAdapter());
+                }
+            });
+        serverChannel = sb.bind(address).sync().channel();
+ }
- /* serviceURL is used in the test but a handle is not required */
- NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint);
- serviceURL = config.getServiceURL();
+ @After
+ public void tearDown() {
+ serverChannel.close();
+ group.shutdownGracefully();
}
+ /**
+ * This test is excluded from the test profiles and must be run standalone.
+ * This is because of the need to use a cloud endpoint for complete
+ * testing. See header comment on testCloudTimeout().
+ * It can be run explicitly using either the test-onprem or test-cloudsim
+ * profile with a -Dtest directive, e.g.:
+ * mvn -Ptest-cloudsim test \
+ * -Dtest=oracle.nosql.driver.httpclient.ConnectionPoolTest \
+ * -DargLine="-Dtest.endpoint=http://localhost:8080 \
+ * -Dtest.cloudendpoint=some_cloud_endpoint"
+ */
@Test
public void poolTest() throws Exception {
+ Assume.assumeTrue(endpoint != null);
+
+ /* serviceURL is used in the test but a handle is not required */
+ NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint);
+ serviceURL = config.getServiceURL();
+
final int poolSize = 4;
final int poolMinSize = 1;
final int poolInactivityPeriod = 1;
@@ -85,7 +131,7 @@ public boolean keepAlive(Channel ch) {
/*
* Acquire poolSize channels
*/
- Channel ch[] = new Channel[poolSize];
+ Channel[] ch = new Channel[poolSize];
for (int i = 0; i < poolSize; i++) {
ch[i] = getChannel(pool);
}
@@ -153,11 +199,11 @@ public void testCloudTimeout() throws Exception {
final int port = 443;
final int sleepTimeMs = 70000;
- if (endpoint == null) {
- throw new IllegalStateException(
- "testCloudTimeout requires setting of the system property, " +
- "\"test.cloudendpoint\"");
- }
+ Assume.assumeTrue(endpoint != null);
+
+ /* serviceURL is used in the test but a handle is not required */
+ NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint);
+ serviceURL = config.getServiceURL();
HttpClient client = new HttpClient(
endpoint,
@@ -185,7 +231,7 @@ public boolean keepAlive(Channel ch) {
* Acquire poolSize channels, then release them to the pool. Do this
* 2x to bump the use count on the channels
*/
- Channel ch[] = new Channel[poolSize];
+ Channel[] ch = new Channel[poolSize];
for (int count = 0; count < 2; count++) {
for (int i = 0; i < poolSize; i++) {
ch[i] = getChannel(pool);
@@ -205,7 +251,7 @@ public boolean keepAlive(Channel ch) {
Thread.sleep(sleepTimeMs);
/* assert that 2 channels have gone inactive and been pruned */
- assertEquals(poolSize - poolMinSize, pool.pruneChannels());
+ assertEquals(poolSize - poolMinSize, pool.getTotalChannels());
/* assert that the number of channels is the min size configured */
assertEquals(poolMinSize, pool.getTotalChannels());
@@ -213,6 +259,235 @@ public boolean keepAlive(Channel ch) {
client.shutdown();
}
+    @Test
+    public void testMetricsAndReuse() throws Exception {
+        /* Bootstrap that connects to the in-JVM Local test server */
+        Bootstrap bootstrap = new Bootstrap()
+            .group(group)
+            .channel(LocalChannel.class)
+            .remoteAddress(address);
+
+        /* Pool: no inactivity pruning, max 2 connections, max 2 pending */
+        ConnectionPool pool = getConnectionPool(bootstrap, 0, 2, 2);
+
+        /* CHECK 1: Initial */
+        assertStats(pool, 0, 0, 0, 0);
+
+        /* CHECK 2: Acquire */
+        Channel ch1 = pool.acquire().sync().getNow();
+        /* Total:1, Acquired:1, Idle:0 */
+        assertStats(pool, 1, 1, 0, 0);
+
+        /* CHECK 3: Release */
+        pool.release(ch1);
+        /* Total:1, Acquired:0, Idle:1 */
+        assertStats(pool, 1, 0, 1, 0);
+
+        /* CHECK 4: Reuse */
+        Channel ch2 = pool.acquire().sync().getNow();
+        /* Should be the SAME channel object (reused) */
+        assertEquals(ch1.id(), ch2.id());
+        /* Stats: Total:1, Acquired:1, Idle:0 */
+        assertStats(pool, 1, 1, 0, 0);
+
+        /* acquire another channel and check acquire count is 2 */
+        Channel ch3 = pool.acquire().sync().getNow();
+        /* Stats: Total:2, Acquired:2, Idle:0 */
+        assertStats(pool, 2, 2, 0, 0);
+
+        /* Try to acquire another channel, this should be put into pending */
+        Future<Channel> ch4 = pool.acquire();
+        /* Stats: Total:2, Acquired:2, Idle:0, Pending:1 */
+        assertStats(pool, 2, 2, 0, 1);
+
+        /* Try to acquire another channel, this should be put into pending */
+        Future<Channel> ch5 = pool.acquire();
+        /* Stats: Total:2, Acquired:2, Idle:0, Pending:2 */
+        assertStats(pool, 2, 2, 0, 2);
+
+        /* try to acquire more than max pending and check error is thrown */
+        Assert.assertThrows(IllegalStateException.class,
+            ()-> pool.acquire().sync().getNow());
+        /* Stats: Total:2, Acquired:2, Idle:0, Pending:2 */
+        assertStats(pool, 2, 2, 0, 2);
+
+        /* Release back a channel and verify that pending is reduced*/
+        pool.release(ch2);
+        /* Stats: Total:2, Acquired:2, Idle:0, Pending:1 */
+        Thread.sleep(10);
+        assertStats(pool, 2, 2, 0, 1);
+        assertTrue(ch4.isSuccess());
+
+        /* Release back a channel and verify that pending is reduced*/
+        pool.release(ch3);
+        /* Stats: Total:2, Acquired:2, Idle:0, Pending:0 */
+        Thread.sleep(10);
+        assertStats(pool, 2, 2, 0, 0);
+        assertTrue(ch5.isSuccess());
+
+        /* Release back a channel and verify Idle is increased */
+        pool.release(ch4.getNow());
+        /* Stats: Total:2, Acquired:1, Idle:1, Pending:0 */
+        Thread.sleep(10);
+        assertStats(pool, 2, 1, 1, 0);
+
+        /* Release back a channel and verify Idle is increased */
+        pool.release(ch5.getNow());
+        /* Stats: Total:2, Acquired:0, Idle:2, Pending:0 */
+        Thread.sleep(10);
+        assertStats(pool, 2, 0, 2, 0);
+
+        /* check pending tasks are completed when the pool is closed */
+        ch1 = pool.acquire().sync().getNow();
+        ch2 = pool.acquire().sync().getNow();
+        ch4 = pool.acquire();
+        /* Stats: Total:2, Acquired:2, Idle:0, Pending:1 */
+        Thread.sleep(10);
+        assertStats(pool, 2, 2, 0, 1);
+
+        /* close the pool */
+        pool.close();
+
+        /* check pending ch4 is completed with exception */
+        Thread.sleep(10);
+        assertFalse(ch4.isSuccess());
+        assertTrue(ch4.cause() instanceof RejectedExecutionException);
+    }
+
+    @Test
+    public void testMaxConnectionsAndPendingQueue() throws InterruptedException {
+        int numberOfRequests = 5;
+        int maxConnections = 2;
+
+        /* Thread-safe list to hold the channels we successfully acquire */
+        List<Channel> heldChannels =
+            Collections.synchronizedList(new ArrayList<>());
+
+        /* Latch to wait ONLY for the allowed connections to succeed */
+        CountDownLatch acquiredLatch = new CountDownLatch(maxConnections);
+
+        /* Bootstrap that connects to the in-JVM Local test server */
+        Bootstrap bootstrap = new Bootstrap()
+            .group(group)
+            .channel(LocalChannel.class)
+            .remoteAddress(address);
+
+        /* Pool: max 2 connections, pending queue large enough for all */
+        ConnectionPool pool = getConnectionPool(bootstrap, 60, maxConnections,
+            numberOfRequests + 1);
+        ExecutorService threadPool = Executors.newFixedThreadPool(10);
+
+        /* PHASE 1: Bombard the pool */
+        for (int i = 0; i < numberOfRequests; i++) {
+            threadPool.submit(() -> {
+                Future<Channel> future = pool.acquire();
+                future.addListener(f -> {
+                    if (f.isSuccess()) {
+                        heldChannels.add((Channel) f.getNow());
+                        acquiredLatch.countDown();
+                    }
+                });
+            });
+        }
+        /* Wait for the pool to fill up (Max maxConnections) */
+        boolean success = acquiredLatch.await(5, TimeUnit.SECONDS);
+        if (!success) {
+            throw new RuntimeException("Timeout waiting for initial connections");
+        }
+
+        /* Give a tiny buffer for metrics to settle */
+        Thread.sleep(50);
+
+        /* PHASE 2: Assert Saturation */
+        assertEquals("Total should be capped at max",
+            maxConnections, pool.getTotalChannels());
+        assertEquals("Acquired should be capped at max",
+            maxConnections, pool.getAcquiredChannelCount());
+        assertEquals("Excess requests should be pending",
+            numberOfRequests - maxConnections,
+            pool.getPendingAcquires());
+
+        /* PHASE 3: Drain the Queue
+         * Now we manually release the channels we were holding.
+         * This should trigger the Pending requests to proceed.
+         */
+
+        /* Iterate over a snapshot: completing a pending acquire runs its
+         * listener, which appends to heldChannels concurrently with this loop.
+         */
+        for (Channel ch : new ArrayList<>(heldChannels)) {
+            pool.release(ch);
+        }
+
+        /* Wait a moment for the pending queue to drain */
+        Thread.sleep(200);
+
+        /* Expect: Pending should be one now. */
+        assertEquals("Pending queue should have 1", 1, pool.getPendingAcquires());
+
+        threadPool.shutdown();
+        pool.close();
+    }
+
+    private static ConnectionPool getConnectionPool(Bootstrap bootstrap,
+                                                    int inactivityPeriodSeconds,
+                                                    int maxConnections,
+                                                    int maxPendingAcquires) {
+        ChannelPoolHandler noopHandler = new ChannelPoolHandler() {
+            @Override public void channelReleased(Channel ch) {}
+
+            @Override public void channelAcquired(Channel ch) {}
+
+            @Override public void channelCreated(Channel ch) {}
+        };
+
+        ConnectionPool pool =
+            new ConnectionPool(bootstrap,
+                               noopHandler,
+                               logger,
+                               false, /* isMinimal */
+                               0, /* pool min */
+                               inactivityPeriodSeconds, /* Inactivity seconds */
+                               maxConnections,
+                               maxPendingAcquires); /* pending acquire limit */
+        return pool;
+    }
+
+    @Test
+    public void testIdleEvictionInPool() throws InterruptedException {
+        /* Bootstrap that connects to the in-JVM Local test server */
+        Bootstrap bootstrap = new Bootstrap()
+            .group(group)
+            .channel(LocalChannel.class)
+            .remoteAddress(address);
+
+        /* Pool: 2s inactivity period, max 2 connections, max 5 pending */
+        ConnectionPool pool = getConnectionPool(bootstrap, 2, 2, 5);
+
+        /* 1. Acquire a channel */
+        Channel ch = pool.acquire().sync().getNow();
+
+        /* 2. Release it back to the pool (This starts the Idle Timer) */
+        pool.release(ch);
+
+        /* Verify it's currently Idle */
+        assertTrue(ch.isOpen());
+        assertEquals(1, pool.getFreeChannels());
+
+        /* 3. Close the channel out from under the pool */
+        ch.close();
+
+        /* 4. Wait for the pool's refresh task to reap the closed channel */
+        Thread.sleep(3000);
+
+        /* The metrics should update (Total drops to 0) */
+        assertEquals("Total count should drop to 0",
+            0, pool.getTotalChannels());
+        assertEquals("Idle count should drop to 0",
+            0, pool.getFreeChannels());
+    }
+
+
private static Logger getLogger() {
Logger tlogger = Logger.getLogger("oracle.nosql");
String level = System.getProperty("test.loglevel");
@@ -236,7 +511,9 @@ private static Logger getLogger() {
*/
private Channel getChannel(ConnectionPool pool) throws Exception {
Future fut = pool.acquire();
- return fut.get();
+        Channel ch = fut.get();
+        /* use JUnit's assertTrue, not the `assert` keyword, so the check
+         * runs regardless of whether -ea is passed to the JVM */
+        assertTrue(ch.isActive());
+        return ch;
}
private void releaseChannel(ConnectionPool pool, Channel ch) {
@@ -254,4 +531,12 @@ private SslContext buildSslContext() {
"Unable o create SSL context: " + e);
}
}
+
+    private void assertStats(ConnectionPool pool, int total,
+                             int acquired, int idle, int pending) {
+        assertEquals("Total", total, pool.getTotalChannels());
+        assertEquals("Acquired", acquired, pool.getAcquiredChannelCount());
+        assertEquals("Idle", idle, pool.getFreeChannels());
+        assertEquals("Pending", pending, pool.getPendingAcquires());
+    }
}
diff --git a/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java b/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java
index 3f322987..ffb7e360 100644
--- a/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java
+++ b/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java
@@ -7,8 +7,7 @@
package oracle.nosql.driver.iam;
-import io.netty.channel.Channel;
-import io.netty.channel.embedded.EmbeddedChannel;
+import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.ssl.SslContext;
import oracle.nosql.driver.AuthorizationProvider;
@@ -18,7 +17,6 @@
import oracle.nosql.driver.SecurityInfoNotReadyException;
import oracle.nosql.driver.http.Client;
import oracle.nosql.driver.httpclient.HttpClient;
-import oracle.nosql.driver.httpclient.ResponseHandler;
import oracle.nosql.driver.ops.GetRequest;
import oracle.nosql.driver.ops.Request;
import oracle.nosql.driver.values.MapValue;
@@ -26,6 +24,8 @@
import org.junit.Test;
import java.net.URL;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;
@@ -79,7 +79,7 @@ public void close() {
private class TestClient extends Client {
public TestClient(Logger logger, NoSQLHandleConfig config) {
- super(logger, config);
+ super(logger, config, Executors.newSingleThreadScheduledExecutor());
}
@Override
@@ -101,9 +101,8 @@ public TestHttpClient() {
}
@Override
- public void runRequest(HttpRequest request,
- ResponseHandler handler,
- Channel channel) {
+ public CompletableFuture
+ runRequest(HttpRequest request, int timeoutMS) {
/*
* Simulate an authentication failure scenario where the initial
* attempt throws SecurityInfoNotReadyException, and subsequent
@@ -111,26 +110,14 @@ public void runRequest(HttpRequest request,
*/
int count = execCount.incrementAndGet();
if (count == 1) {
- throw new SecurityInfoNotReadyException("test");
+                return CompletableFuture.failedFuture(
+                    new SecurityInfoNotReadyException("test"));
            } else {
                iaeCount.incrementAndGet();
-                throw new InvalidAuthorizationException("test");
+                return CompletableFuture.failedFuture(
+                    new InvalidAuthorizationException("test"));
            }
-        }
-
-        @Override
-        public Channel getChannel(int timeoutMs) {
-            /*
-             * Utilize Netty's EmbeddedChannel to create a mock channel that
-             * remains active, enabling the request execution to proceed with
-             * a valid channel for error simulation purposes.
-             */
-            return new EmbeddedChannel() {
-                @Override
-                public boolean isActive() {
-                    return true;
-                }
-            };
+            /* unreachable: both branches above return a failed future */
}
}
}
diff --git a/driver/src/test/java/oracle/nosql/driver/ops/QueryPublisherVerifierTest.java b/driver/src/test/java/oracle/nosql/driver/ops/QueryPublisherVerifierTest.java
new file mode 100644
index 00000000..c85b7ace
--- /dev/null
+++ b/driver/src/test/java/oracle/nosql/driver/ops/QueryPublisherVerifierTest.java
@@ -0,0 +1,141 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.ops;
+
+import oracle.nosql.driver.AuthorizationProvider;
+import oracle.nosql.driver.NoSQLHandleAsync;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.NoSQLHandleFactory;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.StringValue;
+import org.reactivestreams.tck.TestEnvironment;
+import org.reactivestreams.tck.flow.FlowPublisherVerification;
+import org.testng.annotations.AfterClass;
+
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Flow;
+
+/**
+ * Tests to check for reactive stream specification TCK.
+ * This runs set of tests to check {@link QueryPublisher} conforms to
+ * reactive stream specification defined in
+ *
+ * reactive-streams-jvm
+ *
+ * This test is excluded from the test profiles and must be run standalone.
+ * It can be run explicitly against cloudsim using the command below
+ *