71 changes: 62 additions & 9 deletions handwritten/bigquery/src/bigquery.ts
@@ -1099,6 +1099,11 @@ export class BigQuery extends Service {
};
}),
};
} else if ((providedType as string).toUpperCase() === 'TIMESTAMP(12)') {
return {
type: 'TIMESTAMP',
timestampPrecision: '12',
};
}

providedType = (providedType as string).toUpperCase();
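
A minimal usage sketch of the opt-in this new branch enables, mirroring the system tests added below; the query and timestamp literal are illustrative only:

// Sketch: declaring a provided type of 'TIMESTAMP(12)' resolves to
// {type: 'TIMESTAMP', timestampPrecision: '12'} and opts the query in to
// picosecond timestamps; plain 'TIMESTAMP' parameters are unaffected.
import {BigQuery} from '@google-cloud/bigquery';

const bigquery = new BigQuery();

async function queryPicosecondTimestamp() {
  const [rows] = await bigquery.query({
    query: 'SELECT ? AS ts',
    params: [bigquery.timestamp('2023-01-01T12:00:00.123456789123Z')],
    types: ['TIMESTAMP(12)'],
  });
  console.log(rows[0].ts.value);
}
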
@@ -2249,11 +2254,30 @@ export class BigQuery extends Service {
if (res && res.jobComplete) {
let rows: any = [];
if (res.schema && res.rows) {
rows = BigQuery.mergeSchemaWithRows_(res.schema, res.rows, {
wrapIntegers: options.wrapIntegers || false,
parseJSON: options.parseJSON,
});
delete res.rows;
try {
          /*
          Without this try/catch, a failure inside mergeSchemaWithRows_ never
          reaches the callback, so calls to getRows hang indefinitely. Catch
          the error and pass it to the user-provided callback so the failure
          is surfaced.
          */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const listParams = {
'formatOptions.timestampOutputFormat':
queryReq.formatOptions?.timestampOutputFormat,
'formatOptions.useInt64Timestamp':
queryReq.formatOptions?.useInt64Timestamp,
};
rows = BigQuery.mergeSchemaWithRows_(res.schema, res.rows, {
wrapIntegers: options.wrapIntegers || false,
parseJSON: options.parseJSON,
listParams,
});
delete res.rows;
} catch (e) {
(callback as SimpleQueryRowsCallback)(e as Error, null, job);
return;
}
}
this.trace_('[runJobsQuery] job complete');
options._cachedRows = rows;
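
A caller-side sketch of what the try/catch above changes (illustrative; assumes a `bigquery` client as in the earlier snippet): an error thrown while merging schema and rows now reaches the callback instead of the call hanging.

// Sketch: errors from mergeSchemaWithRows_ are now delivered to the callback.
bigquery.query({query: 'SELECT 1 AS x'}, (err, rows) => {
  if (err) {
    // Previously this path was never reached and the call hung.
    console.error('query failed:', err);
    return;
  }
  console.log(rows);
});
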
@@ -2334,6 +2358,18 @@ export class BigQuery extends Service {
if (options.job) {
return undefined;
}
const hasAnyFormatOpts =
options['formatOptions.timestampOutputFormat'] !== undefined ||
options['formatOptions.useInt64Timestamp'] !== undefined;
const defaultOpts = hasAnyFormatOpts
? {}
: {
timestampOutputFormat: 'ISO8601_STRING',
};
const formatOptions = extend(defaultOpts, {
timestampOutputFormat: options['formatOptions.timestampOutputFormat'],
useInt64Timestamp: options['formatOptions.useInt64Timestamp'],
});
const req: bigquery.IQueryRequest = {
useQueryCache: queryObj.useQueryCache,
labels: queryObj.labels,
@@ -2342,9 +2378,7 @@
maximumBytesBilled: queryObj.maximumBytesBilled,
timeoutMs: options.timeoutMs,
location: queryObj.location || options.location,
formatOptions: {
useInt64Timestamp: true,
},
formatOptions,
maxResults: queryObj.maxResults || options.maxResults,
query: queryObj.query,
useLegacySql: false,
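
A sketch of the defaulting logic introduced above, assuming (as the `extend` package does) that undefined source values are not copied; the function name is illustrative:

type FormatOptions = {
  timestampOutputFormat?: string;
  useInt64Timestamp?: boolean;
};

// Mirrors the hasAnyFormatOpts / defaultOpts / extend logic above.
function resolveFormatOptions(options: {
  'formatOptions.timestampOutputFormat'?: string;
  'formatOptions.useInt64Timestamp'?: boolean;
}): FormatOptions {
  const tof = options['formatOptions.timestampOutputFormat'];
  const int64 = options['formatOptions.useInt64Timestamp'];
  const hasAnyFormatOpts = tof !== undefined || int64 !== undefined;
  // With no explicit format options, fall back to ISO8601_STRING output
  // (replacing the previous hard-coded useInt64Timestamp: true default).
  const result: FormatOptions = hasAnyFormatOpts
    ? {}
    : {timestampOutputFormat: 'ISO8601_STRING'};
  if (tof !== undefined) result.timestampOutputFormat = tof;
  if (int64 !== undefined) result.useInt64Timestamp = int64;
  return result;
}

// resolveFormatOptions({})
//   -> {timestampOutputFormat: 'ISO8601_STRING'}
// resolveFormatOptions({'formatOptions.useInt64Timestamp': true})
//   -> {useInt64Timestamp: true}
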
@@ -2588,6 +2622,7 @@ function convertSchemaFieldValue(
value = BigQueryRange.fromSchemaValue_(
value,
schemaField.rangeElementType!.type!,
options.listParams, // Required to convert TIMESTAMP values
);
break;
}
@@ -2666,6 +2701,11 @@ export class BigQueryRange {
}

private static fromStringValue_(value: string): [start: string, end: string] {
    /*
    Decodes a RANGE value returned by the server into its start and end
    values, which are then used to construct a BigQueryRange.
    */
let cleanedValue = value;
if (cleanedValue.startsWith('[') || cleanedValue.startsWith('(')) {
cleanedValue = cleanedValue.substring(1);
@@ -2684,14 +2724,27 @@
return [start, end];
}

static fromSchemaValue_(value: string, elementType: string): BigQueryRange {
static fromSchemaValue_(
value: string,
elementType: string,
listParams?:
| bigquery.tabledata.IListParams
| bigquery.jobs.IGetQueryResultsParams,
): BigQueryRange {
    /*
    Only called from convertSchemaFieldValue, and only for RANGE values. The
    result is delivered to the user on read calls, so listParams must be
    passed along to ensure TIMESTAMP element types are converted with the
    requested format options.
    */
const [start, end] = BigQueryRange.fromStringValue_(value);
const convertRangeSchemaValue = (value: string) => {
if (value === 'UNBOUNDED' || value === 'NULL') {
return null;
}
return convertSchemaFieldValue({type: elementType}, value, {
wrapIntegers: false,
listParams,
});
};
return BigQuery.range(
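
A rough sketch of the RANGE decoding described in the comments above, assuming the server returns RANGE values as a bracketed "start, end" pair (the exact parsing is elided in this hunk, so this is an approximation):

// Approximation of fromStringValue_: strip the bound markers and split
// into start and end.
function decodeRange(value: string): [start: string, end: string] {
  const inner = value.replace(/^[\[(]/, '').replace(/[\])]$/, '');
  const [start, end] = inner.split(',').map(s => s.trim());
  return [start, end];
}

// decodeRange('[2020-10-01 12:00:00 UTC, UNBOUNDED)')
//   -> ['2020-10-01 12:00:00 UTC', 'UNBOUNDED']
// Each endpoint is then run through convertSchemaFieldValue with the same
// listParams as top-level fields, so TIMESTAMP endpoints honor
// formatOptions.timestampOutputFormat; 'UNBOUNDED' / 'NULL' map to null.
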
19 changes: 15 additions & 4 deletions handwritten/bigquery/src/job.ts
@@ -595,10 +595,21 @@ class Job extends Operation {
let rows: any = [];

if (resp.schema && resp.rows) {
rows = BigQuery.mergeSchemaWithRows_(resp.schema, resp.rows, {
wrapIntegers,
parseJSON,
});
try {
      /*
      Without this try/catch, a failure inside mergeSchemaWithRows_ never
      reaches the callback, so calls to the /query endpoint hang indefinitely.
      Catch the error and pass it to the user-provided callback so the
      failure is surfaced.
      */
rows = BigQuery.mergeSchemaWithRows_(resp.schema, resp.rows, {
wrapIntegers,
parseJSON,
});
} catch (e) {
callback!(e as Error, null, null, resp);
return;
}
}

let nextQuery: QueryResultsOptions | null = null;
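
The same fix viewed from the caller (illustrative; assumes `job` is a Job obtained from a prior createQueryJob call): a merge failure during getQueryResults now reaches the user's callback rather than hanging.

// Sketch: errors thrown by mergeSchemaWithRows_ now surface here as well.
job.getQueryResults((err, rows) => {
  if (err) {
    console.error('getQueryResults failed:', err);
    return;
  }
  console.log(rows);
});
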
164 changes: 161 additions & 3 deletions handwritten/bigquery/system-test/bigquery.ts
@@ -1472,9 +1472,14 @@ describe('BigQuery', () => {
],
},
(err, rows) => {
assert.ifError(err);
assert.strictEqual(rows!.length, 1);
done();
            try {
              // Without this try/catch, an assertion failure inside the
              // callback never reaches the test runner and the test fails
              // silently.
assert.ifError(err);
assert.strictEqual(rows!.length, 1);
done();
} catch (e) {
done(e);
}
},
);
});
@@ -1498,6 +1503,159 @@
},
);
});
  describe('High Precision Query System Tests', () => {
let bigquery: BigQuery;
const expectedTsValueNanoseconds = '2023-01-01T12:00:00.123456000Z';
const expectedTsValuePicoseconds =
'2023-01-01T12:00:00.123456789123Z';
const expectedErrorMessage =
'Cannot specify both timestamp_as_int and timestamp_output_format.';

before(() => {
bigquery = new BigQuery();
});

const testCases = [
{
name: 'TOF: FLOAT64, UI64: true (error)',
timestampOutputFormat: 'FLOAT64',
useInt64Timestamp: true,
expectedTsValue: undefined,
expectedError: expectedErrorMessage,
},
{
        name: 'TOF: omitted, UI64: omitted (default ISO8601_STRING)',
timestampOutputFormat: undefined,
useInt64Timestamp: undefined,
expectedTsValue: expectedTsValuePicoseconds,
},
{
name: 'TOF: omitted, UI64: true',
timestampOutputFormat: undefined,
useInt64Timestamp: true,
expectedTsValue: expectedTsValueNanoseconds,
},
];

testCases.forEach(testCase => {
it(`should handle ${testCase.name}`, async () => {
        /*
        Users opt in to timestampPrecision=12 by declaring the new
        TIMESTAMP(12) type. The opt-in is required because some queries,
        such as `SELECT CAST(? as TIMESTAMP(12))` or queries using
        TIMESTAMP_ADD, will fail if we set timestampPrecision=12, and we
        don't want this change to affect existing users.
        */
const query = {
query: 'SELECT ? as ts',
params: [
bigquery.timestamp('2023-01-01T12:00:00.123456789123Z'),
],
types: ['TIMESTAMP(12)'],
};

const options: any = {};
if (testCase.timestampOutputFormat !== undefined) {
options['formatOptions.timestampOutputFormat'] =
testCase.timestampOutputFormat;
}
if (testCase.useInt64Timestamp !== undefined) {
options['formatOptions.useInt64Timestamp'] =
testCase.useInt64Timestamp;
}

try {
const [rows] = await bigquery.query(query, options);
if (testCase.expectedError) {
assert.fail(
`Query should have failed for ${testCase.name}, but succeeded`,
);
}
assert.ok(rows.length > 0);
assert.ok(rows[0].ts.value !== undefined);
assert.strictEqual(
rows[0].ts.value,
testCase.expectedTsValue,
);
} catch (err: any) {
if (!testCase.expectedError) {
throw err;
}

const message = err.message;
assert.strictEqual(
message,
testCase.expectedError,
            `Expected "${testCase.expectedError}" for ${testCase.name}, got "${message}"`,
);
}
});
it(`should handle nested ${testCase.name}`, async () => {
        /*
        Same opt-in as above: users declare the new TIMESTAMP(12) type to
        request timestampPrecision=12, here for a timestamp nested inside a
        struct parameter.
        */
const query = {
query: 'SELECT ? obj',
params: [
{
nested: {
a: bigquery.timestamp(
'2023-01-01T12:00:00.123456789123Z',
),
},
},
],
types: [
{
nested: {
a: 'TIMESTAMP(12)',
},
},
],
};

const options: any = {};
if (testCase.timestampOutputFormat !== undefined) {
options['formatOptions.timestampOutputFormat'] =
testCase.timestampOutputFormat;
}
if (testCase.useInt64Timestamp !== undefined) {
options['formatOptions.useInt64Timestamp'] =
testCase.useInt64Timestamp;
}

try {
const [rows] = await bigquery.query(query, options);
if (testCase.expectedError) {
assert.fail(
`Query should have failed for ${testCase.name}, but succeeded`,
);
}
assert.ok(rows.length > 0);
assert.ok(rows[0].obj.nested.a.value !== undefined);
assert.strictEqual(
rows[0].obj.nested.a.value,
testCase.expectedTsValue,
);
} catch (err: any) {
if (!testCase.expectedError) {
throw err;
}

const message = err.message;
assert.strictEqual(
message,
testCase.expectedError,
            `Expected "${testCase.expectedError}" for ${testCase.name}, got "${message}"`,
);
}
});
});
});
});

describe('named', () => {