Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions deps/undici/src/lib/web/cache/cache.js
Original file line number Diff line number Diff line change
Expand Up @@ -581,10 +581,6 @@ class Cache {
// 4.2.5.1
requestResponses = this.#queryCache(operation.request, operation.options)

// TODO: the spec is wrong, this is needed to pass WPTs
if (requestResponses.length === 0) {
return []
}

// 4.2.5.2
for (const requestResponse of requestResponses) {
Expand Down
17 changes: 0 additions & 17 deletions deps/undici/src/types/fetch.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -34,23 +34,6 @@ export class BodyMixin {
readonly arrayBuffer: () => Promise<ArrayBuffer>
readonly blob: () => Promise<Blob>
readonly bytes: () => Promise<Uint8Array>
/**
* @deprecated This method is not recommended for parsing multipart/form-data bodies in server environments.
* It is recommended to use a library such as [@fastify/busboy](https://www.npmjs.com/package/@fastify/busboy) as follows:
*
* @example
* ```js
* import { Busboy } from '@fastify/busboy'
* import { Readable } from 'node:stream'
*
* const response = await fetch('...')
* const busboy = new Busboy({ headers: { 'content-type': response.headers.get('content-type') } })
*
* // handle events emitted from `busboy`
*
* Readable.fromWeb(response.body).pipe(busboy)
* ```
*/
readonly formData: () => Promise<FormData>
readonly json: () => Promise<unknown>
readonly text: () => Promise<string>
Expand Down
2 changes: 1 addition & 1 deletion lib/internal/fs/utils.js
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ const kIoMaxLength = 2 ** 31 - 1;
// Use up to 512kb per read otherwise to partition reading big files to prevent
// blocking other threads in case the available threads are all in use.
const kReadFileUnknownBufferLength = 64 * 1024;
const kReadFileBufferLength = 512 * 1024;
const kReadFileBufferLength = 1024 * 1024;

const kWriteFileMaxChunkSize = 512 * 1024;

Expand Down
43 changes: 27 additions & 16 deletions lib/internal/http2/core.js
Original file line number Diff line number Diff line change
Expand Up @@ -1613,19 +1613,11 @@ class Http2Session extends EventEmitter {
closeSession(this, code, error);
}

// Closing the session will:
// 1. Send a goaway frame
// 2. Mark the session as closed
// 3. Prevent new inbound or outbound streams from being opened
// 4. Optionally register a 'close' event handler
// 5. Will cause the session to automatically destroy after the
// last currently open Http2Stream closes.
//
// Close always assumes a good, non-error shutdown (NGHTTP_NO_ERROR)
//
// If the session has not connected yet, the closed flag will still be
// set but the goaway will not be sent until after the connect event
// is emitted.
/**
* Gracefully closes the Http2Session.
* @param {() => void} [callback]
* @returns {void}
*/
close(callback) {
if (this.closed || this.destroyed)
return;
Expand Down Expand Up @@ -1804,8 +1796,19 @@ class ClientHttp2Session extends Http2Session {
this[kPendingRequestCalls] = null;
}

// Submits a new HTTP2 request to the connected peer. Returns the
// associated Http2Stream instance.
/**
* Submits a new HTTP2 request to the connected peer.
* @param {Record<string, string | string[]>} [headersParam]
* @param {{
* endStream?: boolean;
* exclusive?: boolean;
* parent?: number;
* weight?: number;
* waitForTrailers?: boolean;
* signal?: AbortSignal;
* }} [options]
* @returns {ClientHttp2Stream}
*/
request(headersParam, options) {
debugSessionObj(this, 'initiating request');

Expand Down Expand Up @@ -2982,7 +2985,15 @@ class ServerHttp2Stream extends Http2Stream {
}
}

// Initiate a response on this Http2Stream
/**
* Initiates a response on this Http2Stream.
* @param {Record<string, string | string[]>} [headersParam]
* @param {{
* endStream?: boolean;
* waitForTrailers?: boolean;
* }} [options]
* @returns {void}
*/
respond(headersParam, options) {
if (this.destroyed || this.closed)
throw new ERR_HTTP2_INVALID_STREAM();
Expand Down
7 changes: 1 addition & 6 deletions lib/internal/webstreams/readablestream.js
Original file line number Diff line number Diff line change
Expand Up @@ -527,14 +527,9 @@ class ReadableStream {
// await uses here do not trigger that failure because
// the test that fails does not trigger those code paths.
next() {
// If this is the first read, delay by one microtask
// to ensure that the controller has had an opportunity
// Ensure that the controller has had an opportunity
// to properly start and perform the initial pull.
// TODO(@jasnell): The spec doesn't call this out so
// need to investigate if it's a bug in our impl or
// the spec.
if (!started) {
state.current = PromiseResolve();
started = true;
}
state.current = state.current !== undefined ?
Expand Down
2 changes: 1 addition & 1 deletion lib/internal/webstreams/writablestream.js
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ class WritableStream {
validateObject(strategy, 'strategy', kValidateObjectAllowObjectsAndNull);
const type = sink?.type;
if (type !== undefined)
throw new ERR_INVALID_ARG_VALUE.RangeError('type', type);
throw new ERR_INVALID_ARG_TYPE('sink.type', 'undefined', type);

this[kState] = createWritableStreamState();

Expand Down
71 changes: 44 additions & 27 deletions test/parallel/test-cluster-primary-error.js
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,23 @@
const common = require('../common');
const assert = require('assert');
const cluster = require('cluster');
const net = require('net');
const { once } = require('events');

const totalWorkers = 2;

// Cluster setup
if (cluster.isWorker) {
const http = require('http');
http.Server(() => {}).listen(0, '127.0.0.1');

// Connect to the tracker server to signal liveness
const trackerPort = process.env.TRACKER_PORT;
if (trackerPort) {
net.connect(trackerPort).on('error', () => {
// Ignore errors; we just want to keep the connection open
});
}
} else if (process.argv[2] === 'cluster') {
// Send PID to testcase process
let forkNum = 0;
Expand Down Expand Up @@ -67,38 +77,45 @@ if (cluster.isWorker) {
} else {
// This is the testcase

const fork = require('child_process').fork;
(async () => {
const tracker = net.createServer().listen(0);
await once(tracker, 'listening');
const { port } = tracker.address();

// List all workers
const workers = [];
let workersAlive = 0;
const workerDead = new Promise((resolve) => {
tracker.on('connection', (socket) => {
workersAlive++;
socket.on('error', () => {});
socket.on('close', () => {
if (--workersAlive === 0) {
resolve();
}
});
});
});

// Spawn a cluster process
const primary = fork(process.argv[1], ['cluster'], { silent: true });
const { fork } = require('child_process');

// Handle messages from the cluster
primary.on('message', common.mustCall((data) => {
// Add worker pid to list and progress tracker
if (data.cmd === 'worker') {
workers.push(data.workerPID);
}
}, totalWorkers));
// Spawn a cluster process
const primary = fork(process.argv[1], ['cluster'], {
silent: true,
env: { ...process.env, TRACKER_PORT: port }
});

// When cluster is dead
primary.on('exit', common.mustCall((code) => {
// Handle messages from the cluster
primary.on('message', common.mustCall((data) => {
// No longer need to track workers via PID for liveness
}, totalWorkers));

// When cluster is dead
const [code] = await once(primary, 'exit');
// Check that the cluster died accidentally (non-zero exit code)
assert.strictEqual(code, 1);

// XXX(addaleax): The fact that this uses raw PIDs makes the test inherently
// flaky – another process might end up being started right after the
// workers finished and receive the same PID.
const pollWorkers = () => {
// When primary is dead all workers should be dead too
if (workers.some((pid) => common.isAlive(pid))) {
setTimeout(pollWorkers, 50);
}
};

// Loop indefinitely until worker exit
pollWorkers();
}));
// Wait for all workers to close their connections to the tracker
// This ensures they are actually dead without relying on PIDs.
await workerDead;
tracker.close();
})().then(common.mustCall());
}
24 changes: 11 additions & 13 deletions test/parallel/test-http-no-content-length.js
Original file line number Diff line number Diff line change
Expand Up @@ -28,17 +28,15 @@ const http = require('http');
const server = net.createServer(function(socket) {
// Neither Content-Length nor Connection
socket.end('HTTP/1.1 200 ok\r\n\r\nHello');
}).listen(0, common.mustCall(function() {
http.get({ port: this.address().port }, common.mustCall(function(res) {
let body = '';

res.setEncoding('utf8');
res.on('data', function(chunk) {
body += chunk;
});
res.on('end', common.mustCall(function() {
assert.strictEqual(body, 'Hello');
server.close();
}));
}));
}).listen(0, common.mustCall(async function() {
const res = await new Promise((resolve) => {
http.get({ port: this.address().port }, resolve);
});
let body = '';
res.setEncoding('utf8');
for await (const chunk of res) {
body += chunk;
}
assert.strictEqual(body, 'Hello');
server.close();
}));
49 changes: 21 additions & 28 deletions test/parallel/test-https-simple.js
Original file line number Diff line number Diff line change
Expand Up @@ -55,15 +55,7 @@ for (const option of invalid_options) {

const server = https.createServer(options, serverCallback);

server.listen(0, common.mustCall(() => {
let tests = 0;

function done() {
if (--tests === 0)
server.close();
}

// Do a request ignoring the unauthorized server certs
server.listen(0, common.mustCall(async () => {
const port = server.address().port;

const options = {
Expand All @@ -73,27 +65,28 @@ server.listen(0, common.mustCall(() => {
method: 'GET',
rejectUnauthorized: false
};
tests++;
const req = https.request(options, common.mustCall((res) => {
let responseBody = '';
res.on('data', function(d) {
responseBody = responseBody + d;
});

res.on('end', common.mustCall(() => {
assert.strictEqual(responseBody, body);
done();
}));
}));
req.end();
// Do a request ignoring the unauthorized server certs
{
const req = https.request(options);
req.end();
const res = await new Promise((resolve) => req.on('response', resolve));
let responseBody = '';
res.setEncoding('utf8');
for await (const d of res) {
responseBody += d;
}
assert.strictEqual(responseBody, body);
}

// Do a request that errors due to the invalid server certs
options.rejectUnauthorized = true;
tests++;
const checkCertReq = https.request(options, common.mustNotCall()).end();
{
options.rejectUnauthorized = true;
const req = https.request(options, common.mustNotCall());
req.end();
const err = await new Promise((resolve) => req.on('error', resolve));
assert.strictEqual(err.code, 'UNABLE_TO_VERIFY_LEAF_SIGNATURE');
}

checkCertReq.on('error', common.mustCall((e) => {
assert.strictEqual(e.code, 'UNABLE_TO_VERIFY_LEAF_SIGNATURE');
done();
}));
server.close();
}));