Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/services-build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ jobs:
BRANCH: ${{ github.ref_name }}
COMMIT_SHA: ${{ github.sha }}
run: |
TARGETS="console rotor"
TARGETS="console rotor functions-server"
REGISTRY="${{ secrets.DOCKERHUB_USERNAME }}"
SHORT_SHA=$(git rev-parse --short=7 HEAD)

Expand Down
47 changes: 47 additions & 0 deletions all.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -200,3 +200,50 @@ ENV JITSU_VERSION_STRING=${JITSU_BUILD_VERSION}


ENTRYPOINT ["/app/entrypoint.sh"]

# ============================================================================
# FUNCTIONS-SERVER STAGE - Deno-based UDF execution with Web Worker isolation
# ============================================================================
# Sandboxed functions execution for free-tier workspaces
FROM denoland/deno:debian AS functions-server

# FIX: the trailing commas previously on these lines were part of the default
# values (e.g. "dev,"), and leaked verbatim into the JITSU_VERSION_* env vars.
ARG JITSU_BUILD_VERSION=dev
ARG JITSU_BUILD_DOCKER_TAG=dev
ARG JITSU_BUILD_COMMIT_SHA=unknown

WORKDIR /app

# Install curl for healthchecks (ca-certificates for outbound TLS from UDFs)
RUN apt-get update && \
    apt-get install -y --no-install-recommends ca-certificates curl && \
    rm -rf /var/lib/apt/lists/*

EXPOSE 3401

# Copy Deno-specific build artifacts from builder
COPY --from=builder /app/services/rotor/dist/functions-server.mjs ./functions-server.mjs
COPY --from=builder /app/services/rotor/dist/workspace-worker.mjs ./workspace-worker.mjs
# Copy node_modules with native deps (installed by build.mts)
# Workspace packages and pure JS deps are bundled into functions-server.mjs by esbuild
COPY --from=builder /app/services/rotor/dist/node_modules ./node_modules
COPY --from=builder /app/services/rotor/dist/package.json ./package.json

ENV JITSU_VERSION_COMMIT_SHA=${JITSU_BUILD_COMMIT_SHA}
ENV JITSU_VERSION_DOCKER_TAG=${JITSU_BUILD_DOCKER_TAG}
ENV JITSU_VERSION_STRING=${JITSU_BUILD_VERSION}
ENV NODE_ENV=production

HEALTHCHECK CMD curl --fail http://localhost:3401/health || exit 1

# Permissions are scoped: writes limited to the UDF scratch dir and /data,
# subprocess execution limited to the platform-specific esbuild binaries.
ENTRYPOINT ["deno", "run", \
    "--allow-net", \
    "--allow-read", \
    "--allow-write=/tmp/jitsu-udf,/data", \
    "--allow-env", \
    "--allow-sys", \
    "--allow-ffi", \
    "--allow-run=/app/node_modules/@esbuild/linux-arm64/bin/esbuild,/app/node_modules/@esbuild/linux-x64/bin/esbuild,/app/node_modules/esbuild/bin/esbuild", \
    "--unstable-worker-options", \
    "--no-check", \
    "--v8-flags=--max-old-space-size=2048", \
    "functions-server.mjs"]
19 changes: 19 additions & 0 deletions build-fs.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#!/bin/bash
# Build the functions-server image from all.Dockerfile and load it into minikube.
# -u catches unset variables, pipefail propagates failures through pipes.
set -euo pipefail

DATE_TAG=$(date +"%Y%m%d%H%M")
IMAGE="jitsucom/fs:dev-${DATE_TAG}"

# FIX: message previously said "rotor" although the functions-server target is built.
echo "Building functions-server image..."
docker buildx build \
  --target functions-server \
  --progress=plain \
  --load \
  -t "$IMAGE" \
  -f all.Dockerfile \
  .

echo "Loading image into minikube..."
minikube image load --overwrite=true "$IMAGE"

echo "Done: $IMAGE"
9 changes: 7 additions & 2 deletions builder.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,15 @@ FROM node:24-bookworm-slim
# Install Node.js 24 manually from NodeSource + all runtime dependencies
# This includes everything needed for building AND running the final images
RUN apt-get update && \
apt-get install -y ca-certificates gnupg git curl telnet python3 g++ make jq nano cron bash netcat-traditional procps && \
apt-get install -y ca-certificates gnupg git curl telnet python3 g++ make jq nano cron bash netcat-traditional procps unzip && \
rm -rf /var/lib/apt/lists/* && \
npm -g install pnpm@10 && \
npm cache clean --force
npm cache clean --force && \
ARCH=$(uname -m) && \
curl -fsSL "https://github.com/denoland/deno/releases/latest/download/deno-${ARCH}-unknown-linux-gnu.zip" -o /tmp/deno.zip && \
unzip -o /tmp/deno.zip -d /usr/local/bin && \
chmod +x /usr/local/bin/deno && \
rm /tmp/deno.zip

#print current user
RUN whoami && echo "Current user is $(whoami)"
Expand Down
120 changes: 81 additions & 39 deletions bulker/operator/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -1119,54 +1119,70 @@ func (o *Operator) buildDeploymentFromData(data *DeploymentData) *appsv1.Deploym
}
volumes := make([]corev1.Volume, 0)
volumeMounts := make([]corev1.VolumeMount, 0)

// Mount connections ConfigMaps as parts
// Files are stored with keys like ${workspaceId}__connections.json.gz
for i := 0; i < data.ConnectionsConfigMapCount; i++ {
volName := fmt.Sprintf("connections-%d", i)
cmName := fmt.Sprintf("%s%s-%d", data.DeploymentID, connectionsCMSuffix, i)

volumes = append(volumes, corev1.Volume{
Name: volName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cmName,
initVolumeMounts := make([]corev1.VolumeMount, 0)
// Free tier: init container copies ConfigMap parts into writable emptyDir (multiple workspaces merged).
// Dedicated/premium: ConfigMaps mounted directly into /data (no init container needed).
useCopyInit := data.FunctionsClass == FunctionsClassFree

// Add ConfigMap volumes for connections and functions
type cmVolConfig struct {
count int
prefix string // volume name prefix ("connections" or "functions")
suffix string // ConfigMap name suffix
subdir string // subdirectory under mount base ("connections" or "functions")
}
for _, cfg := range []cmVolConfig{
{data.ConnectionsConfigMapCount, "connections", connectionsCMSuffix, "connections"},
{data.FunctionsConfigMapCount, "functions", functionsCMSuffix, "functions"},
} {
for i := 0; i < cfg.count; i++ {
volName := fmt.Sprintf("%s-%d", cfg.prefix, i)
cmName := fmt.Sprintf("%s%s-%d", data.DeploymentID, cfg.suffix, i)

volumes = append(volumes, corev1.Volume{
Name: volName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cmName,
},
},
},
},
})

// Mount connections ConfigMaps to /data/connections/part-{n}
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: volName,
MountPath: fmt.Sprintf("/data/connections/part-%d", i),
ReadOnly: true,
})
})

if useCopyInit {
// Init container reads from /config-src, copies to writable /data
initVolumeMounts = append(initVolumeMounts, corev1.VolumeMount{
Name: volName,
MountPath: fmt.Sprintf("/config-src/%s/part-%d", cfg.subdir, i),
ReadOnly: true,
})
} else {
// Mount directly into /data for the main container
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: volName,
MountPath: fmt.Sprintf("/data/%s/part-%d", cfg.subdir, i),
ReadOnly: true,
})
}
}
}

// Add volumes for functions ConfigMaps
// Functions are stored with keys like ${workspaceId}__${functionId}.json.gz
for i := 0; i < data.FunctionsConfigMapCount; i++ {
volName := fmt.Sprintf("functions-%d", i)
cmName := fmt.Sprintf("%s%s-%d", data.DeploymentID, functionsCMSuffix, i)

if useCopyInit {
// Writable emptyDir for merged config data
volumes = append(volumes, corev1.Volume{
Name: volName,
Name: "config-data",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: cmName,
},
},
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
})

// Mount functions ConfigMaps to /data/functions/part-{n}
initVolumeMounts = append(initVolumeMounts, corev1.VolumeMount{
Name: "config-data",
MountPath: "/data",
})
volumeMounts = append(volumeMounts, corev1.VolumeMount{
Name: volName,
MountPath: fmt.Sprintf("/data/functions/part-%d", i),
ReadOnly: true,
Name: "config-data",
MountPath: "/data",
})
}

Expand Down Expand Up @@ -1216,6 +1232,31 @@ func (o *Operator) buildDeploymentFromData(data *DeploymentData) *appsv1.Deploym
},
}

// Init container: only needed for free tier to copy/merge ConfigMap parts into writable emptyDir
initContainers := []corev1.Container{}
if useCopyInit {
initCopyScript := `#!/bin/sh
set -e
mkdir -p /data/connections /data/functions
# Copy connections from all parts
for dir in /config-src/connections/part-*; do
[ -d "$dir" ] && cp "$dir"/* /data/connections/ 2>/dev/null || true
done
# Copy functions from all parts
for dir in /config-src/functions/part-*; do
[ -d "$dir" ] && cp "$dir"/* /data/functions/ 2>/dev/null || true
done
echo "Config data copied to /data"
ls -la /data/connections/ /data/functions/ 2>/dev/null || true
`
initContainers = append(initContainers, corev1.Container{
Name: "copy-config",
Image: "busybox:1.37",
Command: []string{"sh", "-c", initCopyScript},
VolumeMounts: initVolumeMounts,
})
}

// Build containers list
containers := []corev1.Container{}

Expand Down Expand Up @@ -1351,6 +1392,7 @@ func (o *Operator) buildDeploymentFromData(data *DeploymentData) *appsv1.Deploym
sec60 := int64(60)
podSpec := corev1.PodSpec{
TerminationGracePeriodSeconds: &sec60,
InitContainers: initContainers,
Containers: containers,
Volumes: volumes,
NodeSelector: nodeSelector,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ export type DataLayoutImpl<T> = (
) => MappedEvent[] | MappedEvent;

/**
 * Maps an analytics event to the legacy ("classic") Jitsu flat representation
 * for bulker destinations. Internal bookkeeping properties are stripped, and
 * the target table falls back to "events" when the event carries no explicit
 * table name parameter.
 */
export function jitsuLegacy(event: AnalyticsServerEvent, ctx: FullContext<BulkerDestinationConfig>): MappedEvent {
  const classic = toJitsuClassic(event, ctx, true);
  const table = event[TableNameParameter] ?? "events";
  return { event: omit(classic, JitsuInternalProperties), table };
}

Expand Down
55 changes: 36 additions & 19 deletions libs/functions/__tests__/classic-mapping.test.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { AnalyticsServerEvent } from "@jitsu/protocols/analytics";
import type { Event as JitsuLegacyEvent } from "@jitsu/sdk-js";
import { FullContext, UserAgent } from "@jitsu/protocols/functions";
import { fromJitsuClassic, removeUndefined, TableNameParameter, toJitsuClassic, toSnakeCase } from "../src";
import { FullContext } from "@jitsu/protocols/functions";
import { fromJitsuClassic, toJitsuClassic } from "../src";
import { classicEvents } from "./data/classic-events";

const identify: AnalyticsServerEvent = {
Expand Down Expand Up @@ -264,14 +264,22 @@ const legacyPageExpectedWarehouse = {
};

test("legacy event s3", () => {
const identifyLegacyResult = toJitsuClassic(identify, {
props: { keepOriginalNames: true },
destination: { type: "s3" },
} as unknown as FullContext);
const pageLegacyResult = toJitsuClassic(page, {
props: { keepOriginalNames: true },
destination: { type: "s3" },
} as unknown as FullContext);
const identifyLegacyResult = toJitsuClassic(
identify,
{
props: { keepOriginalNames: true },
destination: { type: "s3" },
} as unknown as FullContext,
true
);
const pageLegacyResult = toJitsuClassic(
page,
{
props: { keepOriginalNames: true },
destination: { type: "s3" },
} as unknown as FullContext,
true
);
console.log(JSON.stringify(identifyLegacyResult, null, 2));
expect(identifyLegacyResult).toStrictEqual(legacyIdentifyExpectedS3);

Expand All @@ -280,14 +288,22 @@ test("legacy event s3", () => {
});

test("legacy event warehouse", () => {
const identifyLegacyResult = toJitsuClassic(identify, {
props: { keepOriginalNames: true },
destination: { type: "postgres" },
} as unknown as FullContext);
const pageLegacyResult = toJitsuClassic(page, {
props: { keepOriginalNames: true },
destination: { type: "postgres" },
} as unknown as FullContext);
const identifyLegacyResult = toJitsuClassic(
identify,
{
props: { keepOriginalNames: true },
destination: { type: "postgres" },
} as unknown as FullContext,
true
);
const pageLegacyResult = toJitsuClassic(
page,
{
props: { keepOriginalNames: true },
destination: { type: "postgres" },
} as unknown as FullContext,
true
);
console.log(JSON.stringify(identifyLegacyResult, null, 2));
expect(identifyLegacyResult).toStrictEqual(legacyIdentifyExpectedWarehouse);

Expand All @@ -307,7 +323,8 @@ test("classic events mapping", () => {
const restored = fromJitsuClassic(event);
const mapped = toJitsuClassic(
restored as AnalyticsServerEvent,
{ props: { keepOriginalNames: true }, destination: { type: "s3" } } as unknown as FullContext
{ props: { keepOriginalNames: true }, destination: { type: "s3" } } as unknown as FullContext,
true
);
delete mapped.anon_ip;
expect(mapped).toStrictEqual(event);
Expand Down
21 changes: 12 additions & 9 deletions libs/functions/src/lib/functions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -96,15 +96,18 @@ function anonymizeIp(ip: string | undefined) {
}
}

export function toJitsuClassic(event: AnalyticsServerEvent, ctx: FullContext): AnyEvent {
const keepOriginalNames = !!ctx.props.keepOriginalNames;
const fileStorage = ctx.destination.type === "s3" || ctx.destination.type === "gcs";
let transferFunc = transferAsSnakeCase;
if (keepOriginalNames) {
if (fileStorage) {
transferFunc = transfer;
} else {
transferFunc = transferAsClassic;
export function toJitsuClassic(event: AnalyticsServerEvent, ctx: FullContext, bulker: boolean = false): AnyEvent {
let transferFunc = transfer;
if (bulker) {
const keepOriginalNames = !!ctx?.props?.keepOriginalNames;
const fileStorage = ctx?.destination?.type === "s3" || ctx?.destination?.type === "gcs";
transferFunc = transferAsSnakeCase;
if (keepOriginalNames) {
if (fileStorage) {
transferFunc = transfer;
} else {
transferFunc = transferAsClassic;
}
}
}
let url: URL | undefined = undefined;
Expand Down
4 changes: 3 additions & 1 deletion libs/jitsu-js/src/analytics-plugin.ts
Original file line number Diff line number Diff line change
Expand Up @@ -473,7 +473,9 @@ export function ensureAnonymousId(opts: JitsuOptions): string | undefined {
secure: window.location.protocol === "https:",
});
if (opts.debug) {
console.log(`[JITSU DEBUG] preInitAnonymousId: created anonymous ID cookie '${cookieName}'=${id} on domain '${domain}'`);
console.log(
`[JITSU DEBUG] preInitAnonymousId: created anonymous ID cookie '${cookieName}'=${id} on domain '${domain}'`
);
}
return id;
}
Expand Down
Loading
Loading