refactor progress
This commit is contained in:
37
packages/queue/package.json
Normal file
37
packages/queue/package.json
Normal file
@@ -0,0 +1,37 @@
|
||||
{
|
||||
"name": "@fluxer/queue",
|
||||
"version": "0.0.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"exports": {
|
||||
"./*": "./*"
|
||||
},
|
||||
"scripts": {
|
||||
"test": "vitest run",
|
||||
"test:coverage": "vitest run --coverage",
|
||||
"test:watch": "vitest",
|
||||
"typecheck": "tsgo --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"@fluxer/errors": "workspace:*",
|
||||
"@fluxer/hono": "workspace:*",
|
||||
"@fluxer/hono_types": "workspace:*",
|
||||
"@fluxer/logger": "workspace:*",
|
||||
"@fluxer/rate_limit": "workspace:*",
|
||||
"@fluxer/sentry": "workspace:*",
|
||||
"@fluxer/time": "workspace:*",
|
||||
"crc-32": "catalog:",
|
||||
"cron-parser": "catalog:",
|
||||
"hono": "catalog:",
|
||||
"msgpackr": "catalog:",
|
||||
"uuid": "catalog:",
|
||||
"zod": "catalog:"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "catalog:",
|
||||
"@typescript/native-preview": "catalog:",
|
||||
"@vitest/coverage-v8": "catalog:",
|
||||
"vite-tsconfig-paths": "catalog:",
|
||||
"vitest": "catalog:"
|
||||
}
|
||||
}
|
||||
155
packages/queue/src/App.tsx
Normal file
155
packages/queue/src/App.tsx
Normal file
@@ -0,0 +1,155 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import {AppNotFoundHandler} from '@fluxer/errors/src/domains/core/ErrorHandlers';
|
||||
import {createErrorHandler} from '@fluxer/errors/src/ErrorHandler';
|
||||
import {createInternalAuth} from '@fluxer/hono/src/middleware/InternalAuth';
|
||||
import {applyMiddlewareStack} from '@fluxer/hono/src/middleware/MiddlewareStack';
|
||||
import type {MetricsCollector} from '@fluxer/hono_types/src/MetricsTypes';
|
||||
import type {TracingOptions} from '@fluxer/hono_types/src/TracingTypes';
|
||||
import type {LoggerFactory} from '@fluxer/logger/src/LoggerInterface';
|
||||
import type {AppEnv} from '@fluxer/queue/src/api/QueueApiTypes';
|
||||
import {createRoutes} from '@fluxer/queue/src/api/Routes';
|
||||
import {CronScheduler} from '@fluxer/queue/src/cron/CronScheduler';
|
||||
import {QueueEngine} from '@fluxer/queue/src/engine/QueueEngine';
|
||||
import type {QueueConfig} from '@fluxer/queue/src/types/QueueConfig';
|
||||
import type {RateLimitService} from '@fluxer/rate_limit/src/RateLimitService';
|
||||
import {captureException} from '@fluxer/sentry/src/Sentry';
|
||||
import {Hono} from 'hono';
|
||||
import {createMiddleware} from 'hono/factory';
|
||||
|
||||
export interface CreateQueueAppOptions {
|
||||
config: QueueConfig;
|
||||
loggerFactory: LoggerFactory;
|
||||
metricsCollector?: MetricsCollector;
|
||||
tracing?: TracingOptions;
|
||||
rateLimitService?: RateLimitService | null;
|
||||
rateLimitConfig?: {
|
||||
enabled: boolean;
|
||||
maxAttempts: number;
|
||||
windowMs: number;
|
||||
skipPaths?: Array<string>;
|
||||
} | null;
|
||||
internalSecret?: string;
|
||||
}
|
||||
|
||||
export interface QueueAppResult {
|
||||
app: Hono<AppEnv>;
|
||||
engine: QueueEngine;
|
||||
cronScheduler: CronScheduler;
|
||||
start: () => Promise<void>;
|
||||
shutdown: () => Promise<void>;
|
||||
}
|
||||
|
||||
export function createQueueApp(options: CreateQueueAppOptions): QueueAppResult {
|
||||
const {config, loggerFactory, metricsCollector, tracing, rateLimitService, rateLimitConfig, internalSecret} = options;
|
||||
const logger = loggerFactory('QueueApp');
|
||||
|
||||
const engine = new QueueEngine(config, loggerFactory);
|
||||
const cronScheduler = new CronScheduler(config, engine, loggerFactory);
|
||||
|
||||
const start = async (): Promise<void> => {
|
||||
await engine.start();
|
||||
await cronScheduler.start();
|
||||
};
|
||||
|
||||
const shutdown = async (): Promise<void> => {
|
||||
await cronScheduler.stop();
|
||||
await engine.stop();
|
||||
};
|
||||
|
||||
const ServiceMiddleware = createMiddleware<AppEnv>(async (ctx, next) => {
|
||||
ctx.set('queueEngine', engine);
|
||||
ctx.set('cronScheduler', cronScheduler);
|
||||
ctx.set('logger', logger);
|
||||
await next();
|
||||
});
|
||||
|
||||
const app = new Hono<AppEnv>();
|
||||
|
||||
applyMiddlewareStack(app, {
|
||||
requestId: {},
|
||||
tracing,
|
||||
metrics: metricsCollector
|
||||
? {
|
||||
enabled: true,
|
||||
collector: metricsCollector,
|
||||
skipPaths: ['/_health'],
|
||||
}
|
||||
: undefined,
|
||||
rateLimit:
|
||||
rateLimitService && rateLimitConfig?.enabled
|
||||
? {
|
||||
enabled: true,
|
||||
service: rateLimitService,
|
||||
maxAttempts: rateLimitConfig.maxAttempts,
|
||||
windowMs: rateLimitConfig.windowMs,
|
||||
skipPaths: rateLimitConfig.skipPaths ?? ['/_health'],
|
||||
}
|
||||
: undefined,
|
||||
logger: {
|
||||
log: (data: {method: string; path: string; status: number; durationMs: number}) => {
|
||||
if (data.path !== '/_health') {
|
||||
logger.debug(
|
||||
{
|
||||
method: data.method,
|
||||
path: data.path,
|
||||
status: data.status,
|
||||
durationMs: data.durationMs,
|
||||
},
|
||||
'Request completed',
|
||||
);
|
||||
}
|
||||
},
|
||||
},
|
||||
customMiddleware: internalSecret
|
||||
? [createInternalAuth({secret: internalSecret, skipPaths: ['/_health']}), ServiceMiddleware]
|
||||
: [ServiceMiddleware],
|
||||
skipErrorHandler: true,
|
||||
});
|
||||
|
||||
const errorHandler = createErrorHandler({
|
||||
includeStack: false,
|
||||
logError: (err: Error, ctx: {req: {path: string; method: string}}) => {
|
||||
logger.error(
|
||||
{
|
||||
error: err.message,
|
||||
stack: err.stack,
|
||||
path: ctx.req.path,
|
||||
method: ctx.req.method,
|
||||
},
|
||||
'Request error',
|
||||
);
|
||||
|
||||
const isExpectedError = err instanceof Error && 'isExpected' in err && err.isExpected;
|
||||
if (!isExpectedError) {
|
||||
captureException(err);
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
app.onError(errorHandler);
|
||||
|
||||
const routes = createRoutes();
|
||||
app.route('/', routes);
|
||||
|
||||
app.notFound(AppNotFoundHandler);
|
||||
|
||||
return {app, engine, cronScheduler, start, shutdown};
|
||||
}
|
||||
368
packages/queue/src/__tests__/CronScheduler.test.tsx
Normal file
368
packages/queue/src/__tests__/CronScheduler.test.tsx
Normal file
@@ -0,0 +1,368 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import * as fs from 'node:fs/promises';
|
||||
import type {LoggerFactory} from '@fluxer/logger/src/LoggerInterface';
|
||||
import {createMockLogger} from '@fluxer/logger/src/mock';
|
||||
import {CronScheduler, type QueueEngineClient} from '@fluxer/queue/src/cron/CronScheduler';
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import type {QueueConfig} from '@fluxer/queue/src/types/QueueConfig';
|
||||
import {afterEach, beforeEach, describe, expect, it, vi} from 'vitest';
|
||||
|
||||
const testRoot = `/tmp/fluxer-cron-scheduler-test-${Date.now()}`;
|
||||
|
||||
interface EnqueueCall {
|
||||
taskType: string;
|
||||
payload: JsonValue;
|
||||
priority?: number;
|
||||
runAtMs?: number | null;
|
||||
maxAttempts?: number;
|
||||
deduplicationId?: string | null;
|
||||
}
|
||||
|
||||
function createTestConfig(overrides: Partial<QueueConfig> = {}): QueueConfig {
|
||||
return {
|
||||
dataDir: testRoot,
|
||||
snapshotEveryMs: 60000,
|
||||
snapshotAfterOps: 100000,
|
||||
snapshotZstdLevel: 3,
|
||||
defaultVisibilityTimeoutMs: 30000,
|
||||
visibilityTimeoutBackoffMs: 10000,
|
||||
maxReceiveBatch: 100,
|
||||
commandBuffer: 8192,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function createLoggerFactory(): LoggerFactory {
|
||||
const mockLogger = createMockLogger();
|
||||
return () => mockLogger;
|
||||
}
|
||||
|
||||
function createMockQueueEngine(): QueueEngineClient & {calls: Array<EnqueueCall>} {
|
||||
const calls: Array<EnqueueCall> = [];
|
||||
let jobIdCounter = 0;
|
||||
|
||||
return {
|
||||
calls,
|
||||
async enqueue(taskType, payload, priority, runAtMs, maxAttempts, deduplicationId) {
|
||||
calls.push({taskType, payload, priority, runAtMs, maxAttempts, deduplicationId});
|
||||
return {job: {id: `job-${++jobIdCounter}`}, enqueued: true};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
describe('CronScheduler', () => {
|
||||
let scheduler: CronScheduler;
|
||||
let config: QueueConfig;
|
||||
let mockQueueEngine: QueueEngineClient & {calls: Array<EnqueueCall>};
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.useFakeTimers();
|
||||
await fs.rm(testRoot, {recursive: true, force: true});
|
||||
await fs.mkdir(testRoot, {recursive: true});
|
||||
config = createTestConfig();
|
||||
mockQueueEngine = createMockQueueEngine();
|
||||
scheduler = new CronScheduler(config, mockQueueEngine, createLoggerFactory());
|
||||
await scheduler.start();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
vi.useRealTimers();
|
||||
await scheduler.stop();
|
||||
await fs.rm(testRoot, {recursive: true, force: true});
|
||||
});
|
||||
|
||||
describe('upsert', () => {
|
||||
it('should create a new schedule', async () => {
|
||||
const schedule = await scheduler.upsert('test-cron', 'test-task', {key: 'value'}, '* * * * *', true);
|
||||
|
||||
expect(schedule.id).toBe('test-cron');
|
||||
expect(schedule.taskType).toBe('test-task');
|
||||
expect(schedule.cronExpression).toBe('* * * * *');
|
||||
expect(schedule.enabled).toBe(true);
|
||||
expect(schedule.nextRunMs).not.toBeNull();
|
||||
});
|
||||
|
||||
it('should update an existing schedule', async () => {
|
||||
await scheduler.upsert('test-cron', 'task-v1', {}, '* * * * *', true);
|
||||
const updated = await scheduler.upsert('test-cron', 'task-v2', {updated: true}, '0 * * * *', true);
|
||||
|
||||
expect(updated.taskType).toBe('task-v2');
|
||||
expect(updated.cronExpression).toBe('0 * * * *');
|
||||
|
||||
const list = scheduler.list();
|
||||
expect(list).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should preserve lastRunMs when updating', async () => {
|
||||
await scheduler.upsert('test-cron', 'test-task', {}, '* * * * *', true);
|
||||
|
||||
vi.advanceTimersByTime(120000);
|
||||
|
||||
const before = scheduler.get('test-cron');
|
||||
const lastRunMs = before?.lastRunMs;
|
||||
|
||||
const updated = await scheduler.upsert('test-cron', 'test-task', {updated: true}, '* * * * *', true);
|
||||
|
||||
expect(updated.lastRunMs).toBe(lastRunMs);
|
||||
});
|
||||
|
||||
it('should not rewrite an unchanged schedule', async () => {
|
||||
const original = await scheduler.upsert('test-cron', 'test-task', {unchanged: true}, '* * * * *', true);
|
||||
const originalUpdatedAtMs = original.updatedAtMs;
|
||||
|
||||
vi.advanceTimersByTime(1000);
|
||||
|
||||
const unchanged = await scheduler.upsert('test-cron', 'test-task', {unchanged: true}, '* * * * *', true);
|
||||
|
||||
expect(unchanged).toBe(original);
|
||||
expect(unchanged.updatedAtMs).toBe(originalUpdatedAtMs);
|
||||
expect(scheduler.list()).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should throw error for invalid cron expression', async () => {
|
||||
await expect(scheduler.upsert('test-cron', 'test-task', {}, 'invalid cron', true)).rejects.toThrow(
|
||||
'Invalid cron expression',
|
||||
);
|
||||
});
|
||||
|
||||
it('should set nextRunMs to null for disabled schedules', async () => {
|
||||
const schedule = await scheduler.upsert('test-cron', 'test-task', {}, '* * * * *', false);
|
||||
|
||||
expect(schedule.enabled).toBe(false);
|
||||
expect(schedule.nextRunMs).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe('delete', () => {
|
||||
it('should delete an existing schedule', async () => {
|
||||
await scheduler.upsert('test-cron', 'test-task', {}, '* * * * *', true);
|
||||
|
||||
const result = await scheduler.delete('test-cron');
|
||||
|
||||
expect(result).toBe(true);
|
||||
expect(scheduler.get('test-cron')).toBeNull();
|
||||
});
|
||||
|
||||
it('should return false when deleting non-existent schedule', async () => {
|
||||
const result = await scheduler.delete('non-existent');
|
||||
|
||||
expect(result).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('get', () => {
|
||||
it('should return a schedule by id', async () => {
|
||||
await scheduler.upsert('test-cron', 'test-task', {}, '* * * * *', true);
|
||||
|
||||
const schedule = scheduler.get('test-cron');
|
||||
|
||||
expect(schedule).not.toBeNull();
|
||||
expect(schedule?.id).toBe('test-cron');
|
||||
});
|
||||
|
||||
it('should return null for non-existent schedule', () => {
|
||||
const schedule = scheduler.get('non-existent');
|
||||
|
||||
expect(schedule).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe('list', () => {
|
||||
it('should return empty array when no schedules exist', () => {
|
||||
const schedules = scheduler.list();
|
||||
|
||||
expect(schedules).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should return all schedules', async () => {
|
||||
await scheduler.upsert('cron-1', 'task-1', {}, '* * * * *', true);
|
||||
await scheduler.upsert('cron-2', 'task-2', {}, '0 * * * *', true);
|
||||
await scheduler.upsert('cron-3', 'task-3', {}, '0 0 * * *', false);
|
||||
|
||||
const schedules = scheduler.list();
|
||||
|
||||
expect(schedules).toHaveLength(3);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getStats', () => {
|
||||
it('should return correct counts', async () => {
|
||||
await scheduler.upsert('cron-1', 'task-1', {}, '0 0 1 1 *', true);
|
||||
await scheduler.upsert('cron-2', 'task-2', {}, '0 0 2 1 *', true);
|
||||
await scheduler.upsert('cron-3', 'task-3', {}, '0 0 3 1 *', false);
|
||||
await scheduler.upsert('cron-4', 'task-4', {}, '0 0 4 1 *', false);
|
||||
|
||||
const stats = scheduler.getStats();
|
||||
|
||||
expect(stats.total).toBe(4);
|
||||
expect(stats.enabled).toBe(2);
|
||||
expect(stats.disabled).toBe(2);
|
||||
});
|
||||
|
||||
it('should return zero counts for empty scheduler', () => {
|
||||
const stats = scheduler.getStats();
|
||||
|
||||
expect(stats.total).toBe(0);
|
||||
expect(stats.enabled).toBe(0);
|
||||
expect(stats.disabled).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('cron execution', () => {
|
||||
it('should enqueue job when cron fires', async () => {
|
||||
await scheduler.upsert('test-cron', 'test-task', {message: 'hello'}, '* * * * *', true);
|
||||
|
||||
vi.advanceTimersByTime(120000);
|
||||
|
||||
expect(mockQueueEngine.calls.length).toBeGreaterThan(0);
|
||||
expect(mockQueueEngine.calls[0].taskType).toBe('test-task');
|
||||
});
|
||||
|
||||
it('should not enqueue job for disabled schedule', async () => {
|
||||
await scheduler.upsert('test-cron', 'test-task', {}, '* * * * *', false);
|
||||
|
||||
vi.advanceTimersByTime(120000);
|
||||
|
||||
expect(mockQueueEngine.calls).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('should update lastRunMs after execution', async () => {
|
||||
await scheduler.upsert('test-cron', 'test-task', {}, '* * * * *', true);
|
||||
|
||||
const before = scheduler.get('test-cron');
|
||||
expect(before?.lastRunMs).toBeNull();
|
||||
|
||||
await vi.advanceTimersByTimeAsync(120000);
|
||||
|
||||
const after = scheduler.get('test-cron');
|
||||
expect(after?.lastRunMs).not.toBeNull();
|
||||
});
|
||||
|
||||
it('should update nextRunMs after execution', async () => {
|
||||
await scheduler.upsert('test-cron', 'test-task', {}, '* * * * *', true);
|
||||
|
||||
const before = scheduler.get('test-cron');
|
||||
const nextRunBefore = before?.nextRunMs;
|
||||
|
||||
await vi.advanceTimersByTimeAsync(120000);
|
||||
|
||||
const after = scheduler.get('test-cron');
|
||||
expect(after?.nextRunMs).toBeGreaterThan(nextRunBefore!);
|
||||
});
|
||||
|
||||
it('should handle multiple schedules', async () => {
|
||||
await scheduler.upsert('cron-1', 'task-1', {}, '* * * * *', true);
|
||||
await scheduler.upsert('cron-2', 'task-2', {}, '* * * * *', true);
|
||||
|
||||
await vi.advanceTimersByTimeAsync(120000);
|
||||
|
||||
const taskTypes = mockQueueEngine.calls.map((c) => c.taskType);
|
||||
expect(taskTypes).toContain('task-1');
|
||||
expect(taskTypes).toContain('task-2');
|
||||
});
|
||||
});
|
||||
|
||||
describe('cron expressions', () => {
|
||||
it('should support standard cron expressions', async () => {
|
||||
await scheduler.upsert('minutely', 'task', {}, '* * * * *', true);
|
||||
await scheduler.upsert('hourly', 'task', {}, '0 * * * *', true);
|
||||
await scheduler.upsert('daily', 'task', {}, '0 0 * * *', true);
|
||||
await scheduler.upsert('weekly', 'task', {}, '0 0 * * 0', true);
|
||||
await scheduler.upsert('monthly', 'task', {}, '0 0 1 * *', true);
|
||||
|
||||
const schedules = scheduler.list();
|
||||
expect(schedules).toHaveLength(5);
|
||||
schedules.forEach((s) => {
|
||||
expect(s.nextRunMs).not.toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
it('should support specific minute patterns', async () => {
|
||||
await scheduler.upsert('every-5-min', 'task', {}, '*/5 * * * *', true);
|
||||
await scheduler.upsert('at-15-30', 'task', {}, '15,30 * * * *', true);
|
||||
await scheduler.upsert('range', 'task', {}, '0-10 * * * *', true);
|
||||
|
||||
const schedules = scheduler.list();
|
||||
expect(schedules).toHaveLength(3);
|
||||
schedules.forEach((s) => {
|
||||
expect(s.nextRunMs).not.toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('CronScheduler persistence', () => {
|
||||
let config: QueueConfig;
|
||||
let mockQueueEngine: QueueEngineClient & {calls: Array<EnqueueCall>};
|
||||
|
||||
beforeEach(async () => {
|
||||
vi.useFakeTimers();
|
||||
await fs.rm(testRoot, {recursive: true, force: true});
|
||||
await fs.mkdir(testRoot, {recursive: true});
|
||||
config = createTestConfig({snapshotEveryMs: 1000});
|
||||
mockQueueEngine = createMockQueueEngine();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
vi.useRealTimers();
|
||||
await fs.rm(testRoot, {recursive: true, force: true});
|
||||
});
|
||||
|
||||
it('should persist and restore schedules', async () => {
|
||||
const scheduler1 = new CronScheduler(config, mockQueueEngine, createLoggerFactory());
|
||||
await scheduler1.start();
|
||||
|
||||
await scheduler1.upsert('persist-test', 'test-task', {persistent: true}, '* * * * *', true);
|
||||
await scheduler1.stop();
|
||||
|
||||
const scheduler2 = new CronScheduler(config, createMockQueueEngine(), createLoggerFactory());
|
||||
await scheduler2.start();
|
||||
|
||||
const schedule = scheduler2.get('persist-test');
|
||||
expect(schedule).not.toBeNull();
|
||||
expect(schedule?.taskType).toBe('test-task');
|
||||
expect(schedule?.cronExpression).toBe('* * * * *');
|
||||
|
||||
await scheduler2.stop();
|
||||
});
|
||||
|
||||
it('should persist lastRunMs', async () => {
|
||||
const scheduler1 = new CronScheduler(config, mockQueueEngine, createLoggerFactory());
|
||||
await scheduler1.start();
|
||||
|
||||
await scheduler1.upsert('persist-test', 'test-task', {}, '* * * * *', true);
|
||||
await vi.advanceTimersByTimeAsync(120000);
|
||||
|
||||
const before = scheduler1.get('persist-test');
|
||||
const lastRunMs = before?.lastRunMs;
|
||||
expect(lastRunMs).not.toBeNull();
|
||||
|
||||
await scheduler1.stop();
|
||||
|
||||
const scheduler2 = new CronScheduler(config, createMockQueueEngine(), createLoggerFactory());
|
||||
await scheduler2.start();
|
||||
|
||||
const after = scheduler2.get('persist-test');
|
||||
expect(after?.lastRunMs).toBe(lastRunMs);
|
||||
|
||||
await scheduler2.stop();
|
||||
});
|
||||
});
|
||||
304
packages/queue/src/__tests__/DelayQueue.test.tsx
Normal file
304
packages/queue/src/__tests__/DelayQueue.test.tsx
Normal file
@@ -0,0 +1,304 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import {DelayQueue} from '@fluxer/queue/src/engine/DelayQueue';
|
||||
import {afterEach, beforeEach, describe, expect, it, vi} from 'vitest';
|
||||
|
||||
interface TestItem {
|
||||
id: string;
|
||||
data: string;
|
||||
}
|
||||
|
||||
describe('DelayQueue', () => {
|
||||
let queue: DelayQueue<TestItem>;
|
||||
const keyExtractor = (item: TestItem) => item.id;
|
||||
|
||||
beforeEach(() => {
|
||||
queue = new DelayQueue<TestItem>(keyExtractor);
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
describe('basic operations', () => {
|
||||
it('should start empty', () => {
|
||||
expect(queue.isEmpty).toBe(true);
|
||||
expect(queue.size).toBe(0);
|
||||
});
|
||||
|
||||
it('should push items with deadlines', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now + 1000);
|
||||
|
||||
expect(queue.isEmpty).toBe(false);
|
||||
expect(queue.size).toBe(1);
|
||||
});
|
||||
|
||||
it('should clear all items', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test1'}, now + 1000);
|
||||
queue.push({id: 'item-2', data: 'test2'}, now + 2000);
|
||||
|
||||
queue.clear();
|
||||
|
||||
expect(queue.isEmpty).toBe(true);
|
||||
expect(queue.size).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('popExpired', () => {
|
||||
it('should return empty array when no items are expired', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now + 10000);
|
||||
|
||||
const expired = queue.popExpired();
|
||||
|
||||
expect(expired).toHaveLength(0);
|
||||
expect(queue.size).toBe(1);
|
||||
});
|
||||
|
||||
it('should pop expired items', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test1'}, now + 100);
|
||||
queue.push({id: 'item-2', data: 'test2'}, now + 200);
|
||||
queue.push({id: 'item-3', data: 'test3'}, now + 10000);
|
||||
|
||||
vi.advanceTimersByTime(250);
|
||||
|
||||
const expired = queue.popExpired();
|
||||
|
||||
expect(expired).toHaveLength(2);
|
||||
expect(expired.map((i) => i.id)).toContain('item-1');
|
||||
expect(expired.map((i) => i.id)).toContain('item-2');
|
||||
expect(queue.size).toBe(1);
|
||||
});
|
||||
|
||||
it('should pop items in deadline order', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-3', data: 'third'}, now + 300);
|
||||
queue.push({id: 'item-1', data: 'first'}, now + 100);
|
||||
queue.push({id: 'item-2', data: 'second'}, now + 200);
|
||||
|
||||
vi.advanceTimersByTime(350);
|
||||
|
||||
const expired = queue.popExpired();
|
||||
|
||||
expect(expired).toHaveLength(3);
|
||||
expect(expired[0].id).toBe('item-1');
|
||||
expect(expired[1].id).toBe('item-2');
|
||||
expect(expired[2].id).toBe('item-3');
|
||||
});
|
||||
|
||||
it('should include items with deadline equal to now', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now);
|
||||
|
||||
const expired = queue.popExpired();
|
||||
|
||||
expect(expired).toHaveLength(1);
|
||||
expect(expired[0].id).toBe('item-1');
|
||||
});
|
||||
});
|
||||
|
||||
describe('remove operations', () => {
|
||||
it('should remove item by reference', () => {
|
||||
const now = Date.now();
|
||||
const item = {id: 'item-1', data: 'test'};
|
||||
queue.push(item, now + 1000);
|
||||
|
||||
const removed = queue.remove(item);
|
||||
|
||||
expect(removed).toBe(true);
|
||||
expect(queue.isEmpty).toBe(true);
|
||||
});
|
||||
|
||||
it('should remove item by key', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now + 1000);
|
||||
|
||||
const removed = queue.removeByKey('item-1');
|
||||
|
||||
expect(removed).toBe(true);
|
||||
expect(queue.isEmpty).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false when removing non-existent item', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now + 1000);
|
||||
|
||||
const removed = queue.removeByKey('non-existent');
|
||||
|
||||
expect(removed).toBe(false);
|
||||
expect(queue.size).toBe(1);
|
||||
});
|
||||
|
||||
it('should update item when pushing with same key', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'original'}, now + 1000);
|
||||
queue.push({id: 'item-1', data: 'updated'}, now + 2000);
|
||||
|
||||
expect(queue.size).toBe(1);
|
||||
|
||||
vi.advanceTimersByTime(1500);
|
||||
const expired1 = queue.popExpired();
|
||||
expect(expired1).toHaveLength(0);
|
||||
|
||||
vi.advanceTimersByTime(1000);
|
||||
const expired2 = queue.popExpired();
|
||||
expect(expired2).toHaveLength(1);
|
||||
expect(expired2[0].data).toBe('updated');
|
||||
});
|
||||
});
|
||||
|
||||
describe('has operations', () => {
|
||||
it('should return true for existing item', () => {
|
||||
const now = Date.now();
|
||||
const item = {id: 'item-1', data: 'test'};
|
||||
queue.push(item, now + 1000);
|
||||
|
||||
expect(queue.has(item)).toBe(true);
|
||||
expect(queue.hasByKey('item-1')).toBe(true);
|
||||
});
|
||||
|
||||
it('should return false for non-existent item', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now + 1000);
|
||||
|
||||
expect(queue.has({id: 'item-2', data: 'test'})).toBe(false);
|
||||
expect(queue.hasByKey('item-2')).toBe(false);
|
||||
});
|
||||
|
||||
it('should return false after item is removed', () => {
|
||||
const now = Date.now();
|
||||
const item = {id: 'item-1', data: 'test'};
|
||||
queue.push(item, now + 1000);
|
||||
queue.remove(item);
|
||||
|
||||
expect(queue.has(item)).toBe(false);
|
||||
expect(queue.hasByKey('item-1')).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('nextDelay', () => {
|
||||
it('should return null for empty queue', () => {
|
||||
expect(queue.nextDelay()).toBeNull();
|
||||
});
|
||||
|
||||
it('should return delay until next deadline', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now + 5000);
|
||||
|
||||
const delay = queue.nextDelay();
|
||||
|
||||
expect(delay).toBe(5000);
|
||||
});
|
||||
|
||||
it('should return 0 for expired items', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now + 100);
|
||||
|
||||
vi.advanceTimersByTime(200);
|
||||
|
||||
const delay = queue.nextDelay();
|
||||
|
||||
expect(delay).toBe(0);
|
||||
});
|
||||
|
||||
it('should return delay to earliest deadline', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-2', data: 'later'}, now + 10000);
|
||||
queue.push({id: 'item-1', data: 'sooner'}, now + 3000);
|
||||
|
||||
const delay = queue.nextDelay();
|
||||
|
||||
expect(delay).toBe(3000);
|
||||
});
|
||||
});
|
||||
|
||||
describe('toArray', () => {
|
||||
it('should return copy of internal items', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test1'}, now + 1000);
|
||||
queue.push({id: 'item-2', data: 'test2'}, now + 2000);
|
||||
|
||||
const arr = queue.toArray();
|
||||
|
||||
expect(arr).toHaveLength(2);
|
||||
expect(arr[0].item.id).toBe('item-1');
|
||||
expect(arr[0].deadlineMs).toBe(now + 1000);
|
||||
expect(arr[1].item.id).toBe('item-2');
|
||||
expect(arr[1].deadlineMs).toBe(now + 2000);
|
||||
});
|
||||
|
||||
it('should return items sorted by deadline', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-3', data: 'third'}, now + 3000);
|
||||
queue.push({id: 'item-1', data: 'first'}, now + 1000);
|
||||
queue.push({id: 'item-2', data: 'second'}, now + 2000);
|
||||
|
||||
const arr = queue.toArray();
|
||||
|
||||
expect(arr[0].item.id).toBe('item-1');
|
||||
expect(arr[1].item.id).toBe('item-2');
|
||||
expect(arr[2].item.id).toBe('item-3');
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle items with same deadline', () => {
|
||||
const now = Date.now();
|
||||
const deadline = now + 1000;
|
||||
queue.push({id: 'item-1', data: 'first'}, deadline);
|
||||
queue.push({id: 'item-2', data: 'second'}, deadline);
|
||||
queue.push({id: 'item-3', data: 'third'}, deadline);
|
||||
|
||||
vi.advanceTimersByTime(1100);
|
||||
|
||||
const expired = queue.popExpired();
|
||||
|
||||
expect(expired).toHaveLength(3);
|
||||
});
|
||||
|
||||
it('should handle negative deadline (already expired)', () => {
|
||||
const now = Date.now();
|
||||
queue.push({id: 'item-1', data: 'test'}, now - 1000);
|
||||
|
||||
const expired = queue.popExpired();
|
||||
|
||||
expect(expired).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('should handle large number of items', () => {
|
||||
const now = Date.now();
|
||||
for (let i = 0; i < 1000; i++) {
|
||||
queue.push({id: `item-${i}`, data: `data-${i}`}, now + (i + 1) * 10);
|
||||
}
|
||||
|
||||
expect(queue.size).toBe(1000);
|
||||
|
||||
vi.advanceTimersByTime(5000);
|
||||
const expired = queue.popExpired();
|
||||
|
||||
expect(expired).toHaveLength(500);
|
||||
expect(queue.size).toBe(500);
|
||||
});
|
||||
});
|
||||
});
|
||||
294
packages/queue/src/__tests__/PriorityQueue.test.tsx
Normal file
294
packages/queue/src/__tests__/PriorityQueue.test.tsx
Normal file
@@ -0,0 +1,294 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import {PriorityQueue} from '@fluxer/queue/src/engine/PriorityQueue';
|
||||
import {createJobID, type ReadyItem} from '@fluxer/queue/src/types/JobTypes';
|
||||
import {beforeEach, describe, expect, it} from 'vitest';
|
||||
|
||||
function createReadyItem(
|
||||
jobId: string,
|
||||
priority: number,
|
||||
runAtMs: number = 1000,
|
||||
createdAtMs: number = 1000,
|
||||
sequence: number = 0,
|
||||
): ReadyItem {
|
||||
return {
|
||||
jobId: createJobID(jobId),
|
||||
priority,
|
||||
runAtMs,
|
||||
createdAtMs,
|
||||
sequence,
|
||||
};
|
||||
}
|
||||
|
||||
// Unit tests for the in-memory PriorityQueue used by the queue engine.
// Ordering contract exercised below: priority (desc), then runAtMs (asc),
// then createdAtMs (asc), then insertion sequence (asc).
describe('PriorityQueue', () => {
  let queue: PriorityQueue;

  beforeEach(() => {
    // Fresh queue per test so cases cannot leak state into each other.
    queue = new PriorityQueue();
  });

  describe('basic operations', () => {
    it('should start empty', () => {
      expect(queue.isEmpty).toBe(true);
      expect(queue.size).toBe(0);
      // peek/pop on an empty queue return undefined rather than throwing.
      expect(queue.peek()).toBeUndefined();
      expect(queue.pop()).toBeUndefined();
    });

    it('should push and pop a single item', () => {
      const item = createReadyItem('job-1', 5);
      queue.push(item);

      expect(queue.isEmpty).toBe(false);
      expect(queue.size).toBe(1);
      // peek must not remove the item.
      expect(queue.peek()).toEqual(item);

      const popped = queue.pop();
      expect(popped).toEqual(item);
      expect(queue.isEmpty).toBe(true);
    });

    it('should push multiple items and maintain size', () => {
      queue.push(createReadyItem('job-1', 1));
      queue.push(createReadyItem('job-2', 2));
      queue.push(createReadyItem('job-3', 3));

      expect(queue.size).toBe(3);
    });

    it('should clear all items', () => {
      queue.push(createReadyItem('job-1', 1));
      queue.push(createReadyItem('job-2', 2));

      queue.clear();

      expect(queue.isEmpty).toBe(true);
      expect(queue.size).toBe(0);
    });
  });

  describe('priority ordering', () => {
    it('should pop highest priority first', () => {
      queue.push(createReadyItem('low', 1));
      queue.push(createReadyItem('high', 10));
      queue.push(createReadyItem('medium', 5));

      expect(queue.pop()?.jobId).toBe('high');
      expect(queue.pop()?.jobId).toBe('medium');
      expect(queue.pop()?.jobId).toBe('low');
    });

    it('should order by runAtMs when priorities are equal', () => {
      // Equal priority (5): earlier runAtMs wins.
      const baseTime = 1000;
      queue.push(createReadyItem('later', 5, baseTime + 200));
      queue.push(createReadyItem('earlier', 5, baseTime + 100));
      queue.push(createReadyItem('earliest', 5, baseTime));

      expect(queue.pop()?.jobId).toBe('earliest');
      expect(queue.pop()?.jobId).toBe('earlier');
      expect(queue.pop()?.jobId).toBe('later');
    });

    it('should order by createdAtMs when priority and runAtMs are equal', () => {
      // Equal priority and runAtMs: earlier createdAtMs wins.
      const baseTime = 1000;
      queue.push(createReadyItem('third', 5, baseTime, baseTime + 200));
      queue.push(createReadyItem('first', 5, baseTime, baseTime));
      queue.push(createReadyItem('second', 5, baseTime, baseTime + 100));

      expect(queue.pop()?.jobId).toBe('first');
      expect(queue.pop()?.jobId).toBe('second');
      expect(queue.pop()?.jobId).toBe('third');
    });

    it('should order by sequence when all other fields are equal', () => {
      // Final tiebreaker: lower insertion sequence pops first.
      const baseTime = 1000;
      queue.push(createReadyItem('third', 5, baseTime, baseTime, 3));
      queue.push(createReadyItem('first', 5, baseTime, baseTime, 1));
      queue.push(createReadyItem('second', 5, baseTime, baseTime, 2));

      expect(queue.pop()?.jobId).toBe('first');
      expect(queue.pop()?.jobId).toBe('second');
      expect(queue.pop()?.jobId).toBe('third');
    });

    it('should maintain heap property after multiple insertions', () => {
      const priorities = [3, 1, 4, 1, 5, 9, 2, 6, 5, 3];
      priorities.forEach((p, i) => {
        queue.push(createReadyItem(`job-${i}`, p, 1000, 1000, i));
      });

      // Popping everything must yield priorities in descending order.
      const sorted = [...priorities].sort((a, b) => b - a);
      const popped: Array<number> = [];

      while (!queue.isEmpty) {
        const item = queue.pop();
        if (item) {
          popped.push(item.priority);
        }
      }

      expect(popped).toEqual(sorted);
    });
  });

  describe('remove operation', () => {
    it('should remove an existing item', () => {
      queue.push(createReadyItem('job-1', 1));
      queue.push(createReadyItem('job-2', 2));
      queue.push(createReadyItem('job-3', 3));

      const removed = queue.remove(createJobID('job-2'));

      expect(removed).toBe(true);
      expect(queue.size).toBe(2);
      expect(queue.has(createJobID('job-2'))).toBe(false);
    });

    it('should return false when removing non-existent item', () => {
      queue.push(createReadyItem('job-1', 1));

      const removed = queue.remove(createJobID('non-existent'));

      expect(removed).toBe(false);
      expect(queue.size).toBe(1);
    });

    it('should maintain heap property after removal', () => {
      queue.push(createReadyItem('job-1', 1));
      queue.push(createReadyItem('job-2', 5));
      queue.push(createReadyItem('job-3', 3));
      queue.push(createReadyItem('job-4', 7));
      queue.push(createReadyItem('job-5', 2));

      queue.remove(createJobID('job-2'));

      // Remaining items still pop in descending priority: 7, 3, 2, 1.
      expect(queue.pop()?.jobId).toBe('job-4');
      expect(queue.pop()?.jobId).toBe('job-3');
      expect(queue.pop()?.jobId).toBe('job-5');
      expect(queue.pop()?.jobId).toBe('job-1');
    });

    it('should handle removing the only item', () => {
      queue.push(createReadyItem('job-1', 1));

      const removed = queue.remove(createJobID('job-1'));

      expect(removed).toBe(true);
      expect(queue.isEmpty).toBe(true);
    });

    it('should handle removing from the front of the queue', () => {
      // Removing the current heap root must promote the next-best item.
      queue.push(createReadyItem('job-1', 10));
      queue.push(createReadyItem('job-2', 5));
      queue.push(createReadyItem('job-3', 1));

      queue.remove(createJobID('job-1'));

      expect(queue.peek()?.jobId).toBe('job-2');
    });
  });

  describe('has operation', () => {
    it('should return true for existing item', () => {
      queue.push(createReadyItem('job-1', 1));

      expect(queue.has(createJobID('job-1'))).toBe(true);
    });

    it('should return false for non-existent item', () => {
      queue.push(createReadyItem('job-1', 1));

      expect(queue.has(createJobID('job-2'))).toBe(false);
    });

    it('should return false after item is removed', () => {
      queue.push(createReadyItem('job-1', 1));
      queue.remove(createJobID('job-1'));

      expect(queue.has(createJobID('job-1'))).toBe(false);
    });
  });

  describe('toArray and fromArray', () => {
    it('should convert queue to array', () => {
      const items = [createReadyItem('job-1', 1), createReadyItem('job-2', 2), createReadyItem('job-3', 3)];

      items.forEach((item) => queue.push(item));

      const arr = queue.toArray();

      // toArray makes no ordering guarantee here; only membership is checked.
      expect(arr.length).toBe(3);
      items.forEach((item) => {
        expect(arr.some((a) => a.jobId === item.jobId)).toBe(true);
      });
    });

    it('should create queue from array', () => {
      const items = [createReadyItem('job-1', 1), createReadyItem('job-2', 5), createReadyItem('job-3', 3)];

      const newQueue = PriorityQueue.fromArray(items);

      // fromArray must heapify: pops come out in priority order 5, 3, 1.
      expect(newQueue.size).toBe(3);
      expect(newQueue.pop()?.jobId).toBe('job-2');
      expect(newQueue.pop()?.jobId).toBe('job-3');
      expect(newQueue.pop()?.jobId).toBe('job-1');
    });

    it('should create empty queue from empty array', () => {
      const newQueue = PriorityQueue.fromArray([]);

      expect(newQueue.isEmpty).toBe(true);
    });
  });

  describe('edge cases', () => {
    it('should handle items with same priority correctly', () => {
      for (let i = 0; i < 10; i++) {
        queue.push(createReadyItem(`job-${i}`, 5, 1000, 1000, i));
      }

      // With all other fields equal, sequence must be strictly increasing.
      let lastSequence = -1;
      while (!queue.isEmpty) {
        const item = queue.pop();
        if (item) {
          expect(item.sequence).toBeGreaterThan(lastSequence);
          lastSequence = item.sequence;
        }
      }
    });

    it('should handle large number of items', () => {
      for (let i = 0; i < 1000; i++) {
        queue.push(createReadyItem(`job-${i}`, Math.floor(Math.random() * 100)));
      }

      expect(queue.size).toBe(1000);

      // Priorities must be non-increasing across 1000 random inserts.
      let lastPriority = Infinity;
      while (!queue.isEmpty) {
        const item = queue.pop();
        if (item) {
          expect(item.priority).toBeLessThanOrEqual(lastPriority);
          lastPriority = item.priority;
        }
      }
    });
  });
});
|
||||
612
packages/queue/src/__tests__/QueueEngine.test.tsx
Normal file
612
packages/queue/src/__tests__/QueueEngine.test.tsx
Normal file
@@ -0,0 +1,612 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import * as fs from 'node:fs/promises';
import * as os from 'node:os';
import * as path from 'node:path';
import type {LoggerFactory} from '@fluxer/logger/src/LoggerInterface';
import {createMockLogger} from '@fluxer/logger/src/mock';
import {QueueEngine} from '@fluxer/queue/src/engine/QueueEngine';
import {JobStatus} from '@fluxer/queue/src/types/JobTypes';
import type {QueueConfig} from '@fluxer/queue/src/types/QueueConfig';
import {afterEach, beforeEach, describe, expect, it, vi} from 'vitest';
|
||||
|
||||
const testRoot = `/tmp/fluxer-queue-engine-test-${Date.now()}`;
|
||||
|
||||
function createTestConfig(overrides: Partial<QueueConfig> = {}): QueueConfig {
|
||||
return {
|
||||
dataDir: testRoot,
|
||||
snapshotEveryMs: 60000,
|
||||
snapshotAfterOps: 100000,
|
||||
snapshotZstdLevel: 3,
|
||||
defaultVisibilityTimeoutMs: 30000,
|
||||
visibilityTimeoutBackoffMs: 10000,
|
||||
maxReceiveBatch: 100,
|
||||
commandBuffer: 8192,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function createLoggerFactory(): LoggerFactory {
|
||||
const mockLogger = createMockLogger();
|
||||
return () => mockLogger;
|
||||
}
|
||||
|
||||
// End-to-end tests for QueueEngine's public API: enqueue/dequeue lifecycle,
// ack/nack, visibility changes, retry/delete, stats, and state reset.
// Fake timers are used so scheduled jobs and deadlines are deterministic.
describe('QueueEngine', () => {
  let engine: QueueEngine;
  let config: QueueConfig;

  beforeEach(async () => {
    vi.useFakeTimers();
    // Wipe any on-disk state from a previous run before starting fresh.
    await fs.rm(testRoot, {recursive: true, force: true});
    config = createTestConfig();
    engine = new QueueEngine(config, createLoggerFactory());
    await engine.start();
  });

  afterEach(async () => {
    vi.useRealTimers();
    await engine.stop();
    await fs.rm(testRoot, {recursive: true, force: true});
  });

  describe('enqueue', () => {
    it('should enqueue a job with default values', async () => {
      const result = await engine.enqueue('test-task', {message: 'hello'});

      // Defaults: priority 0, no attempts yet, 3 max attempts.
      expect(result.enqueued).toBe(true);
      expect(result.job.taskType).toBe('test-task');
      expect(result.job.priority).toBe(0);
      expect(result.job.attempts).toBe(0);
      expect(result.job.maxAttempts).toBe(3);
    });

    it('should enqueue a job with custom priority', async () => {
      const result = await engine.enqueue('test-task', {}, 10);

      expect(result.enqueued).toBe(true);
      expect(result.job.priority).toBe(10);
    });

    it('should enqueue a scheduled job', async () => {
      const now = Date.now();
      const runAt = now + 60000;
      const result = await engine.enqueue('test-task', {}, 0, runAt);

      expect(result.enqueued).toBe(true);
      expect(result.job.runAtMs).toBe(runAt);

      // A future runAt lands the job in `scheduled`, not `ready`.
      const stats = engine.getStats();
      expect(stats.scheduled).toBe(1);
      expect(stats.ready).toBe(0);
    });

    it('should enqueue a job with custom max attempts', async () => {
      const result = await engine.enqueue('test-task', {}, 0, null, 5);

      expect(result.enqueued).toBe(true);
      expect(result.job.maxAttempts).toBe(5);
    });

    it('should clamp max attempts to valid range', async () => {
      // maxAttempts is clamped to [1, 1000].
      const resultLow = await engine.enqueue('test-task', {}, 0, null, 0);
      const resultHigh = await engine.enqueue('test-task', {}, 0, null, 9999);

      expect(resultLow.job.maxAttempts).toBe(1);
      expect(resultHigh.job.maxAttempts).toBe(1000);
    });

    it('should handle deduplication', async () => {
      const result1 = await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');
      const result2 = await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');

      // Second enqueue with the same dedup key returns the existing job.
      expect(result1.enqueued).toBe(true);
      expect(result2.enqueued).toBe(false);
      expect(result2.job.id).toBe(result1.job.id);
    });

    it('should allow re-enqueue with same deduplication key after job completes', async () => {
      const result1 = await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');
      expect(result1.enqueued).toBe(true);

      const jobs = await engine.dequeue(null, 1, 0, 5000);
      expect(jobs).toHaveLength(1);

      // Ack releases the dedup key, so the key becomes reusable.
      await engine.ack(jobs[0].receipt);

      const result2 = await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');
      expect(result2.enqueued).toBe(true);
      expect(result2.job.id).not.toBe(result1.job.id);
    });
  });

  describe('dequeue', () => {
    it('should return empty array when queue is empty', async () => {
      const jobs = await engine.dequeue(null, 10, 0, 5000);

      expect(jobs).toHaveLength(0);
    });

    it('should dequeue a single job', async () => {
      await engine.enqueue('test-task', {message: 'hello'});

      const jobs = await engine.dequeue(null, 10, 0, 5000);

      expect(jobs).toHaveLength(1);
      expect(jobs[0].job.taskType).toBe('test-task');
      expect(jobs[0].receipt).toBeTruthy();
    });

    it('should dequeue multiple jobs respecting limit', async () => {
      for (let i = 0; i < 5; i++) {
        await engine.enqueue('test-task', {index: i});
      }

      const jobs = await engine.dequeue(null, 3, 0, 5000);

      expect(jobs).toHaveLength(3);
    });

    it('should filter by task type', async () => {
      await engine.enqueue('type-a', {});
      await engine.enqueue('type-b', {});
      await engine.enqueue('type-a', {});

      // Passing a task-type list restricts the lease to matching jobs.
      const jobs = await engine.dequeue(['type-a'], 10, 0, 5000);

      expect(jobs).toHaveLength(2);
      jobs.forEach((job) => {
        expect(job.job.taskType).toBe('type-a');
      });
    });

    it('should increment attempt count on dequeue', async () => {
      await engine.enqueue('test-task', {});

      const jobs = await engine.dequeue(null, 1, 0, 5000);

      expect(jobs[0].job.attempts).toBe(1);
    });

    it('should set visibility deadline', async () => {
      const visibilityTimeout = 10000;
      await engine.enqueue('test-task', {});

      const jobs = await engine.dequeue(null, 1, 0, visibilityTimeout);
      const now = Date.now();

      // Deadline lies within [now, now + timeout + small slack].
      expect(jobs[0].visibilityDeadlineMs).toBeGreaterThanOrEqual(now);
      expect(jobs[0].visibilityDeadlineMs).toBeLessThanOrEqual(now + visibilityTimeout + 100);
    });

    it('should dequeue jobs in priority order', async () => {
      await engine.enqueue('low-priority', {}, 1);
      await engine.enqueue('high-priority', {}, 10);
      await engine.enqueue('medium-priority', {}, 5);

      const jobs = await engine.dequeue(null, 3, 0, 5000);

      expect(jobs[0].job.taskType).toBe('high-priority');
      expect(jobs[1].job.taskType).toBe('medium-priority');
      expect(jobs[2].job.taskType).toBe('low-priority');
    });

    it('should not return scheduled jobs before their run time', async () => {
      const now = Date.now();
      await engine.enqueue('scheduled-task', {}, 0, now + 60000);

      const jobs = await engine.dequeue(null, 10, 0, 5000);

      expect(jobs).toHaveLength(0);
    });

    it('should return scheduled jobs after their run time', async () => {
      const now = Date.now();
      await engine.enqueue('scheduled-task', {}, 0, now + 1000);

      // Fake clock moves past runAt, promoting the job to ready.
      vi.advanceTimersByTime(2000);

      const jobs = await engine.dequeue(null, 10, 0, 5000);

      expect(jobs).toHaveLength(1);
      expect(jobs[0].job.taskType).toBe('scheduled-task');
    });
  });

  describe('ack', () => {
    it('should acknowledge a job and remove it from the queue', async () => {
      await engine.enqueue('test-task', {});
      const jobs = await engine.dequeue(null, 1, 0, 5000);

      const result = await engine.ack(jobs[0].receipt);

      expect(result).toBe(true);

      const stats = engine.getStats();
      expect(stats.ready).toBe(0);
      expect(stats.processing).toBe(0);
    });

    it('should return false for invalid receipt', async () => {
      const result = await engine.ack('invalid-receipt');

      expect(result).toBe(false);
    });

    it('should clear deduplication index on ack', async () => {
      await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');
      const jobs = await engine.dequeue(null, 1, 0, 5000);
      await engine.ack(jobs[0].receipt);

      const result = await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');

      expect(result.enqueued).toBe(true);
    });
  });

  describe('nack', () => {
    it('should nack a job and schedule it for retry', async () => {
      await engine.enqueue('test-task', {}, 0, null, 3);
      const jobs = await engine.dequeue(null, 1, 0, 5000);

      const result = await engine.nack(jobs[0].receipt, 'processing error');

      expect(result).toBe(true);

      // Nacked job leaves `processing` and waits in `scheduled` for retry.
      const stats = engine.getStats();
      expect(stats.processing).toBe(0);
      expect(stats.scheduled).toBe(1);
    });

    it('should return false for invalid receipt', async () => {
      const result = await engine.nack('invalid-receipt');

      expect(result).toBe(false);
    });

    it('should move job to dead letter after max attempts', async () => {
      // maxAttempts=1: the first nack exhausts all attempts.
      await engine.enqueue('test-task', {}, 0, null, 1);
      const jobs = await engine.dequeue(null, 1, 0, 5000);

      await engine.nack(jobs[0].receipt, 'failed');

      const stats = engine.getStats();
      expect(stats.deadLetter).toBe(1);
    });

    it('should store error message on job', async () => {
      await engine.enqueue('test-task', {}, 0, null, 1);
      const jobs = await engine.dequeue(null, 1, 0, 5000);

      await engine.nack(jobs[0].receipt, 'custom error');

      const jobRecord = engine.getJob(jobs[0].job.id);
      expect(jobRecord?.job.error).toBe('custom error');
    });

    it('should schedule retry with exponential backoff', async () => {
      await engine.enqueue('test-task', {}, 0, null, 5);

      const jobs1 = await engine.dequeue(null, 1, 0, 5000);
      await engine.nack(jobs1[0].receipt);

      // Only the status transition is asserted here; the backoff delay
      // itself is covered by the visibility-timeout suite below.
      const jobRecord = engine.getJob(jobs1[0].job.id);
      expect(jobRecord?.status).toBe(JobStatus.Scheduled);
    });
  });

  describe('changeVisibility', () => {
    it('should extend visibility timeout', async () => {
      await engine.enqueue('test-task', {});
      const jobs = await engine.dequeue(null, 1, 0, 5000);
      const originalDeadline = jobs[0].visibilityDeadlineMs;

      const result = await engine.changeVisibility(jobs[0].receipt, 60000);

      expect(result).toBe(true);

      const jobRecord = engine.getJob(jobs[0].job.id);
      expect(jobRecord?.visibilityDeadlineMs).toBeGreaterThan(originalDeadline);
    });

    it('should return false for invalid receipt', async () => {
      const result = await engine.changeVisibility('invalid-receipt', 60000);

      expect(result).toBe(false);
    });

    it('should return false for non-inflight job', async () => {
      await engine.enqueue('test-task', {});
      const jobs = await engine.dequeue(null, 1, 0, 5000);
      // Ack first: the receipt is no longer inflight afterwards.
      await engine.ack(jobs[0].receipt);

      const result = await engine.changeVisibility(jobs[0].receipt, 60000);

      expect(result).toBe(false);
    });
  });

  describe('retryJob', () => {
    it('should retry a dead letter job', async () => {
      await engine.enqueue('test-task', {}, 0, null, 1);
      const jobs = await engine.dequeue(null, 1, 0, 5000);
      await engine.nack(jobs[0].receipt);

      const stats1 = engine.getStats();
      expect(stats1.deadLetter).toBe(1);

      const retried = await engine.retryJob(jobs[0].job.id);

      // Retry resets the attempt counter and moves the job back to ready.
      expect(retried).not.toBeNull();
      expect(retried?.attempts).toBe(0);

      const stats2 = engine.getStats();
      expect(stats2.deadLetter).toBe(0);
      expect(stats2.ready).toBe(1);
    });

    it('should return null for non-dead-letter job', async () => {
      await engine.enqueue('test-task', {});

      const jobs = await engine.dequeue(null, 1, 0, 5000);
      const retried = await engine.retryJob(jobs[0].job.id);

      expect(retried).toBeNull();
    });

    it('should return null for non-existent job', async () => {
      const retried = await engine.retryJob('non-existent-id');

      expect(retried).toBeNull();
    });
  });

  describe('deleteJob', () => {
    // deleteJob must work from every lifecycle state: ready, scheduled,
    // inflight, and dead-letter.
    it('should delete a ready job', async () => {
      const {job} = await engine.enqueue('test-task', {});

      const result = await engine.deleteJob(job.id);

      expect(result).toBe(true);
      expect(engine.getJob(job.id)).toBeNull();
    });

    it('should delete a scheduled job', async () => {
      const now = Date.now();
      const {job} = await engine.enqueue('test-task', {}, 0, now + 60000);

      const result = await engine.deleteJob(job.id);

      expect(result).toBe(true);
      expect(engine.getJob(job.id)).toBeNull();
    });

    it('should delete an inflight job', async () => {
      await engine.enqueue('test-task', {});
      const jobs = await engine.dequeue(null, 1, 0, 5000);

      const result = await engine.deleteJob(jobs[0].job.id);

      expect(result).toBe(true);
      expect(engine.getJob(jobs[0].job.id)).toBeNull();
    });

    it('should delete a dead letter job', async () => {
      await engine.enqueue('test-task', {}, 0, null, 1);
      const jobs = await engine.dequeue(null, 1, 0, 5000);
      await engine.nack(jobs[0].receipt);

      const result = await engine.deleteJob(jobs[0].job.id);

      expect(result).toBe(true);
      expect(engine.getJob(jobs[0].job.id)).toBeNull();
    });

    it('should return false for non-existent job', async () => {
      const result = await engine.deleteJob('non-existent');

      expect(result).toBe(false);
    });

    it('should clear deduplication index on delete', async () => {
      const {job} = await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');
      await engine.deleteJob(job.id);

      const result = await engine.enqueue('test-task', {}, 0, null, 3, 'unique-key');

      expect(result.enqueued).toBe(true);
    });
  });

  describe('getStats', () => {
    it('should return correct counts', async () => {
      const now = Date.now();

      // Arrange one job per bucket: 2 ready, 1 scheduled, 1 processing,
      // 1 dead-lettered.
      await engine.enqueue('ready-1', {});
      await engine.enqueue('ready-2', {});
      await engine.enqueue('scheduled-1', {}, 0, now + 60000);
      await engine.enqueue('to-process', {});
      await engine.enqueue('to-deadletter', {}, 0, null, 1);

      const jobs = await engine.dequeue(['to-process'], 1, 0, 5000);
      expect(jobs).toHaveLength(1);

      const dlJobs = await engine.dequeue(['to-deadletter'], 1, 0, 5000);
      expect(dlJobs).toHaveLength(1);
      await engine.nack(dlJobs[0].receipt);

      const stats = engine.getStats();

      expect(stats.ready).toBe(2);
      expect(stats.scheduled).toBe(1);
      expect(stats.processing).toBe(1);
      expect(stats.deadLetter).toBe(1);
    });
  });

  describe('getJob', () => {
    it('should return job record', async () => {
      const {job} = await engine.enqueue('test-task', {message: 'hello'});

      const record = engine.getJob(job.id);

      expect(record).not.toBeNull();
      expect(record?.job.id).toBe(job.id);
      expect(record?.status).toBe(JobStatus.Ready);
    });

    it('should return null for non-existent job', async () => {
      const record = engine.getJob('non-existent');

      expect(record).toBeNull();
    });
  });

  describe('resetState', () => {
    it('should clear all state', async () => {
      await engine.enqueue('task-1', {});
      await engine.enqueue('task-2', {});
      await engine.enqueue('task-3', {}, 0, Date.now() + 60000);

      await engine.resetState();

      const stats = engine.getStats();
      expect(stats.ready).toBe(0);
      expect(stats.scheduled).toBe(0);
      expect(stats.processing).toBe(0);
      expect(stats.deadLetter).toBe(0);
    });
  });
});
|
||||
|
||||
// Visibility-timeout behavior: a leased job whose deadline lapses without
// an ack is returned to the ready queue (or dead-lettered once attempts
// are exhausted). Uses a short 5s timeout so fake-timer advances are small.
describe('QueueEngine visibility timeout', () => {
  let engine: QueueEngine;
  let config: QueueConfig;

  beforeEach(async () => {
    vi.useFakeTimers();
    await fs.rm(testRoot, {recursive: true, force: true});
    config = createTestConfig({
      defaultVisibilityTimeoutMs: 5000,
      visibilityTimeoutBackoffMs: 1000,
    });
    engine = new QueueEngine(config, createLoggerFactory());
    await engine.start();
  });

  afterEach(async () => {
    vi.useRealTimers();
    await engine.stop();
    await fs.rm(testRoot, {recursive: true, force: true});
  });

  it('should return job to ready queue after visibility timeout expires', async () => {
    await engine.enqueue('test-task', {}, 0, null, 3);
    const jobs = await engine.dequeue(null, 1, 0, 5000);
    expect(jobs).toHaveLength(1);

    // Async advance so the engine's expiry timers actually fire.
    await vi.advanceTimersByTimeAsync(6000);

    const stats = engine.getStats();
    expect(stats.processing).toBe(0);

    // The job survives with a recorded timeout error and one burned attempt.
    const jobRecord = engine.getJob(jobs[0].job.id);
    expect(jobRecord).not.toBeNull();
    expect(jobRecord?.status).toBe(JobStatus.Ready);
    expect(jobRecord?.job.error).toBe('visibility timeout');
    expect(jobRecord?.job.attempts).toBe(1);
  });

  it('should move job to dead letter after max attempts with visibility timeout', async () => {
    // maxAttempts=1, so a single timed-out lease dead-letters the job.
    await engine.enqueue('test-task', {}, 0, null, 1);

    const jobs = await engine.dequeue(null, 1, 0, 5000);
    expect(jobs).toHaveLength(1);

    await vi.advanceTimersByTimeAsync(6000);

    const stats = engine.getStats();
    expect(stats.deadLetter).toBe(1);
  });
});
|
||||
|
||||
// Concurrency tests: overlapping enqueue/dequeue promises must neither lose
// nor duplicate jobs. Note: real timers here — fake timers would stall the
// engine's internal async machinery under Promise.all.
describe('QueueEngine concurrency', () => {
  let engine: QueueEngine;
  let config: QueueConfig;

  beforeEach(async () => {
    await fs.rm(testRoot, {recursive: true, force: true});
    config = createTestConfig();
    engine = new QueueEngine(config, createLoggerFactory());
    await engine.start();
  });

  afterEach(async () => {
    await engine.stop();
    await fs.rm(testRoot, {recursive: true, force: true});
  });

  it('should handle concurrent enqueue operations', async () => {
    const promises: Array<Promise<{job: {id: string}; enqueued: boolean}>> = [];
    for (let i = 0; i < 100; i++) {
      promises.push(engine.enqueue(`task-${i}`, {index: i}));
    }

    const results = await Promise.all(promises);

    expect(results.every((r) => r.enqueued)).toBe(true);

    const stats = engine.getStats();
    expect(stats.ready).toBe(100);
  });

  it('should handle concurrent dequeue operations', async () => {
    for (let i = 0; i < 50; i++) {
      await engine.enqueue(`task-${i}`, {index: i});
    }

    // 10 dequeuers racing for 50 jobs (batch size 10 each).
    const dequeuePromises: Array<Promise<Array<{job: {id: string}}>>> = [];
    for (let i = 0; i < 10; i++) {
      dequeuePromises.push(engine.dequeue(null, 10, 0, 5000));
    }

    const results = await Promise.all(dequeuePromises);
    const allJobs = results.flat();

    expect(allJobs.length).toBe(50);

    // Unique-id count equals total count: no job was leased twice.
    const jobIds = new Set(allJobs.map((j) => j.job.id));
    expect(jobIds.size).toBe(50);
  });

  it('should handle mixed enqueue and dequeue operations', async () => {
    const operations: Array<Promise<unknown>> = [];

    for (let i = 0; i < 50; i++) {
      operations.push(engine.enqueue(`task-${i}`, {index: i}));
    }

    for (let i = 0; i < 10; i++) {
      operations.push(engine.dequeue(null, 5, 0, 5000));
    }

    await Promise.all(operations);

    // Every enqueued job is either still ready or leased — none lost.
    const stats = engine.getStats();
    expect(stats.ready + stats.processing).toBe(50);
  });
});
|
||||
464
packages/queue/src/api/Handlers.tsx
Normal file
464
packages/queue/src/api/Handlers.tsx
Normal file
@@ -0,0 +1,464 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {AppEnv} from '@fluxer/queue/src/api/QueueApiTypes';
|
||||
import {
|
||||
AckRequestSchema,
|
||||
DeleteJobParamsSchema,
|
||||
DequeueQuerySchema,
|
||||
EnqueueRequestSchema,
|
||||
NackRequestSchema,
|
||||
RetryJobParamsSchema,
|
||||
UpsertCronRequestSchema,
|
||||
VisibilityRequestSchema,
|
||||
} from '@fluxer/queue/src/types/JobTypes';
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import {JsonValueSchema} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import {nowMs} from '@fluxer/time/src/Clock';
|
||||
import {formatRfc3339Timestamp, parseRfc3339TimestampToMs} from '@fluxer/time/src/Rfc3339Timestamp';
|
||||
import type {Context} from 'hono';
|
||||
|
||||
/** Response body for POST /enqueue. */
interface EnqueueResponse {
  job_id: string;
  // Presumably false when deduplication suppressed a new insert — TODO
  // confirm against QueueEngine.enqueue.
  enqueued: boolean;
}

/**
 * Wire representation of a job: snake_case field names and RFC 3339 string
 * timestamps, unlike the camelCase/epoch-ms engine-side record.
 *
 * NOTE(review): duplicates QueueApiJob in QueueResponseTypes.tsx — consider
 * consolidating so the two shapes cannot drift.
 */
interface ApiJob {
  id: string;
  task_type: string;
  // Null when the stored payload bytes could not be decoded/validated as
  // JSON (see toApiLeasedJob).
  payload: JsonValue | null;
  priority: number;
  run_at: string;
  created_at: string;
  attempts: number;
  max_attempts: number;
  error: string | null;
  deduplication_id: string | null;
}

/** A leased job: the job plus its ack/nack receipt and lease deadline. */
interface ApiLeasedJob {
  receipt: string;
  visibility_deadline: string;
  job: ApiJob;
}

/** Gauge counts per queue state. */
interface QueueStatsResponse {
  ready: number;
  processing: number;
  scheduled: number;
  dead_letter: number;
}

/** Per-cron status row returned by GET /stats. */
interface CronStatsResponse {
  id: string;
  task_type: string;
  cron_expression: string;
  enabled: boolean;
  last_run_at: string | null;
  next_run_at: string;
  last_run_age_ms: number | null;
  is_overdue: boolean;
}

/** Body of GET /stats. */
interface StatsResponse {
  queue: QueueStatsResponse;
  crons: Array<CronStatsResponse>;
}

/** Body of GET /metrics (queue gauges only, no cron detail). */
interface MetricsResponse {
  queue: QueueStatsResponse;
}

/** Body of GET /_health. */
interface HealthResponse {
  status: string;
}
|
||||
|
||||
function toApiLeasedJob(leasedJob: {
|
||||
job: {
|
||||
id: string;
|
||||
taskType: string;
|
||||
payload: Uint8Array;
|
||||
priority: number;
|
||||
runAtMs: number;
|
||||
createdAtMs: number;
|
||||
attempts: number;
|
||||
maxAttempts: number;
|
||||
error: string | null;
|
||||
deduplicationId: string | null;
|
||||
};
|
||||
receipt: string;
|
||||
visibilityDeadlineMs: number;
|
||||
}): ApiLeasedJob {
|
||||
let parsedPayload: JsonValue | null;
|
||||
try {
|
||||
parsedPayload = JsonValueSchema.parse(JSON.parse(Buffer.from(leasedJob.job.payload).toString('utf-8')));
|
||||
} catch {
|
||||
parsedPayload = null;
|
||||
}
|
||||
|
||||
return {
|
||||
receipt: leasedJob.receipt,
|
||||
visibility_deadline: formatRfc3339Timestamp(leasedJob.visibilityDeadlineMs),
|
||||
job: {
|
||||
id: leasedJob.job.id,
|
||||
task_type: leasedJob.job.taskType,
|
||||
payload: parsedPayload,
|
||||
priority: leasedJob.job.priority,
|
||||
run_at: formatRfc3339Timestamp(leasedJob.job.runAtMs),
|
||||
created_at: formatRfc3339Timestamp(leasedJob.job.createdAtMs),
|
||||
attempts: leasedJob.job.attempts,
|
||||
max_attempts: leasedJob.job.maxAttempts,
|
||||
error: leasedJob.job.error,
|
||||
deduplication_id: leasedJob.job.deduplicationId,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export async function enqueueJob(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const queueEngine = ctx.get('queueEngine');
|
||||
const logger = ctx.get('logger');
|
||||
|
||||
let body: JsonValue;
|
||||
try {
|
||||
body = await ctx.req.json<JsonValue>();
|
||||
} catch {
|
||||
return ctx.text('Error: Invalid JSON body', 400);
|
||||
}
|
||||
|
||||
const parsed = EnqueueRequestSchema.safeParse(body);
|
||||
if (!parsed.success) {
|
||||
logger.warn({errors: parsed.error.issues}, 'Invalid enqueue request');
|
||||
return ctx.text('Error: invalid request body', 400);
|
||||
}
|
||||
|
||||
const {task_type, payload, priority, run_at, max_attempts, deduplication_id} = parsed.data;
|
||||
|
||||
const runAtMs = run_at ? parseRfc3339TimestampToMs(run_at) : null;
|
||||
|
||||
try {
|
||||
const {job, enqueued} = await queueEngine.enqueue(
|
||||
task_type,
|
||||
payload,
|
||||
priority,
|
||||
runAtMs,
|
||||
max_attempts,
|
||||
deduplication_id ?? null,
|
||||
);
|
||||
|
||||
const response: EnqueueResponse = {
|
||||
job_id: job.id,
|
||||
enqueued,
|
||||
};
|
||||
|
||||
return ctx.json(response, 200);
|
||||
} catch (err) {
|
||||
logger.error({err, taskType: task_type}, 'Failed to enqueue job');
|
||||
return ctx.text('Error: internal server error', 500);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * GET /dequeue — lease up to `limit` jobs of the requested task types.
 *
 * task_types is a comma-separated list and must be non-empty after trimming;
 * query parameters are validated by DequeueQuerySchema before use.
 */
export async function dequeueJobs(ctx: Context<AppEnv>): Promise<Response> {
  const queueEngine = ctx.get('queueEngine');
  const logger = ctx.get('logger');

  const query = ctx.req.query();
  const parsed = DequeueQuerySchema.safeParse({
    task_types: query['task_types'],
    limit: query['limit'],
    wait_time_ms: query['wait_time_ms'],
    visibility_timeout_ms: query['visibility_timeout_ms'],
  });

  if (!parsed.success) {
    logger.warn({errors: parsed.error.issues}, 'Invalid dequeue request');
    return ctx.text('Error: invalid request parameters', 400);
  }

  const {task_types, limit, wait_time_ms, visibility_timeout_ms} = parsed.data;

  if (!task_types || task_types.trim() === '') {
    return ctx.text('Error: task_types must not be empty', 400);
  }

  // Split the comma-separated list, dropping empty segments (e.g. "a,,b").
  const taskTypesArray = task_types
    .split(',')
    .map((t) => t.trim())
    .filter((t) => t.length > 0);

  if (taskTypesArray.length === 0) {
    return ctx.text('Error: task_types must not be empty', 400);
  }

  // Defensive clamps on top of schema validation.
  // NOTE(review): the schema admits wait_time_ms up to 30000 but this caps it
  // at 20000 — confirm which bound is intended and align the two.
  const effectiveLimit = Math.min(Math.max(limit, 1), 100);
  const effectiveWaitTime = Math.min(wait_time_ms, 20000);
  const effectiveVisibilityTimeout = visibility_timeout_ms
    ? Math.min(Math.max(visibility_timeout_ms, 1000), 12 * 60 * 60 * 1000)
    : null;

  try {
    const leasedJobs = await queueEngine.dequeue(
      taskTypesArray,
      effectiveLimit,
      effectiveWaitTime,
      effectiveVisibilityTimeout,
    );

    const response: Array<ApiLeasedJob> = leasedJobs.map(toApiLeasedJob);

    return ctx.json(response, 200);
  } catch (err) {
    logger.error({err}, 'Failed to dequeue jobs');
    return ctx.text('Error: internal server error', 500);
  }
}
|
||||
|
||||
export async function ackJob(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const queueEngine = ctx.get('queueEngine');
|
||||
const logger = ctx.get('logger');
|
||||
|
||||
let body: JsonValue;
|
||||
try {
|
||||
body = await ctx.req.json<JsonValue>();
|
||||
} catch {
|
||||
return ctx.text('Error: Invalid JSON body', 400);
|
||||
}
|
||||
|
||||
const parsed = AckRequestSchema.safeParse(body);
|
||||
if (!parsed.success) {
|
||||
return ctx.text('Error: invalid receipt', 400);
|
||||
}
|
||||
|
||||
const {receipt} = parsed.data;
|
||||
|
||||
try {
|
||||
const success = await queueEngine.ack(receipt);
|
||||
if (!success) {
|
||||
return ctx.text('Error: receipt not found', 404);
|
||||
}
|
||||
|
||||
return ctx.json(null, 200);
|
||||
} catch (err) {
|
||||
logger.error({err, receipt}, 'Failed to ack job');
|
||||
return ctx.text('Error: internal server error', 500);
|
||||
}
|
||||
}
|
||||
|
||||
export async function nackJob(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const queueEngine = ctx.get('queueEngine');
|
||||
const logger = ctx.get('logger');
|
||||
|
||||
let body: JsonValue;
|
||||
try {
|
||||
body = await ctx.req.json<JsonValue>();
|
||||
} catch {
|
||||
return ctx.text('Error: Invalid JSON body', 400);
|
||||
}
|
||||
|
||||
const parsed = NackRequestSchema.safeParse(body);
|
||||
if (!parsed.success) {
|
||||
return ctx.text('Error: invalid receipt', 400);
|
||||
}
|
||||
|
||||
const {receipt, error} = parsed.data;
|
||||
|
||||
try {
|
||||
const success = await queueEngine.nack(receipt, error);
|
||||
if (!success) {
|
||||
return ctx.text('Error: receipt not found', 404);
|
||||
}
|
||||
|
||||
return ctx.json(null, 200);
|
||||
} catch (err) {
|
||||
logger.error({err, receipt}, 'Failed to nack job');
|
||||
return ctx.text('Error: internal server error', 500);
|
||||
}
|
||||
}
|
||||
|
||||
export async function changeVisibility(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const queueEngine = ctx.get('queueEngine');
|
||||
const logger = ctx.get('logger');
|
||||
|
||||
let body: JsonValue;
|
||||
try {
|
||||
body = await ctx.req.json<JsonValue>();
|
||||
} catch {
|
||||
return ctx.text('Error: Invalid JSON body', 400);
|
||||
}
|
||||
|
||||
const parsed = VisibilityRequestSchema.safeParse(body);
|
||||
if (!parsed.success) {
|
||||
return ctx.text('Error: invalid receipt', 400);
|
||||
}
|
||||
|
||||
const {receipt, timeout_ms} = parsed.data;
|
||||
|
||||
const effectiveTimeout = Math.min(Math.max(timeout_ms, 1000), 12 * 60 * 60 * 1000);
|
||||
|
||||
try {
|
||||
const success = await queueEngine.changeVisibility(receipt, effectiveTimeout);
|
||||
if (!success) {
|
||||
return ctx.text('Error: receipt not found', 404);
|
||||
}
|
||||
|
||||
return ctx.json(null, 200);
|
||||
} catch (err) {
|
||||
logger.error({err, receipt}, 'Failed to change visibility');
|
||||
return ctx.text('Error: internal server error', 500);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * POST /cron — create or update a cron schedule by id.
 */
export async function upsertCron(ctx: Context<AppEnv>): Promise<Response> {
  const cronScheduler = ctx.get('cronScheduler');
  const logger = ctx.get('logger');

  let body: JsonValue;
  try {
    body = await ctx.req.json<JsonValue>();
  } catch {
    return ctx.text('Error: Invalid JSON body', 400);
  }

  const parsed = UpsertCronRequestSchema.safeParse(body);
  if (!parsed.success) {
    logger.warn({errors: parsed.error.issues}, 'Invalid cron request');
    return ctx.text('Error: invalid request body', 400);
  }

  const {id, task_type, payload, cron_expression, enabled} = parsed.data;

  try {
    await cronScheduler.upsert(id, task_type, payload, cron_expression, enabled);

    return ctx.json(null, 200);
  } catch (err) {
    const message = err instanceof Error ? err.message : '';
    // NOTE(review): fragile — distinguishes a bad cron expression from other
    // failures by substring-matching the message thrown by
    // CronScheduler.upsert; a typed error class would be more robust.
    if (message.includes('Invalid cron expression')) {
      return ctx.text('Error: invalid cron expression', 400);
    }
    logger.error({err, id, cronExpression: cron_expression}, 'Failed to upsert cron');
    return ctx.text('Error: internal server error', 500);
  }
}
|
||||
|
||||
export async function retryJob(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const queueEngine = ctx.get('queueEngine');
|
||||
const logger = ctx.get('logger');
|
||||
const jobId = ctx.req.param('job_id');
|
||||
const parsed = RetryJobParamsSchema.safeParse({job_id: jobId});
|
||||
if (!parsed.success) {
|
||||
return ctx.text('Error: invalid job_id', 400);
|
||||
}
|
||||
|
||||
try {
|
||||
const job = await queueEngine.retryJob(parsed.data.job_id);
|
||||
if (!job) {
|
||||
return ctx.text('Error: job not found in dead letter', 404);
|
||||
}
|
||||
|
||||
return ctx.json(null, 200);
|
||||
} catch (err) {
|
||||
logger.error({err, jobId: parsed.data.job_id}, 'Failed to retry job');
|
||||
return ctx.text('Error: internal server error', 500);
|
||||
}
|
||||
}
|
||||
|
||||
export async function deleteJob(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const queueEngine = ctx.get('queueEngine');
|
||||
const logger = ctx.get('logger');
|
||||
const jobId = ctx.req.param('job_id');
|
||||
const parsed = DeleteJobParamsSchema.safeParse({job_id: jobId});
|
||||
if (!parsed.success) {
|
||||
return ctx.text('Error: invalid job_id', 400);
|
||||
}
|
||||
|
||||
try {
|
||||
const success = await queueEngine.deleteJob(parsed.data.job_id);
|
||||
if (!success) {
|
||||
return ctx.text('Error: job not found', 404);
|
||||
}
|
||||
|
||||
return ctx.json(null, 200);
|
||||
} catch (err) {
|
||||
logger.error({err, jobId: parsed.data.job_id}, 'Failed to delete job');
|
||||
return ctx.text('Error: internal server error', 500);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * GET /stats — queue gauges plus a status row for every cron schedule.
 */
export async function getStats(ctx: Context<AppEnv>): Promise<Response> {
  const queueEngine = ctx.get('queueEngine');
  const cronScheduler = ctx.get('cronScheduler');

  const queueStats = queueEngine.getStats();
  const cronList = cronScheduler.list();
  // Single timestamp so age/overdue calculations are consistent across rows.
  const now = nowMs();

  const cronStats: Array<CronStatsResponse> = cronList.map((schedule) => {
    const lastRunAt = schedule.lastRunMs ? formatRfc3339Timestamp(schedule.lastRunMs) : null;
    // NOTE(review): when nextRunMs is null this reports "now" as the next
    // run, which can mislead dashboards — consider a nullable next_run_at.
    const nextRunAt = schedule.nextRunMs ? formatRfc3339Timestamp(schedule.nextRunMs) : formatRfc3339Timestamp(now);
    const lastRunAgeMs = schedule.lastRunMs ? now - schedule.lastRunMs : null;
    // Overdue = enabled schedule whose computed next run is already in the past.
    const isOverdue = schedule.enabled && schedule.nextRunMs !== null && schedule.nextRunMs <= now;

    return {
      id: schedule.id,
      task_type: schedule.taskType,
      cron_expression: schedule.cronExpression,
      enabled: schedule.enabled,
      last_run_at: lastRunAt,
      next_run_at: nextRunAt,
      last_run_age_ms: lastRunAgeMs,
      is_overdue: isOverdue,
    };
  });

  const response: StatsResponse = {
    queue: {
      ready: queueStats.ready,
      processing: queueStats.processing,
      scheduled: queueStats.scheduled,
      dead_letter: queueStats.deadLetter,
    },
    crons: cronStats,
  };

  return ctx.json(response, 200);
}
|
||||
|
||||
export async function getMetrics(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const queueEngine = ctx.get('queueEngine');
|
||||
|
||||
const queueStats = queueEngine.getStats();
|
||||
|
||||
const response: MetricsResponse = {
|
||||
queue: {
|
||||
ready: queueStats.ready,
|
||||
processing: queueStats.processing,
|
||||
scheduled: queueStats.scheduled,
|
||||
dead_letter: queueStats.deadLetter,
|
||||
},
|
||||
};
|
||||
|
||||
return ctx.json(response, 200);
|
||||
}
|
||||
|
||||
export async function healthCheck(ctx: Context<AppEnv>): Promise<Response> {
|
||||
const response: HealthResponse = {
|
||||
status: 'ok',
|
||||
};
|
||||
return ctx.json(response, 200);
|
||||
}
|
||||
36
packages/queue/src/api/QueueApiTypes.tsx
Normal file
36
packages/queue/src/api/QueueApiTypes.tsx
Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {ErrorI18nService} from '@fluxer/errors/src/i18n/ErrorI18nService';
|
||||
import type {LoggerInterface} from '@fluxer/logger/src/LoggerInterface';
|
||||
import type {CronScheduler} from '@fluxer/queue/src/cron/CronScheduler';
|
||||
import type {QueueEngine} from '@fluxer/queue/src/engine/QueueEngine';
|
||||
|
||||
/**
 * Hono environment for the queue API: per-request context variables that
 * handlers read via ctx.get(...).
 */
export interface AppEnv {
  Variables: {
    queueEngine: QueueEngine;
    cronScheduler: CronScheduler;
    logger: LoggerInterface;
    // Optional — only present when the corresponding middleware is installed;
    // TODO confirm which middleware populates these.
    errorI18nService?: ErrorI18nService;
    requestLocale?: string;
    requestId?: string;
  };
}

// The required variables; the optional ones above are intentionally excluded.
export const APP_ENV_VARIABLE_KEYS = ['queueEngine', 'cronScheduler', 'logger'] as const;
|
||||
83
packages/queue/src/api/QueueRequestSchemas.tsx
Normal file
83
packages/queue/src/api/QueueRequestSchemas.tsx
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import {JsonValueSchema} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import {z} from 'zod';
|
||||
|
||||
export const EnqueueRequestSchema = z.object({
|
||||
task_type: z.string().min(1).max(256),
|
||||
payload: JsonValueSchema,
|
||||
priority: z.number().int().min(0).max(100).default(0),
|
||||
run_at: z.iso.datetime().optional(),
|
||||
max_attempts: z.number().int().min(1).max(100).default(3),
|
||||
deduplication_id: z.string().max(256).optional(),
|
||||
});
|
||||
|
||||
export type EnqueueRequest = z.infer<typeof EnqueueRequestSchema>;
|
||||
|
||||
export const DequeueQuerySchema = z.object({
|
||||
task_types: z.string().optional(),
|
||||
limit: z.coerce.number().int().min(1).max(100).default(1),
|
||||
wait_time_ms: z.coerce.number().int().min(0).max(30000).default(0),
|
||||
visibility_timeout_ms: z.coerce.number().int().min(1000).max(43200000).optional(),
|
||||
});
|
||||
|
||||
export type DequeueQuery = z.infer<typeof DequeueQuerySchema>;
|
||||
|
||||
export const AckRequestSchema = z.object({
|
||||
receipt: z.string().uuid(),
|
||||
});
|
||||
|
||||
export type AckRequest = z.infer<typeof AckRequestSchema>;
|
||||
|
||||
export const NackRequestSchema = z.object({
|
||||
receipt: z.string().uuid(),
|
||||
error: z.string().max(4096).optional(),
|
||||
});
|
||||
|
||||
export type NackRequest = z.infer<typeof NackRequestSchema>;
|
||||
|
||||
export const VisibilityRequestSchema = z.object({
|
||||
receipt: z.string().uuid(),
|
||||
timeout_ms: z.number().int().min(1000).max(43200000),
|
||||
});
|
||||
|
||||
export type VisibilityRequest = z.infer<typeof VisibilityRequestSchema>;
|
||||
|
||||
export const UpsertCronRequestSchema = z.object({
|
||||
id: z.string().min(1).max(256),
|
||||
task_type: z.string().min(1).max(256),
|
||||
payload: JsonValueSchema,
|
||||
cron_expression: z.string().min(1).max(256),
|
||||
enabled: z.boolean().default(true),
|
||||
});
|
||||
|
||||
export type UpsertCronRequest = z.infer<typeof UpsertCronRequestSchema>;
|
||||
|
||||
export const RetryJobParamsSchema = z.object({
|
||||
job_id: z.string().uuid(),
|
||||
});
|
||||
|
||||
export type RetryJobParams = z.infer<typeof RetryJobParamsSchema>;
|
||||
|
||||
export const DeleteJobParamsSchema = z.object({
|
||||
job_id: z.string().uuid(),
|
||||
});
|
||||
|
||||
export type DeleteJobParams = z.infer<typeof DeleteJobParamsSchema>;
|
||||
75
packages/queue/src/api/QueueResponseTypes.tsx
Normal file
75
packages/queue/src/api/QueueResponseTypes.tsx
Normal file
@@ -0,0 +1,75 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
|
||||
/**
 * Public response shapes for the queue HTTP API.
 *
 * NOTE(review): Handlers.tsx declares private duplicates of these (ApiJob,
 * EnqueueResponse, ...) — consider having the handlers use these exported
 * types directly.
 */
export interface QueueApiEnqueueResponse {
  job_id: string;
  enqueued: boolean;
}

/** Wire form of a job: snake_case fields, RFC 3339 string timestamps. */
export interface QueueApiJob {
  id: string;
  task_type: string;
  // Null when the stored payload could not be decoded as JSON.
  payload: JsonValue | null;
  priority: number;
  run_at: string;
  created_at: string;
  attempts: number;
  max_attempts: number;
  error: string | null;
  deduplication_id: string | null;
}

/** A leased job: the job plus its ack/nack receipt and lease deadline. */
export interface QueueApiLeasedJob {
  receipt: string;
  visibility_deadline: string;
  job: QueueApiJob;
}

/** Gauge counts per queue state. */
export interface QueueApiStats {
  ready: number;
  processing: number;
  scheduled: number;
  dead_letter: number;
}

/** Per-cron status row in GET /stats. */
export interface QueueApiCronStatus {
  id: string;
  task_type: string;
  cron_expression: string;
  enabled: boolean;
  last_run_at: string | null;
  next_run_at: string;
  last_run_age_ms: number | null;
  is_overdue: boolean;
}

/** Body of GET /stats. */
export interface QueueApiStatsResponse {
  queue: QueueApiStats;
  crons: Array<QueueApiCronStatus>;
}

/** Body of GET /metrics. */
export interface QueueApiMetricsResponse {
  queue: QueueApiStats;
}

/** Body of GET /_health. */
export interface QueueApiHealthResponse {
  status: string;
}
|
||||
67
packages/queue/src/api/Routes.tsx
Normal file
67
packages/queue/src/api/Routes.tsx
Normal file
@@ -0,0 +1,67 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import {
|
||||
ackJob,
|
||||
changeVisibility,
|
||||
deleteJob,
|
||||
dequeueJobs,
|
||||
enqueueJob,
|
||||
getMetrics,
|
||||
getStats,
|
||||
healthCheck,
|
||||
nackJob,
|
||||
retryJob,
|
||||
upsertCron,
|
||||
} from '@fluxer/queue/src/api/Handlers';
|
||||
import type {AppEnv} from '@fluxer/queue/src/api/QueueApiTypes';
|
||||
import {Hono} from 'hono';
|
||||
import {bodyLimit} from 'hono/body-limit';
|
||||
|
||||
// Maximum accepted request body size: 1 MiB.
const MAX_BODY_SIZE = 1024 * 1024;

/**
 * Build the Hono router for the queue HTTP API.
 *
 * The body-size limit middleware is registered first so it applies to every
 * route below it; registration order matters.
 */
export function createRoutes(): Hono<AppEnv> {
  const app = new Hono<AppEnv>();

  app.use(
    '*',
    bodyLimit({
      maxSize: MAX_BODY_SIZE,
      onError: (c) => c.text('Error: request body too large', 413),
    }),
  );

  app.get('/_health', healthCheck);

  // Core job lifecycle.
  app.post('/enqueue', enqueueJob);
  app.get('/dequeue', dequeueJobs);
  app.post('/ack', ackJob);
  app.post('/nack', nackJob);
  app.post('/visibility', changeVisibility);

  // Cron schedule management.
  app.post('/cron', upsertCron);

  // Dead-letter / admin operations.
  app.post('/retry/:job_id', retryJob);
  app.delete('/job/:job_id', deleteJob);

  // Observability.
  app.get('/stats', getStats);
  app.get('/metrics', getMetrics);

  return app;
}
|
||||
37
packages/queue/src/contracts/ICronScheduler.tsx
Normal file
37
packages/queue/src/contracts/ICronScheduler.tsx
Normal file
@@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {CronSchedule, CronStats} from '@fluxer/queue/src/domain/QueueDomainTypes';
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
|
||||
/**
 * Contract for the cron scheduler: persists schedules, computes next-run
 * times, and enqueues jobs when schedules fire.
 */
export interface ICronScheduler {
  // Restore persisted schedules and begin ticking.
  start(): Promise<void>;
  // Stop timers and persist state.
  stop(): Promise<void>;
  // Create or replace the schedule with this id. Throws on an invalid cron
  // expression (see CronScheduler.upsert).
  upsert(
    id: string,
    taskType: string,
    payload: JsonValue,
    cronExpression: string,
    enabled?: boolean,
  ): Promise<CronSchedule>;
  // Returns false when no schedule with this id existed — TODO confirm.
  delete(id: string): Promise<boolean>;
  get(id: string): CronSchedule | null;
  list(): Array<CronSchedule>;
  getStats(): CronStats;
}
|
||||
48
packages/queue/src/contracts/IQueueEngine.tsx
Normal file
48
packages/queue/src/contracts/IQueueEngine.tsx
Normal file
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {EnqueueResult, Job, JobRecord, LeasedJob, QueueStats} from '@fluxer/queue/src/domain/QueueDomainTypes';
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
|
||||
/**
 * Contract for the queue engine: enqueue/lease/ack lifecycle, dead-letter
 * management, and stats.
 */
export interface IQueueEngine {
  start(): Promise<void>;
  stop(): Promise<void>;
  // runAtMs null/omitted means run immediately; deduplicationId suppresses
  // duplicate enqueues (see EnqueueResult.enqueued).
  enqueue(
    taskType: string,
    payload: JsonValue,
    priority?: number,
    runAtMs?: number | null,
    maxAttempts?: number,
    deduplicationId?: string | null,
  ): Promise<EnqueueResult>;
  // taskTypes null leases from all task types (callers in tests pass null);
  // visibilityTimeoutMs null presumably uses an engine default — TODO confirm.
  dequeue(
    taskTypes: Array<string> | null,
    limit: number,
    waitTimeMs: number,
    visibilityTimeoutMs: number | null,
  ): Promise<Array<LeasedJob>>;
  // All three return false for an unknown/expired receipt.
  ack(receipt: string): Promise<boolean>;
  nack(receipt: string, error?: string): Promise<boolean>;
  changeVisibility(receipt: string, timeoutMs: number): Promise<boolean>;
  // Returns null when the job is not in the dead-letter set.
  retryJob(jobId: string): Promise<Job | null>;
  deleteJob(jobId: string): Promise<boolean>;
  getStats(): QueueStats;
  getJob(jobId: string): JobRecord | null;
  // Clears all in-memory state; intended for tests — TODO confirm.
  resetState(): Promise<void>;
}
|
||||
32
packages/queue/src/contracts/IQueueEnqueueClient.tsx
Normal file
32
packages/queue/src/contracts/IQueueEnqueueClient.tsx
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {EnqueueResult} from '@fluxer/queue/src/domain/QueueDomainTypes';
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
|
||||
/**
 * Minimal enqueue-only surface of the queue, for producers that never lease
 * or manage jobs.
 */
export interface IQueueEnqueueClient {
  // Signature mirrors IQueueEngine.enqueue; see that interface for the
  // parameter semantics.
  enqueue(
    taskType: string,
    payload: JsonValue,
    priority?: number,
    runAtMs?: number | null,
    maxAttempts?: number,
    deduplicationId?: string | null,
  ): Promise<EnqueueResult>;
}
|
||||
423
packages/queue/src/cron/CronScheduler.tsx
Normal file
423
packages/queue/src/cron/CronScheduler.tsx
Normal file
@@ -0,0 +1,423 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import * as fs from 'node:fs/promises';
|
||||
import * as path from 'node:path';
|
||||
import {promisify} from 'node:util';
|
||||
import * as zlib from 'node:zlib';
|
||||
import type {LoggerFactory, LoggerInterface} from '@fluxer/logger/src/LoggerInterface';
|
||||
import {type CronSchedule, type CronStats, createCronID} from '@fluxer/queue/src/types/JobTypes';
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import {JsonValueSchema} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import type {QueueConfig} from '@fluxer/queue/src/types/QueueConfig';
|
||||
import {nowMs} from '@fluxer/time/src/Clock';
|
||||
import crc32 from 'crc-32';
|
||||
import {CronExpressionParser} from 'cron-parser';
|
||||
import {pack, unpack} from 'msgpackr';
|
||||
|
||||
const deflate = promisify(zlib.deflate);

// NOTE(review): the filename suffix says ".zstd" but compression here is
// zlib deflate (see `deflate` above) — confirm the intended codec; do not
// rename without migrating existing snapshot files on disk.
const CRON_SNAPSHOT_FILENAME = 'cron_snapshot.msgpack.zstd';
const CRON_SNAPSHOT_VERSION = 1;

/** On-disk snapshot: versioned list of (schedule id, serialized schedule). */
interface CronSnapshot {
  version: number;
  schedules: Array<[string, SerializableCronSchedule]>;
}

/**
 * Msgpack-safe form of a cron schedule; the payload is stored as a plain
 * array of byte values rather than a Uint8Array.
 */
interface SerializableCronSchedule {
  id: string;
  taskType: string;
  payload: Array<number>;
  cronExpression: string;
  enabled: boolean;
  lastRunMs: number | null;
  nextRunMs: number | null;
  createdAtMs: number;
  updatedAtMs: number;
}

/** Minimal queue-engine surface the scheduler needs to fire cron jobs. */
export interface QueueEngineClient {
  enqueue(
    taskType: string,
    payload: JsonValue,
    priority?: number,
    runAtMs?: number | null,
    maxAttempts?: number,
    deduplicationId?: string | null,
  ): Promise<{job: {id: string}; enqueued: boolean}>;
}
|
||||
|
||||
export class CronScheduler {
|
||||
private config: QueueConfig;
|
||||
private queueEngine: QueueEngineClient;
|
||||
private logger: LoggerInterface;
|
||||
|
||||
private schedules: Map<string, CronSchedule> = new Map();
|
||||
private running: boolean = false;
|
||||
private tickTimer: NodeJS.Timeout | null = null;
|
||||
private snapshotTimer: NodeJS.Timeout | null = null;
|
||||
private operationsSinceSnapshot: number = 0;
|
||||
|
||||
constructor(config: QueueConfig, queueEngine: QueueEngineClient, loggerFactory: LoggerFactory) {
|
||||
this.config = config;
|
||||
this.queueEngine = queueEngine;
|
||||
this.logger = loggerFactory('CronScheduler');
|
||||
}
|
||||
|
||||
async start(): Promise<void> {
|
||||
this.logger.info({}, 'Starting cron scheduler');
|
||||
|
||||
await this.loadSnapshot();
|
||||
|
||||
for (const schedule of this.schedules.values()) {
|
||||
if (schedule.enabled) {
|
||||
this.updateNextRun(schedule);
|
||||
}
|
||||
}
|
||||
|
||||
this.running = true;
|
||||
this.startTickLoop();
|
||||
this.startSnapshotLoop();
|
||||
|
||||
this.logger.info({schedules: this.schedules.size}, 'Cron scheduler started');
|
||||
}
|
||||
|
||||
async stop(): Promise<void> {
|
||||
this.logger.info({}, 'Stopping cron scheduler');
|
||||
this.running = false;
|
||||
|
||||
if (this.tickTimer) {
|
||||
clearTimeout(this.tickTimer);
|
||||
}
|
||||
if (this.snapshotTimer) {
|
||||
clearTimeout(this.snapshotTimer);
|
||||
}
|
||||
|
||||
await this.saveSnapshot();
|
||||
|
||||
this.logger.info({}, 'Cron scheduler stopped');
|
||||
}
|
||||
|
||||
async upsert(
|
||||
id: string,
|
||||
taskType: string,
|
||||
payload: JsonValue,
|
||||
cronExpression: string,
|
||||
enabled: boolean = true,
|
||||
): Promise<CronSchedule> {
|
||||
try {
|
||||
CronExpressionParser.parse(cronExpression);
|
||||
} catch (_err) {
|
||||
throw new Error(`Invalid cron expression: ${cronExpression}`);
|
||||
}
|
||||
|
||||
const payloadBytes = new Uint8Array(Buffer.from(JSON.stringify(payload)));
|
||||
const existingSchedule = this.schedules.get(id);
|
||||
if (
|
||||
existingSchedule &&
|
||||
this.isScheduleDefinitionUnchanged(existingSchedule, taskType, payloadBytes, cronExpression, enabled)
|
||||
) {
|
||||
return existingSchedule;
|
||||
}
|
||||
|
||||
const now = nowMs();
|
||||
|
||||
const schedule: CronSchedule = {
|
||||
id: createCronID(id),
|
||||
taskType,
|
||||
payload: payloadBytes,
|
||||
cronExpression,
|
||||
enabled,
|
||||
lastRunMs: existingSchedule?.lastRunMs ?? null,
|
||||
nextRunMs: null,
|
||||
createdAtMs: existingSchedule?.createdAtMs ?? now,
|
||||
updatedAtMs: now,
|
||||
};
|
||||
|
||||
if (enabled) {
|
||||
this.updateNextRun(schedule);
|
||||
}
|
||||
|
||||
this.schedules.set(id, schedule);
|
||||
this.operationsSinceSnapshot++;
|
||||
|
||||
this.logger.info({id, taskType, cronExpression, enabled, nextRunMs: schedule.nextRunMs}, 'Cron schedule upserted');
|
||||
|
||||
return schedule;
|
||||
}
|
||||
|
||||
private isScheduleDefinitionUnchanged(
|
||||
schedule: CronSchedule,
|
||||
taskType: string,
|
||||
payload: Uint8Array,
|
||||
cronExpression: string,
|
||||
enabled: boolean,
|
||||
): boolean {
|
||||
return (
|
||||
schedule.taskType === taskType &&
|
||||
schedule.cronExpression === cronExpression &&
|
||||
schedule.enabled === enabled &&
|
||||
this.areEqualPayloadBytes(schedule.payload, payload)
|
||||
);
|
||||
}
|
||||
|
||||
private areEqualPayloadBytes(left: Uint8Array, right: Uint8Array): boolean {
|
||||
if (left.length !== right.length) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (let i = 0; i < left.length; i++) {
|
||||
if (left[i] !== right[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
async delete(id: string): Promise<boolean> {
|
||||
if (!this.schedules.has(id)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
this.schedules.delete(id);
|
||||
this.operationsSinceSnapshot++;
|
||||
|
||||
this.logger.info({id}, 'Cron schedule deleted');
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
get(id: string): CronSchedule | null {
|
||||
return this.schedules.get(id) ?? null;
|
||||
}
|
||||
|
||||
list(): Array<CronSchedule> {
|
||||
return Array.from(this.schedules.values());
|
||||
}
|
||||
|
||||
getStats(): CronStats {
|
||||
let enabled = 0;
|
||||
let disabled = 0;
|
||||
|
||||
for (const schedule of this.schedules.values()) {
|
||||
if (schedule.enabled) {
|
||||
enabled++;
|
||||
} else {
|
||||
disabled++;
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
total: this.schedules.size,
|
||||
enabled,
|
||||
disabled,
|
||||
};
|
||||
}
|
||||
|
||||
private updateNextRun(schedule: CronSchedule): void {
|
||||
try {
|
||||
const cron = CronExpressionParser.parse(schedule.cronExpression, {
|
||||
currentDate: new Date(),
|
||||
});
|
||||
const next = cron.next();
|
||||
schedule.nextRunMs = next.getTime();
|
||||
} catch (err) {
|
||||
this.logger.error({id: schedule.id, err}, 'Failed to parse cron expression');
|
||||
schedule.nextRunMs = null;
|
||||
}
|
||||
}
|
||||
|
||||
private startTickLoop(): void {
|
||||
const tick = async () => {
|
||||
if (!this.running) return;
|
||||
|
||||
await this.processDueSchedules();
|
||||
|
||||
let nextTickMs = 60000;
|
||||
const now = nowMs();
|
||||
|
||||
for (const schedule of this.schedules.values()) {
|
||||
if (schedule.enabled && schedule.nextRunMs !== null) {
|
||||
const delay = schedule.nextRunMs - now;
|
||||
if (delay > 0 && delay < nextTickMs) {
|
||||
nextTickMs = delay;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nextTickMs = Math.max(100, nextTickMs);
|
||||
|
||||
this.tickTimer = setTimeout(tick, nextTickMs);
|
||||
};
|
||||
|
||||
tick();
|
||||
}
|
||||
|
||||
private async processDueSchedules(): Promise<void> {
|
||||
const now = nowMs();
|
||||
|
||||
for (const schedule of this.schedules.values()) {
|
||||
if (!schedule.enabled || schedule.nextRunMs === null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (schedule.nextRunMs <= now) {
|
||||
try {
|
||||
let payload: JsonValue;
|
||||
try {
|
||||
payload = JsonValueSchema.parse(JSON.parse(Buffer.from(schedule.payload).toString('utf-8')));
|
||||
} catch {
|
||||
payload = {};
|
||||
}
|
||||
|
||||
const {enqueued} = await this.queueEngine.enqueue(schedule.taskType, payload, 0, null, 3, null);
|
||||
if (!enqueued) {
|
||||
this.logger.warn({id: schedule.id}, 'Cron job enqueue returned false');
|
||||
}
|
||||
|
||||
this.logger.debug({id: schedule.id, taskType: schedule.taskType}, 'Cron job enqueued');
|
||||
|
||||
schedule.lastRunMs = now;
|
||||
this.updateNextRun(schedule);
|
||||
this.operationsSinceSnapshot++;
|
||||
} catch (err) {
|
||||
this.logger.error({id: schedule.id, err}, 'Failed to enqueue cron job');
|
||||
this.updateNextRun(schedule);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private startSnapshotLoop(): void {
|
||||
const tick = async () => {
|
||||
if (!this.running) return;
|
||||
|
||||
if (this.operationsSinceSnapshot > 0) {
|
||||
await this.saveSnapshot();
|
||||
}
|
||||
|
||||
this.snapshotTimer = setTimeout(tick, this.config.snapshotEveryMs);
|
||||
};
|
||||
|
||||
this.snapshotTimer = setTimeout(tick, this.config.snapshotEveryMs);
|
||||
}
|
||||
|
||||
private async saveSnapshot(): Promise<void> {
|
||||
try {
|
||||
const snapshot: CronSnapshot = {
|
||||
version: CRON_SNAPSHOT_VERSION,
|
||||
schedules: Array.from(this.schedules.entries()).map(([key, schedule]) => [
|
||||
key,
|
||||
{
|
||||
id: schedule.id,
|
||||
taskType: schedule.taskType,
|
||||
payload: Array.from(schedule.payload),
|
||||
cronExpression: schedule.cronExpression,
|
||||
enabled: schedule.enabled,
|
||||
lastRunMs: schedule.lastRunMs,
|
||||
nextRunMs: schedule.nextRunMs,
|
||||
createdAtMs: schedule.createdAtMs,
|
||||
updatedAtMs: schedule.updatedAtMs,
|
||||
},
|
||||
]),
|
||||
};
|
||||
|
||||
const packed = pack(snapshot);
|
||||
const compressed = await deflate(Buffer.from(packed), {level: this.config.snapshotZstdLevel});
|
||||
|
||||
const checksum = crc32.buf(compressed);
|
||||
|
||||
const finalBuffer = Buffer.alloc(4 + compressed.length);
|
||||
finalBuffer.writeInt32LE(checksum, 0);
|
||||
finalBuffer.set(compressed, 4);
|
||||
|
||||
const snapshotPath = path.join(this.config.dataDir, CRON_SNAPSHOT_FILENAME);
|
||||
const tempPath = `${snapshotPath}.tmp`;
|
||||
|
||||
await fs.writeFile(tempPath, finalBuffer);
|
||||
await fs.rename(tempPath, snapshotPath);
|
||||
|
||||
this.operationsSinceSnapshot = 0;
|
||||
|
||||
this.logger.debug({schedules: this.schedules.size, path: snapshotPath}, 'Cron snapshot saved');
|
||||
} catch (err) {
|
||||
this.logger.error({err}, 'Failed to save cron snapshot');
|
||||
}
|
||||
}
|
||||
|
||||
private async loadSnapshot(): Promise<void> {
|
||||
const snapshotPath = path.join(this.config.dataDir, CRON_SNAPSHOT_FILENAME);
|
||||
|
||||
try {
|
||||
const data = await fs.readFile(snapshotPath);
|
||||
|
||||
if (data.length < 4) {
|
||||
this.logger.warn('Cron snapshot file too small, starting fresh');
|
||||
return;
|
||||
}
|
||||
|
||||
const storedChecksum = data.readInt32LE(0);
|
||||
const compressed = data.subarray(4);
|
||||
|
||||
const calculatedChecksum = crc32.buf(compressed);
|
||||
if (storedChecksum !== calculatedChecksum) {
|
||||
this.logger.error({storedChecksum, calculatedChecksum}, 'Cron snapshot checksum mismatch');
|
||||
return;
|
||||
}
|
||||
|
||||
const decompressed = await promisify(zlib.inflate)(compressed);
|
||||
const snapshot = unpack(Buffer.from(decompressed)) as CronSnapshot;
|
||||
|
||||
if (snapshot.version !== CRON_SNAPSHOT_VERSION) {
|
||||
this.logger.warn(
|
||||
{version: snapshot.version, expected: CRON_SNAPSHOT_VERSION},
|
||||
'Cron snapshot version mismatch',
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
this.schedules.clear();
|
||||
|
||||
for (const [key, serialized] of snapshot.schedules) {
|
||||
const schedule: CronSchedule = {
|
||||
id: createCronID(serialized.id),
|
||||
taskType: serialized.taskType,
|
||||
payload: new Uint8Array(serialized.payload),
|
||||
cronExpression: serialized.cronExpression,
|
||||
enabled: serialized.enabled,
|
||||
lastRunMs: serialized.lastRunMs,
|
||||
nextRunMs: serialized.nextRunMs,
|
||||
createdAtMs: serialized.createdAtMs,
|
||||
updatedAtMs: serialized.updatedAtMs,
|
||||
};
|
||||
this.schedules.set(key, schedule);
|
||||
}
|
||||
|
||||
this.logger.info({schedules: this.schedules.size}, 'Cron snapshot loaded');
|
||||
} catch (err) {
|
||||
if ((err as NodeJS.ErrnoException).code === 'ENOENT') {
|
||||
this.logger.info({}, 'No cron snapshot found, starting fresh');
|
||||
} else {
|
||||
this.logger.error({err}, 'Failed to load cron snapshot');
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
137
packages/queue/src/domain/QueueDomainTypes.tsx
Normal file
137
packages/queue/src/domain/QueueDomainTypes.tsx
Normal file
@@ -0,0 +1,137 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
export type JobID = string & {readonly __brand: 'JobID'};
|
||||
export type Receipt = string & {readonly __brand: 'Receipt'};
|
||||
export type CronID = string & {readonly __brand: 'CronID'};
|
||||
export type DeduplicationID = string & {readonly __brand: 'DeduplicationID'};
|
||||
|
||||
export function createJobID(id: string): JobID {
|
||||
return id as JobID;
|
||||
}
|
||||
|
||||
export function createReceipt(id: string): Receipt {
|
||||
return id as Receipt;
|
||||
}
|
||||
|
||||
export function createCronID(id: string): CronID {
|
||||
return id as CronID;
|
||||
}
|
||||
|
||||
export function createDeduplicationID(id: string): DeduplicationID {
|
||||
return id as DeduplicationID;
|
||||
}
|
||||
|
||||
// Lifecycle states of a job inside the queue engine.
export enum JobStatus {
  // Eligible for immediate dequeue.
  Ready = 'ready',
  // Waiting for its runAtMs to arrive.
  Scheduled = 'scheduled',
  // Leased to a worker; hidden until its visibility deadline passes.
  Inflight = 'inflight',
  // Terminal state; a job here no longer blocks deduplication of new jobs.
  DeadLetter = 'dead_letter',
}
|
||||
|
||||
// A single unit of work held by the queue.
export interface Job {
  id: JobID;
  // Worker-facing category; dequeues can filter by it.
  taskType: string;
  // UTF-8 JSON-encoded payload bytes.
  payload: Uint8Array;
  // Higher values are dequeued first.
  priority: number;
  // Epoch ms before which the job must not run.
  runAtMs: number;
  createdAtMs: number;
  // Number of times the job has been leased so far.
  attempts: number;
  // Upper bound on attempts (clamped to [1, 1000] at enqueue time).
  maxAttempts: number;
  // Last recorded failure message, or null.
  error: string | null;
  // When set, a live job with the same id blocks re-enqueue.
  deduplicationId: DeduplicationID | null;
}
|
||||
|
||||
// Result of an enqueue: the job (possibly a pre-existing duplicate) and
// whether a new job was actually added (false when deduplicated).
export interface EnqueueResult {
  job: Job;
  enqueued: boolean;
}
|
||||
|
||||
// A job handed to a worker together with the receipt identifying its lease.
export interface LeasedJob {
  job: Job;
  receipt: Receipt;
  // Epoch ms; after this the lease expires.
  visibilityDeadlineMs: number;
}
|
||||
|
||||
// A recurring-job definition managed by the cron scheduler.
export interface CronSchedule {
  id: CronID;
  taskType: string;
  // UTF-8 JSON-encoded payload bytes enqueued on each run.
  payload: Uint8Array;
  cronExpression: string;
  enabled: boolean;
  // Epoch ms of the last enqueue, or null if never run.
  lastRunMs: number | null;
  // Epoch ms of the next planned run, or null when disabled/unparseable.
  nextRunMs: number | null;
  createdAtMs: number;
  updatedAtMs: number;
}
|
||||
|
||||
// Point-in-time counts of jobs per queue state.
export interface QueueStats {
  ready: number;
  // NOTE(review): "processing" appears to correspond to JobStatus.Inflight —
  // confirm the naming is intentional.
  processing: number;
  scheduled: number;
  deadLetter: number;
}
|
||||
|
||||
// Counts of cron schedules by enabled state; total === enabled + disabled.
export interface CronStats {
  total: number;
  enabled: number;
  disabled: number;
}
|
||||
|
||||
// Heap entry for the ready queue; mirrors the Job fields used for ordering.
export interface ReadyItem {
  jobId: JobID;
  priority: number;
  runAtMs: number;
  createdAtMs: number;
  // Monotonic insertion counter; final FIFO tiebreaker in the heap.
  sequence: number;
}
|
||||
|
||||
// Authoritative record for a job: the job plus its queue-state bookkeeping.
export interface JobRecord {
  job: Job;
  status: JobStatus;
  // Lease receipt while inflight; null otherwise.
  receipt: Receipt | null;
  // Epoch ms when the current lease expires; null when not inflight.
  visibilityDeadlineMs: number | null;
}
|
||||
|
||||
// msgpack-friendly form of Job: payload bytes widened to a number array.
export interface SerializableJob {
  id: JobID;
  taskType: string;
  // UTF-8 JSON payload bytes as plain numbers.
  payload: Array<number>;
  priority: number;
  runAtMs: number;
  createdAtMs: number;
  attempts: number;
  maxAttempts: number;
  error: string | null;
  deduplicationId: DeduplicationID | null;
}
|
||||
|
||||
// msgpack-friendly form of JobRecord, embedding a SerializableJob.
export interface SerializableJobRecord {
  job: SerializableJob;
  status: JobStatus;
  receipt: Receipt | null;
  visibilityDeadlineMs: number | null;
}
|
||||
|
||||
// On-disk shape of the queue engine's snapshot.
export interface SerializableSnapshot {
  // Snapshot wire-format version; checked on load.
  version: number;
  // Map entries of [job id, serialized record].
  jobs: Array<[string, SerializableJobRecord]>;
  // Monotonic counter used for ReadyItem.sequence; restored on load.
  sequenceCounter: number;
  // Map entries of [deduplication id, job id].
  deduplicationIndex: Array<[string, string]>;
}
|
||||
45
packages/queue/src/domain/QueuePayloadCodec.tsx
Normal file
45
packages/queue/src/domain/QueuePayloadCodec.tsx
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import {JsonValueSchema} from '@fluxer/queue/src/types/JsonTypes';
|
||||
|
||||
export function encodeQueuePayload(payload: JsonValue): Uint8Array {
|
||||
return new Uint8Array(Buffer.from(JSON.stringify(payload)));
|
||||
}
|
||||
|
||||
export function decodeQueuePayload(payload: Uint8Array): JsonValue {
|
||||
return JsonValueSchema.parse(JSON.parse(Buffer.from(payload).toString('utf-8')));
|
||||
}
|
||||
|
||||
export function tryDecodeQueuePayload(payload: Uint8Array): JsonValue | null {
|
||||
try {
|
||||
return decodeQueuePayload(payload);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export function decodeQueuePayloadWithFallback(payload: Uint8Array, fallback: JsonValue): JsonValue {
|
||||
const decoded = tryDecodeQueuePayload(payload);
|
||||
if (decoded === null) {
|
||||
return fallback;
|
||||
}
|
||||
return decoded;
|
||||
}
|
||||
117
packages/queue/src/engine/DelayQueue.tsx
Normal file
117
packages/queue/src/engine/DelayQueue.tsx
Normal file
@@ -0,0 +1,117 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import {nowMs} from '@fluxer/time/src/Clock';
|
||||
import {computeRemainingDelayMs} from '@fluxer/time/src/DelayMath';
|
||||
|
||||
// An item paired with the epoch-ms deadline at which it becomes due.
export interface DelayedItem<T> {
  item: T;
  deadlineMs: number;
}
|
||||
|
||||
export class DelayQueue<T> {
|
||||
private items: Array<DelayedItem<T>> = [];
|
||||
private keyExtractor: (item: T) => string;
|
||||
|
||||
constructor(keyExtractor: (item: T) => string) {
|
||||
this.keyExtractor = keyExtractor;
|
||||
}
|
||||
|
||||
push(item: T, deadlineMs: number): void {
|
||||
this.remove(item);
|
||||
|
||||
let left = 0;
|
||||
let right = this.items.length;
|
||||
while (left < right) {
|
||||
const mid = Math.floor((left + right) / 2);
|
||||
if (this.items[mid]!.deadlineMs <= deadlineMs) {
|
||||
left = mid + 1;
|
||||
} else {
|
||||
right = mid;
|
||||
}
|
||||
}
|
||||
|
||||
this.items.splice(left, 0, {item, deadlineMs});
|
||||
}
|
||||
|
||||
popExpired(): Array<T> {
|
||||
const now = nowMs();
|
||||
const expired: Array<T> = [];
|
||||
|
||||
while (this.items.length > 0 && this.items[0]!.deadlineMs <= now) {
|
||||
expired.push(this.items.shift()!.item);
|
||||
}
|
||||
|
||||
return expired;
|
||||
}
|
||||
|
||||
remove(item: T): boolean {
|
||||
const key = this.keyExtractor(item);
|
||||
const index = this.items.findIndex((di) => this.keyExtractor(di.item) === key);
|
||||
if (index !== -1) {
|
||||
this.items.splice(index, 1);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
removeByKey(key: string): boolean {
|
||||
const index = this.items.findIndex((di) => this.keyExtractor(di.item) === key);
|
||||
if (index !== -1) {
|
||||
this.items.splice(index, 1);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
has(item: T): boolean {
|
||||
const key = this.keyExtractor(item);
|
||||
return this.items.some((di) => this.keyExtractor(di.item) === key);
|
||||
}
|
||||
|
||||
hasByKey(key: string): boolean {
|
||||
return this.items.some((di) => this.keyExtractor(di.item) === key);
|
||||
}
|
||||
|
||||
nextDelay(): number | null {
|
||||
if (this.items.length === 0) {
|
||||
return null;
|
||||
}
|
||||
return computeRemainingDelayMs({
|
||||
fromMs: nowMs(),
|
||||
toMs: this.items[0]!.deadlineMs,
|
||||
});
|
||||
}
|
||||
|
||||
get size(): number {
|
||||
return this.items.length;
|
||||
}
|
||||
|
||||
get isEmpty(): boolean {
|
||||
return this.items.length === 0;
|
||||
}
|
||||
|
||||
clear(): void {
|
||||
this.items = [];
|
||||
}
|
||||
|
||||
toArray(): Array<DelayedItem<T>> {
|
||||
return [...this.items];
|
||||
}
|
||||
}
|
||||
146
packages/queue/src/engine/PriorityQueue.tsx
Normal file
146
packages/queue/src/engine/PriorityQueue.tsx
Normal file
@@ -0,0 +1,146 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {JobID, ReadyItem} from '@fluxer/queue/src/domain/QueueDomainTypes';
|
||||
|
||||
export class PriorityQueue {
|
||||
private heap: Array<ReadyItem> = [];
|
||||
|
||||
private compare(a: ReadyItem, b: ReadyItem): number {
|
||||
if (a.priority !== b.priority) {
|
||||
return b.priority - a.priority;
|
||||
}
|
||||
if (a.runAtMs !== b.runAtMs) {
|
||||
return a.runAtMs - b.runAtMs;
|
||||
}
|
||||
if (a.createdAtMs !== b.createdAtMs) {
|
||||
return a.createdAtMs - b.createdAtMs;
|
||||
}
|
||||
return a.sequence - b.sequence;
|
||||
}
|
||||
|
||||
private swap(i: number, j: number): void {
|
||||
const temp = this.heap[i]!;
|
||||
this.heap[i] = this.heap[j]!;
|
||||
this.heap[j] = temp;
|
||||
}
|
||||
|
||||
private bubbleUp(index: number): void {
|
||||
while (index > 0) {
|
||||
const parentIndex = Math.floor((index - 1) / 2);
|
||||
if (this.compare(this.heap[index]!, this.heap[parentIndex]!) >= 0) {
|
||||
break;
|
||||
}
|
||||
this.swap(index, parentIndex);
|
||||
index = parentIndex;
|
||||
}
|
||||
}
|
||||
|
||||
private bubbleDown(index: number): void {
|
||||
const length = this.heap.length;
|
||||
while (true) {
|
||||
const leftChild = 2 * index + 1;
|
||||
const rightChild = 2 * index + 2;
|
||||
let smallest = index;
|
||||
|
||||
if (leftChild < length && this.compare(this.heap[leftChild]!, this.heap[smallest]!) < 0) {
|
||||
smallest = leftChild;
|
||||
}
|
||||
if (rightChild < length && this.compare(this.heap[rightChild]!, this.heap[smallest]!) < 0) {
|
||||
smallest = rightChild;
|
||||
}
|
||||
|
||||
if (smallest === index) {
|
||||
break;
|
||||
}
|
||||
|
||||
this.swap(index, smallest);
|
||||
index = smallest;
|
||||
}
|
||||
}
|
||||
|
||||
push(item: ReadyItem): void {
|
||||
this.heap.push(item);
|
||||
this.bubbleUp(this.heap.length - 1);
|
||||
}
|
||||
|
||||
pop(): ReadyItem | undefined {
|
||||
if (this.heap.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
const result = this.heap[0];
|
||||
const last = this.heap.pop();
|
||||
|
||||
if (this.heap.length > 0 && last !== undefined) {
|
||||
this.heap[0] = last;
|
||||
this.bubbleDown(0);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
peek(): ReadyItem | undefined {
|
||||
return this.heap[0];
|
||||
}
|
||||
|
||||
remove(jobId: JobID): boolean {
|
||||
const index = this.heap.findIndex((item) => item.jobId === jobId);
|
||||
if (index === -1) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const last = this.heap.pop();
|
||||
if (index < this.heap.length && last !== undefined) {
|
||||
this.heap[index] = last;
|
||||
this.bubbleUp(index);
|
||||
this.bubbleDown(index);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
has(jobId: JobID): boolean {
|
||||
return this.heap.some((item) => item.jobId === jobId);
|
||||
}
|
||||
|
||||
get size(): number {
|
||||
return this.heap.length;
|
||||
}
|
||||
|
||||
get isEmpty(): boolean {
|
||||
return this.heap.length === 0;
|
||||
}
|
||||
|
||||
clear(): void {
|
||||
this.heap = [];
|
||||
}
|
||||
|
||||
toArray(): Array<ReadyItem> {
|
||||
return [...this.heap];
|
||||
}
|
||||
|
||||
static fromArray(items: Array<ReadyItem>): PriorityQueue {
|
||||
const queue = new PriorityQueue();
|
||||
for (const item of items) {
|
||||
queue.push(item);
|
||||
}
|
||||
return queue;
|
||||
}
|
||||
}
|
||||
779
packages/queue/src/engine/QueueEngine.tsx
Normal file
779
packages/queue/src/engine/QueueEngine.tsx
Normal file
@@ -0,0 +1,779 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import * as fs from 'node:fs/promises';
|
||||
import * as path from 'node:path';
|
||||
import {promisify} from 'node:util';
|
||||
import * as zlib from 'node:zlib';
|
||||
import type {LoggerFactory, LoggerInterface} from '@fluxer/logger/src/LoggerInterface';
|
||||
import {DelayQueue} from '@fluxer/queue/src/engine/DelayQueue';
|
||||
import {PriorityQueue} from '@fluxer/queue/src/engine/PriorityQueue';
|
||||
import {
|
||||
createDeduplicationID,
|
||||
createJobID,
|
||||
createReceipt,
|
||||
type Job,
|
||||
type JobID,
|
||||
type JobRecord,
|
||||
JobStatus,
|
||||
type LeasedJob,
|
||||
type QueueStats,
|
||||
type ReadyItem,
|
||||
type Receipt,
|
||||
type SerializableSnapshot,
|
||||
} from '@fluxer/queue/src/types/JobTypes';
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import type {QueueConfig} from '@fluxer/queue/src/types/QueueConfig';
|
||||
import {nowMs} from '@fluxer/time/src/Clock';
|
||||
import {computeExponentialBackoffSeconds} from '@fluxer/time/src/ExponentialBackoff';
|
||||
import crc32 from 'crc-32';
|
||||
import {pack, unpack} from 'msgpackr';
|
||||
import {v4 as uuidv4} from 'uuid';
|
||||
|
||||
// Promisified zlib deflate, used to compress engine snapshots before writing.
const deflate = promisify(zlib.deflate);

// Bump when the snapshot wire format changes.
const SNAPSHOT_VERSION = 1;
// NOTE(review): extension says ".zstd" but compression is zlib deflate —
// confirm whether this is intentional.
const SNAPSHOT_FILENAME = 'snapshot.msgpack.zstd';
|
||||
|
||||
// Entry in the inflight delay queue; keyed by receipt (see the constructor's
// keyExtractor) so a specific lease can be located when its deadline passes.
interface InflightEntry {
  jobId: JobID;
  receipt: Receipt;
}
|
||||
|
||||
export class QueueEngine {
|
||||
private config: QueueConfig;
|
||||
private logger: LoggerInterface;
|
||||
|
||||
private jobs: Map<string, JobRecord> = new Map();
|
||||
|
||||
private readyQueue: PriorityQueue = new PriorityQueue();
|
||||
|
||||
private scheduledQueue: DelayQueue<JobID>;
|
||||
|
||||
private inflightQueue: DelayQueue<InflightEntry>;
|
||||
|
||||
private deduplicationIndex: Map<string, string> = new Map();
|
||||
|
||||
private sequenceCounter: number = 0;
|
||||
|
||||
private operationsSinceSnapshot: number = 0;
|
||||
private lastSnapshotTime: number = nowMs();
|
||||
private snapshotPromise: Promise<void> | null = null;
|
||||
|
||||
private schedulerTimer: NodeJS.Timeout | null = null;
|
||||
private visibilityTimer: NodeJS.Timeout | null = null;
|
||||
private snapshotTimer: NodeJS.Timeout | null = null;
|
||||
|
||||
private running: boolean = false;
|
||||
|
||||
constructor(config: QueueConfig, loggerFactory: LoggerFactory) {
|
||||
this.config = config;
|
||||
this.logger = loggerFactory('QueueEngine');
|
||||
|
||||
this.scheduledQueue = new DelayQueue<JobID>((id: JobID) => id);
|
||||
this.inflightQueue = new DelayQueue<InflightEntry>((entry: InflightEntry) => entry.receipt);
|
||||
}
|
||||
|
||||
async start(): Promise<void> {
|
||||
this.logger.info({}, 'Starting queue engine');
|
||||
|
||||
await fs.mkdir(this.config.dataDir, {recursive: true});
|
||||
|
||||
await this.loadSnapshot();
|
||||
|
||||
this.running = true;
|
||||
|
||||
this.startSchedulerLoop();
|
||||
this.startVisibilityLoop();
|
||||
this.startSnapshotLoop();
|
||||
|
||||
this.logger.info(
|
||||
{
|
||||
ready: this.readyQueue.size,
|
||||
scheduled: this.scheduledQueue.size,
|
||||
inflight: this.inflightQueue.size,
|
||||
deadLetter: this.countDeadLetter(),
|
||||
},
|
||||
'Queue engine started',
|
||||
);
|
||||
}
|
||||
|
||||
async stop(): Promise<void> {
|
||||
this.logger.info({}, 'Stopping queue engine');
|
||||
this.running = false;
|
||||
|
||||
if (this.schedulerTimer) {
|
||||
clearTimeout(this.schedulerTimer);
|
||||
}
|
||||
if (this.visibilityTimer) {
|
||||
clearTimeout(this.visibilityTimer);
|
||||
}
|
||||
if (this.snapshotTimer) {
|
||||
clearTimeout(this.snapshotTimer);
|
||||
}
|
||||
|
||||
await this.saveSnapshot();
|
||||
|
||||
this.logger.info({}, 'Queue engine stopped');
|
||||
}
|
||||
|
||||
async enqueue(
|
||||
taskType: string,
|
||||
payload: JsonValue,
|
||||
priority: number = 0,
|
||||
runAtMs: number | null = null,
|
||||
maxAttempts: number = 3,
|
||||
deduplicationId: string | null = null,
|
||||
): Promise<{job: Job; enqueued: boolean}> {
|
||||
const now = nowMs();
|
||||
const effectiveRunAt = runAtMs ?? now;
|
||||
|
||||
if (deduplicationId) {
|
||||
const existingJobId = this.deduplicationIndex.get(deduplicationId);
|
||||
if (existingJobId) {
|
||||
const existingRecord = this.jobs.get(existingJobId);
|
||||
if (existingRecord && existingRecord.status !== JobStatus.DeadLetter) {
|
||||
this.logger.debug({deduplicationId, existingJobId}, 'Duplicate job rejected');
|
||||
return {job: existingRecord.job, enqueued: false};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const jobId = createJobID(uuidv4());
|
||||
const payloadBytes = new Uint8Array(Buffer.from(JSON.stringify(payload)));
|
||||
|
||||
const effectiveMaxAttempts = Math.min(Math.max(maxAttempts, 1), 1000);
|
||||
|
||||
const job: Job = {
|
||||
id: jobId,
|
||||
taskType,
|
||||
payload: payloadBytes,
|
||||
priority,
|
||||
runAtMs: effectiveRunAt,
|
||||
createdAtMs: now,
|
||||
attempts: 0,
|
||||
maxAttempts: effectiveMaxAttempts,
|
||||
error: null,
|
||||
deduplicationId: deduplicationId ? createDeduplicationID(deduplicationId) : null,
|
||||
};
|
||||
|
||||
const isScheduled = effectiveRunAt > now;
|
||||
|
||||
const record: JobRecord = {
|
||||
job,
|
||||
status: isScheduled ? JobStatus.Scheduled : JobStatus.Ready,
|
||||
receipt: null,
|
||||
visibilityDeadlineMs: null,
|
||||
};
|
||||
|
||||
this.jobs.set(jobId, record);
|
||||
|
||||
if (deduplicationId) {
|
||||
this.deduplicationIndex.set(deduplicationId, jobId);
|
||||
}
|
||||
|
||||
if (isScheduled) {
|
||||
this.scheduledQueue.push(jobId, effectiveRunAt);
|
||||
} else {
|
||||
this.addToReadyQueue(job);
|
||||
}
|
||||
|
||||
this.recordOperation();
|
||||
|
||||
this.logger.debug({jobId, taskType, priority, runAtMs: effectiveRunAt, isScheduled}, 'Job enqueued');
|
||||
|
||||
return {job, enqueued: true};
|
||||
}
|
||||
|
||||
/**
 * Lease up to `limit` jobs whose task type is in `taskTypes` (null/empty = any).
 *
 * Long-polls: when nothing is immediately available and `waitTimeMs` > 0,
 * retries every <=100ms until the wait budget is spent. Leased jobs are
 * invisible to other consumers until `visibilityTimeoutMs` (or the configured
 * default) elapses.
 */
async dequeue(
  taskTypes: Array<string> | null,
  limit: number,
  waitTimeMs: number,
  visibilityTimeoutMs: number | null,
): Promise<Array<LeasedJob>> {
  const effectiveTimeout = visibilityTimeoutMs ?? this.config.defaultVisibilityTimeoutMs;
  const deadline = nowMs() + waitTimeMs;

  const results: Array<LeasedJob> = [];

  while (results.length < limit) {
    const leasedJob = this.tryDequeueOne(taskTypes, effectiveTimeout);
    if (leasedJob) {
      results.push(leasedJob);
      // Keep draining without sleeping while jobs are available.
      continue;
    }

    // Nothing ready: stop when not long-polling or the deadline has passed.
    if (waitTimeMs === 0 || nowMs() >= deadline) {
      break;
    }

    // Poll again after at most 100ms (or sooner if the deadline is closer).
    await new Promise((resolve) => setTimeout(resolve, Math.min(100, deadline - nowMs())));
  }

  if (results.length > 0) {
    this.recordOperation();
    this.logger.debug({count: results.length, taskTypes}, 'Jobs dequeued');
  }

  return results;
}
|
||||
|
||||
/**
 * Attempt to lease a single ready job matching `taskTypes` (null = any type).
 * Promotes due scheduled jobs first. Returns null when nothing is leasable.
 */
private tryDequeueOne(taskTypes: Array<string> | null, visibilityTimeoutMs: number): LeasedJob | null {
  this.processScheduledJobs();

  const jobId = this.findMatchingJob(taskTypes);
  if (!jobId) {
    return null;
  }

  const record = this.jobs.get(jobId);
  if (!record) {
    // Fix: stale index entry — the job record is gone but the ready queue
    // still references it. Previously the entry was left in place, where it
    // could sit at the head of the queue and starve every subsequent
    // unfiltered dequeue (findMatchingJob's no-filter path only peeks).
    this.readyQueue.remove(jobId);
    return null;
  }

  const receipt = createReceipt(uuidv4());
  const now = nowMs();
  const visibilityDeadline = now + visibilityTimeoutMs;

  // Transition Ready -> Inflight; the attempt is counted at lease time.
  record.status = JobStatus.Inflight;
  record.receipt = receipt;
  record.visibilityDeadlineMs = visibilityDeadline;
  record.job.attempts += 1;

  this.inflightQueue.push({jobId, receipt}, visibilityDeadline);

  this.readyQueue.remove(jobId);

  return {
    job: record.job,
    receipt,
    visibilityDeadlineMs: visibilityDeadline,
  };
}
|
||||
|
||||
/**
 * Find the id of the best-ordered ready job whose task type is in `taskTypes`
 * (null/empty = no filter). Non-destructive: every item popped during the
 * filtered scan is pushed back, including the match itself — the caller
 * removes the matched item only once the lease actually succeeds.
 *
 * NOTE(review): the filtered path may pop and re-push the entire ready queue,
 * i.e. O(n log n) per call — acceptable for small queues, worth revisiting
 * if filtered dequeue becomes hot.
 */
private findMatchingJob(taskTypes: Array<string> | null): JobID | null {
  // No filter: the best candidate is simply the head of the priority queue.
  if (!taskTypes || taskTypes.length === 0) {
    const item = this.readyQueue.peek();
    return item?.jobId ?? null;
  }

  const taskTypeSet = new Set(taskTypes);
  const tempItems: Array<ReadyItem> = [];
  let found: JobID | null = null;

  // Pop items in queue order until one matches the filter, parking the
  // non-matching ones in tempItems.
  while (!this.readyQueue.isEmpty) {
    const item = this.readyQueue.pop();
    if (!item) break;

    const record = this.jobs.get(item.jobId);
    if (record && taskTypeSet.has(record.job.taskType)) {
      found = item.jobId;
      // Restore everything popped so far, including the match itself.
      for (const tempItem of tempItems) {
        this.readyQueue.push(tempItem);
      }
      this.readyQueue.push(item);
      break;
    }
    tempItems.push(item);
  }

  // No match: put every popped item back so the queue is unchanged.
  if (!found) {
    for (const item of tempItems) {
      this.readyQueue.push(item);
    }
  }

  return found;
}
|
||||
|
||||
async ack(receipt: string): Promise<boolean> {
|
||||
const record = this.findByReceipt(receipt);
|
||||
if (!record) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const jobId = record.job.id;
|
||||
const deduplicationId = record.job.deduplicationId;
|
||||
|
||||
this.inflightQueue.removeByKey(receipt);
|
||||
this.jobs.delete(jobId);
|
||||
|
||||
if (deduplicationId) {
|
||||
this.deduplicationIndex.delete(deduplicationId);
|
||||
}
|
||||
|
||||
this.recordOperation();
|
||||
this.logger.debug({jobId, receipt}, 'Job acknowledged');
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
 * Negatively acknowledge a leased job. The lease is released; the job is then
 * either dead-lettered (attempts exhausted) or rescheduled with exponential
 * backoff. Returns false when the receipt matches no live lease.
 *
 * @param error - Optional failure reason stored on the job.
 */
async nack(receipt: string, error?: string): Promise<boolean> {
  const record = this.findByReceipt(receipt);
  if (!record) {
    return false;
  }

  const jobId = record.job.id;

  this.inflightQueue.removeByKey(receipt);

  if (error) {
    record.job.error = error;
  }

  // Release the lease before deciding the job's next state.
  record.receipt = null;
  record.visibilityDeadlineMs = null;

  if (record.job.attempts >= record.job.maxAttempts) {
    record.status = JobStatus.DeadLetter;
    // Fall back to a generic message so a dead-letter job always has an error.
    record.job.error = error ?? 'max_attempts exceeded';

    // Free the dedup id so a fresh enqueue with the same id is allowed.
    if (record.job.deduplicationId) {
      this.deduplicationIndex.delete(record.job.deduplicationId);
    }

    this.logger.info({jobId, attempts: record.job.attempts, error}, 'Job moved to dead letter queue');
  } else {
    // Exponential backoff based on how many times the job has been attempted.
    const backoffMs =
      computeExponentialBackoffSeconds({
        attemptCount: record.job.attempts,
      }) * 1000;
    const retryAtMs = nowMs() + backoffMs;

    record.status = JobStatus.Scheduled;
    record.job.runAtMs = retryAtMs;
    record.job.error = error ?? null;
    this.scheduledQueue.push(jobId, retryAtMs);

    this.logger.debug({jobId, attempts: record.job.attempts, retryAtMs, error}, 'Job scheduled for retry');
  }

  this.recordOperation();
  return true;
}
|
||||
|
||||
/**
 * Extend (or shorten) the visibility timeout of an inflight lease to
 * `timeoutMs` from now. Returns false when the receipt matches no inflight job.
 */
async changeVisibility(receipt: string, timeoutMs: number): Promise<boolean> {
  const record = this.findByReceipt(receipt);
  if (!record || record.status !== JobStatus.Inflight) {
    return false;
  }

  const newDeadline = nowMs() + timeoutMs;

  // Re-key the inflight entry under the new deadline. The non-null assertion
  // is safe: findByReceipt matched record.receipt === receipt, so it is set.
  this.inflightQueue.removeByKey(receipt);
  this.inflightQueue.push({jobId: record.job.id, receipt: record.receipt!}, newDeadline);

  record.visibilityDeadlineMs = newDeadline;

  this.recordOperation();
  this.logger.debug({jobId: record.job.id, newDeadline}, 'Visibility timeout changed');

  return true;
}
|
||||
|
||||
async retryJob(jobId: string): Promise<Job | null> {
|
||||
const record = this.jobs.get(jobId);
|
||||
if (!record || record.status !== JobStatus.DeadLetter) {
|
||||
return null;
|
||||
}
|
||||
|
||||
record.job.attempts = 0;
|
||||
record.job.error = null;
|
||||
record.job.runAtMs = nowMs();
|
||||
record.status = JobStatus.Ready;
|
||||
record.receipt = null;
|
||||
record.visibilityDeadlineMs = null;
|
||||
|
||||
this.addToReadyQueue(record.job);
|
||||
|
||||
this.recordOperation();
|
||||
this.logger.info({jobId}, 'Job retried from dead letter queue');
|
||||
|
||||
return record.job;
|
||||
}
|
||||
|
||||
/**
 * Permanently remove a job in any state. Returns false when the id is unknown.
 * The job is first removed from whichever index matches its current status;
 * dead-letter jobs live only in `this.jobs`, so no queue removal is needed.
 */
async deleteJob(jobId: string): Promise<boolean> {
  const record = this.jobs.get(jobId);
  if (!record) {
    return false;
  }

  switch (record.status) {
    case JobStatus.Ready:
      this.readyQueue.remove(createJobID(jobId));
      break;
    case JobStatus.Scheduled:
      this.scheduledQueue.removeByKey(jobId);
      break;
    case JobStatus.Inflight:
      // Invalidates the outstanding lease; a later ack/nack on this receipt
      // will simply find no record and return false.
      if (record.receipt) {
        this.inflightQueue.removeByKey(record.receipt);
      }
      break;
  }

  // Free the dedup id so the same id can be enqueued again.
  if (record.job.deduplicationId) {
    this.deduplicationIndex.delete(record.job.deduplicationId);
  }

  this.jobs.delete(jobId);

  this.recordOperation();
  this.logger.debug({jobId}, 'Job deleted');

  return true;
}
|
||||
|
||||
/**
 * Count jobs by state. Scheduled jobs whose run time has already passed are
 * reported as ready, matching what a dequeue would observe after promotion.
 */
getStats(): QueueStats {
  const now = nowMs();
  const counts = {ready: 0, processing: 0, scheduled: 0, deadLetter: 0};

  for (const {status, job} of this.jobs.values()) {
    if (status === JobStatus.Ready) {
      counts.ready += 1;
    } else if (status === JobStatus.Scheduled) {
      if (job.runAtMs <= now) {
        counts.ready += 1;
      } else {
        counts.scheduled += 1;
      }
    } else if (status === JobStatus.Inflight) {
      counts.processing += 1;
    } else if (status === JobStatus.DeadLetter) {
      counts.deadLetter += 1;
    }
  }

  return counts;
}
|
||||
|
||||
/** Look up a job record by id; null when unknown. */
getJob(jobId: string): JobRecord | null {
  const record = this.jobs.get(jobId);
  return record !== undefined ? record : null;
}
|
||||
|
||||
/**
 * Locate the record currently leased under `receipt`, or null.
 * Linear scan over every job record.
 */
private findByReceipt(receipt: string): JobRecord | null {
  const match = [...this.jobs.values()].find((record) => record.receipt === receipt);
  return match ?? null;
}
|
||||
|
||||
private addToReadyQueue(job: Job): void {
|
||||
const item: ReadyItem = {
|
||||
jobId: job.id,
|
||||
priority: job.priority,
|
||||
runAtMs: job.runAtMs,
|
||||
createdAtMs: job.createdAtMs,
|
||||
sequence: this.sequenceCounter++,
|
||||
};
|
||||
this.readyQueue.push(item);
|
||||
}
|
||||
|
||||
private processScheduledJobs(): void {
|
||||
const expiredIds = this.scheduledQueue.popExpired();
|
||||
for (const jobId of expiredIds) {
|
||||
const record = this.jobs.get(jobId);
|
||||
if (record && record.status === JobStatus.Scheduled) {
|
||||
record.status = JobStatus.Ready;
|
||||
this.addToReadyQueue(record.job);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Expire leases whose visibility deadline has passed. Each expired job either
 * moves to the dead letter queue (attempts exhausted) or is rescheduled after
 * the configured backoff. The receipt equality check guards against acting on
 * a stale entry when the job has since been re-leased under a newer receipt.
 */
private processVisibilityTimeouts(): void {
  const expired = this.inflightQueue.popExpired();
  for (const entry of expired) {
    const record = this.jobs.get(entry.jobId);
    if (record && record.status === JobStatus.Inflight && record.receipt === entry.receipt) {
      record.receipt = null;
      record.visibilityDeadlineMs = null;
      record.job.error = 'visibility timeout';

      if (record.job.attempts >= record.job.maxAttempts) {
        record.status = JobStatus.DeadLetter;

        // Free the dedup id so a fresh enqueue with the same id is allowed.
        if (record.job.deduplicationId) {
          this.deduplicationIndex.delete(record.job.deduplicationId);
        }

        this.logger.info({jobId: entry.jobId}, 'Job moved to dead letter queue after visibility timeout');
      } else {
        // Fixed backoff here, unlike nack's exponential backoff.
        const retryAtMs = nowMs() + this.config.visibilityTimeoutBackoffMs;
        record.status = JobStatus.Scheduled;
        record.job.runAtMs = retryAtMs;
        this.scheduledQueue.push(entry.jobId, retryAtMs);
        this.logger.debug({jobId: entry.jobId, retryAtMs}, 'Job scheduled for retry after visibility timeout');
      }

      this.recordOperation();
    }
  }
}
|
||||
|
||||
/** Number of jobs currently in the dead letter queue. */
private countDeadLetter(): number {
  const records = [...this.jobs.values()];
  return records.filter((record) => record.status === JobStatus.DeadLetter).length;
}
|
||||
|
||||
/**
 * Self-rescheduling timer that promotes due scheduled jobs to ready.
 * Runs once immediately, then wakes at the next scheduled run time (capped at
 * 1s). Stops rescheduling itself once `this.running` is false.
 */
private startSchedulerLoop(): void {
  const tick = () => {
    if (!this.running) return;

    this.processScheduledJobs();

    // Sleep until the next scheduled job is due, but at most 1s.
    const nextDelay = this.scheduledQueue.nextDelay();
    const delay = nextDelay !== null ? Math.min(nextDelay, 1000) : 1000;

    this.schedulerTimer = setTimeout(tick, delay);
  };

  tick();
}
|
||||
|
||||
/**
 * Self-rescheduling timer that expires overdue leases. Runs once immediately,
 * then wakes at the next lease deadline (capped at 1s). Stops rescheduling
 * itself once `this.running` is false.
 */
private startVisibilityLoop(): void {
  const tick = () => {
    if (!this.running) return;

    this.processVisibilityTimeouts();

    // Sleep until the next lease deadline, but at most 1s.
    const nextDelay = this.inflightQueue.nextDelay();
    const delay = nextDelay !== null ? Math.min(nextDelay, 1000) : 1000;

    this.visibilityTimer = setTimeout(tick, delay);
  };

  tick();
}
|
||||
|
||||
/**
 * Periodic snapshot timer. Unlike the scheduler/visibility loops, the first
 * tick is delayed by a full interval (presumably so startup does not snapshot
 * immediately — confirm intent). `maybeSnapshot` is called fire-and-forget;
 * `saveSnapshot` guards against concurrent writes itself via snapshotPromise.
 */
private startSnapshotLoop(): void {
  const tick = () => {
    if (!this.running) return;

    this.maybeSnapshot();

    this.snapshotTimer = setTimeout(tick, this.config.snapshotEveryMs);
  };

  this.snapshotTimer = setTimeout(tick, this.config.snapshotEveryMs);
}
|
||||
|
||||
private recordOperation(): void {
|
||||
this.operationsSinceSnapshot++;
|
||||
}
|
||||
|
||||
/**
 * Write a snapshot when either threshold is hit — enough mutating operations
 * since the last snapshot, or enough elapsed wall-clock time — and no
 * snapshot write is already in flight.
 */
private async maybeSnapshot(): Promise<void> {
  const now = nowMs();
  const timeSinceSnapshot = now - this.lastSnapshotTime;

  if (
    !this.snapshotPromise &&
    (this.operationsSinceSnapshot >= this.config.snapshotAfterOps || timeSinceSnapshot >= this.config.snapshotEveryMs)
  ) {
    await this.saveSnapshot();
  }
}
|
||||
|
||||
/**
 * Serialize all queue state to disk as a checksummed, compressed snapshot.
 *
 * On-disk layout: 4-byte little-endian CRC32 of the compressed payload,
 * followed by the deflate-compressed msgpack body. Written via a temp file
 * plus rename so a crash never leaves a partially written snapshot behind.
 *
 * `snapshotPromise` doubles as a mutex: concurrent callers await the
 * in-flight write instead of starting a second one.
 */
private async saveSnapshot(): Promise<void> {
  if (this.snapshotPromise) return this.snapshotPromise;

  this.snapshotPromise = (async () => {
    try {
      const snapshot: SerializableSnapshot = {
        version: SNAPSHOT_VERSION,
        // Uint8Array payloads are flattened to plain number arrays so they
        // round-trip through msgpack.
        jobs: Array.from(this.jobs.entries()).map(([key, record]) => [
          key,
          {
            job: {
              id: record.job.id,
              taskType: record.job.taskType,
              payload: Array.from(record.job.payload),
              priority: record.job.priority,
              runAtMs: record.job.runAtMs,
              createdAtMs: record.job.createdAtMs,
              attempts: record.job.attempts,
              maxAttempts: record.job.maxAttempts,
              error: record.job.error,
              deduplicationId: record.job.deduplicationId,
            },
            status: record.status,
            receipt: record.receipt,
            visibilityDeadlineMs: record.visibilityDeadlineMs,
          },
        ]),
        // NOTE(review): cron schedules are not serialized yet — confirm this
        // is intentionally pending work.
        cronSchedules: [],
        sequenceCounter: this.sequenceCounter,
        deduplicationIndex: Array.from(this.deduplicationIndex.entries()),
      };

      const packed = pack(snapshot);
      // NOTE(review): the config key says "Zstd" but the level is passed to
      // deflate — confirm the intended algorithm/naming.
      const compressed = await deflate(Buffer.from(packed), {level: this.config.snapshotZstdLevel});

      const checksum = crc32.buf(compressed);

      // Final file: [crc32 (int32 LE)] [compressed payload].
      const finalBuffer = Buffer.alloc(4 + compressed.length);
      finalBuffer.writeInt32LE(checksum, 0);
      finalBuffer.set(compressed, 4);

      const snapshotPath = path.join(this.config.dataDir, SNAPSHOT_FILENAME);
      const tempPath = `${snapshotPath}.tmp`;

      // Atomic replace: write to temp, then rename over the live snapshot.
      await fs.writeFile(tempPath, finalBuffer);
      await fs.rename(tempPath, snapshotPath);

      this.operationsSinceSnapshot = 0;
      this.lastSnapshotTime = nowMs();
    } catch (err) {
      // A failed snapshot is logged but never crashes the queue.
      this.logger.error({err}, 'Failed to save snapshot');
    } finally {
      this.snapshotPromise = null;
    }
  })();

  return this.snapshotPromise;
}
|
||||
|
||||
/**
 * Restore queue state from the on-disk snapshot, if present and valid.
 *
 * Validation pipeline: minimum size -> CRC32 checksum -> inflate ->
 * msgpack unpack -> version check. Any failure logs and returns, leaving the
 * queue empty (fresh start). Live state is only cleared after the snapshot is
 * fully validated. Inflight jobs are restored as Ready with their receipts
 * discarded — leases do not survive a restart — while keeping their
 * already-incremented attempt counts.
 */
private async loadSnapshot(): Promise<void> {
  const snapshotPath = path.join(this.config.dataDir, SNAPSHOT_FILENAME);

  try {
    const data = await fs.readFile(snapshotPath);

    if (data.length < 4) {
      this.logger.warn('Snapshot file too small, starting fresh');
      return;
    }

    // Layout mirrors saveSnapshot: [crc32 (int32 LE)] [compressed payload].
    const storedChecksum = data.readInt32LE(0);
    const compressed = data.subarray(4);

    const calculatedChecksum = crc32.buf(compressed);
    if (storedChecksum !== calculatedChecksum) {
      this.logger.error({storedChecksum, calculatedChecksum}, 'Snapshot checksum mismatch');
      return;
    }

    const decompressed = await promisify(zlib.inflate)(compressed);
    const snapshot = unpack(Buffer.from(decompressed)) as SerializableSnapshot;

    if (snapshot.version !== SNAPSHOT_VERSION) {
      this.logger.warn({version: snapshot.version, expected: SNAPSHOT_VERSION}, 'Snapshot version mismatch');
      return;
    }

    // Snapshot is valid: now it is safe to discard live state.
    this.jobs.clear();
    this.readyQueue.clear();
    this.scheduledQueue.clear();
    this.inflightQueue.clear();
    this.deduplicationIndex.clear();

    for (const [key, record] of snapshot.jobs) {
      // Re-apply branded types and rebuild the Uint8Array payload from its
      // serialized number-array form.
      const restoredRecord: JobRecord = {
        ...record,
        job: {
          ...record.job,
          id: createJobID(record.job.id),
          payload: new Uint8Array(record.job.payload),
          deduplicationId: record.job.deduplicationId ? createDeduplicationID(record.job.deduplicationId) : null,
        },
        receipt: record.receipt ? createReceipt(record.receipt) : null,
      };

      this.jobs.set(key, restoredRecord);

      switch (restoredRecord.status) {
        case JobStatus.Ready:
          this.addToReadyQueue(restoredRecord.job);
          break;
        case JobStatus.Scheduled:
          this.scheduledQueue.push(restoredRecord.job.id, restoredRecord.job.runAtMs);
          break;
        case JobStatus.Inflight:
          // Leases do not survive restart: requeue as Ready, drop the lease.
          restoredRecord.status = JobStatus.Ready;
          restoredRecord.receipt = null;
          restoredRecord.visibilityDeadlineMs = null;
          this.addToReadyQueue(restoredRecord.job);
          break;
      }
    }

    for (const [key, value] of snapshot.deduplicationIndex) {
      this.deduplicationIndex.set(key, value);
    }

    this.sequenceCounter = snapshot.sequenceCounter;

    this.logger.info(
      {
        jobs: this.jobs.size,
        ready: this.readyQueue.size,
        scheduled: this.scheduledQueue.size,
      },
      'Snapshot loaded',
    );
  } catch (err) {
    // A missing snapshot is the normal first-boot case; anything else is an
    // actual load failure.
    if ((err as NodeJS.ErrnoException).code === 'ENOENT') {
      this.logger.info({}, 'No snapshot found, starting fresh');
    } else {
      this.logger.error({err}, 'Failed to load snapshot');
    }
  }
}
|
||||
|
||||
/** Absolute/relative path of the snapshot file inside the configured data dir. */
private getSnapshotPath(): string {
  const {dataDir} = this.config;
  return path.join(dataDir, SNAPSHOT_FILENAME);
}
|
||||
|
||||
/**
 * Best-effort deletion of the snapshot file. `force: true` already makes
 * fs.rm tolerate a missing file, so the ENOENT check in the catch is
 * belt-and-suspenders; other failures (e.g. permissions) are logged at debug
 * because deletion is non-critical.
 */
private async removeSnapshotFile(): Promise<void> {
  try {
    await fs.rm(this.getSnapshotPath(), {force: true});
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code !== 'ENOENT') {
      this.logger.debug({error}, 'Failed to remove queue snapshot');
    }
  }
}
|
||||
|
||||
/**
 * Drop all in-memory state (jobs, all queues, dedup index, counters) and
 * delete the on-disk snapshot so the cleared state cannot be resurrected on
 * the next restart.
 */
async resetState(): Promise<void> {
  this.jobs.clear();
  this.readyQueue.clear();
  this.scheduledQueue.clear();
  this.inflightQueue.clear();
  this.deduplicationIndex.clear();
  this.sequenceCounter = 0;
  this.operationsSinceSnapshot = 0;
  this.lastSnapshotTime = nowMs();
  await this.removeSnapshotFile();
}
|
||||
}
|
||||
276
packages/queue/src/types/JobTypes.tsx
Normal file
276
packages/queue/src/types/JobTypes.tsx
Normal file
@@ -0,0 +1,276 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import type {JsonValue} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import {JsonValueSchema} from '@fluxer/queue/src/types/JsonTypes';
|
||||
import {z} from 'zod';
|
||||
|
||||
/**
 * Branded string types: interchangeable with `string` at runtime but
 * nominally distinct at compile time, so a Receipt cannot be passed where a
 * JobID is expected.
 */
export type JobID = string & {readonly __brand: 'JobID'};
// Opaque lease token returned by dequeue; required for ack/nack/visibility calls.
export type Receipt = string & {readonly __brand: 'Receipt'};
export type CronID = string & {readonly __brand: 'CronID'};
export type DeduplicationID = string & {readonly __brand: 'DeduplicationID'};
/** Brand a raw string as a JobID (zero-cost cast, erased at runtime). */
export function createJobID(id: string): JobID {
  return id as JobID;
}

/** Brand a raw string as a lease Receipt (zero-cost cast, erased at runtime). */
export function createReceipt(id: string): Receipt {
  return id as Receipt;
}

/** Brand a raw string as a CronID (zero-cost cast, erased at runtime). */
export function createCronID(id: string): CronID {
  return id as CronID;
}

/** Brand a raw string as a DeduplicationID (zero-cost cast, erased at runtime). */
export function createDeduplicationID(id: string): DeduplicationID {
  return id as DeduplicationID;
}
|
||||
|
||||
/**
 * Job lifecycle states. Typical flow:
 * Ready -> Inflight -> (acked: removed) or (nacked/timed out: Scheduled -> Ready),
 * ending in DeadLetter once maxAttempts is exhausted.
 */
export enum JobStatus {
  Ready = 'ready',
  Scheduled = 'scheduled',
  Inflight = 'inflight',
  DeadLetter = 'dead_letter',
}
|
||||
|
||||
/** A unit of work stored in the queue. `payload` is opaque serialized bytes. */
export interface Job {
  id: JobID;
  taskType: string;
  payload: Uint8Array;
  // 0-100 at the API boundary; ordering semantics live in the ready-queue
  // comparator (not shown here) — presumably higher runs first, confirm there.
  priority: number;
  // Earliest time the job may run (epoch ms).
  runAtMs: number;
  createdAtMs: number;
  // Incremented each time the job is leased.
  attempts: number;
  maxAttempts: number;
  // Most recent failure message, if any.
  error: string | null;
  deduplicationId: DeduplicationID | null;
}

/** A job handed to a consumer together with its lease. */
export interface LeasedJob {
  job: Job;
  // Token required to ack/nack/extend this particular lease.
  receipt: Receipt;
  // When the lease expires and the job becomes eligible for redelivery (epoch ms).
  visibilityDeadlineMs: number;
}

/** A recurring job definition driven by a cron expression. */
export interface CronSchedule {
  id: CronID;
  taskType: string;
  payload: Uint8Array;
  cronExpression: string;
  enabled: boolean;
  lastRunMs: number | null;
  nextRunMs: number | null;
  createdAtMs: number;
  updatedAtMs: number;
}

/** Job counts by state (see getStats for how scheduled-but-due jobs count). */
export interface QueueStats {
  ready: number;
  processing: number;
  scheduled: number;
  deadLetter: number;
}

/** Cron schedule counts. */
export interface CronStats {
  total: number;
  enabled: number;
  disabled: number;
}

/** Combined queue and cron statistics. */
export interface Stats {
  queue: QueueStats;
  cron: CronStats;
}
|
||||
|
||||
/**
 * Entry in the ready priority queue — just enough to order jobs without
 * dereferencing the full record.
 */
export interface ReadyItem {
  jobId: JobID;
  priority: number;
  runAtMs: number;
  createdAtMs: number;
  // Monotonic insertion counter — presumably a FIFO tie-breaker in the queue
  // comparator; confirm against the ready-queue implementation.
  sequence: number;
}

/** A job plus its queue-side bookkeeping. */
export interface JobRecord {
  job: Job;
  status: JobStatus;
  // Set only while the job is Inflight.
  receipt: Receipt | null;
  visibilityDeadlineMs: number | null;
}

/** In-memory form of a persisted snapshot. */
export interface QueueSnapshot {
  version: number;
  jobs: Map<string, JobRecord>;
  cronSchedules: Map<string, CronSchedule>;
  sequenceCounter: number;
  deduplicationIndex: Map<string, string>;
  checksum: number;
}
|
||||
|
||||
/**
 * On-disk job shape: identical to Job except the payload bytes are flattened
 * to a plain number array so they round-trip through msgpack.
 */
export interface SerializableJob {
  id: JobID;
  taskType: string;
  payload: Array<number>;
  priority: number;
  runAtMs: number;
  createdAtMs: number;
  attempts: number;
  maxAttempts: number;
  error: string | null;
  deduplicationId: DeduplicationID | null;
}

/** JobRecord with the serializable job shape. */
export interface SerializableJobRecord {
  job: SerializableJob;
  status: JobStatus;
  receipt: Receipt | null;
  visibilityDeadlineMs: number | null;
}

/**
 * Top-level snapshot payload written to disk. Maps are flattened to
 * entry arrays for serialization.
 */
export interface SerializableSnapshot {
  version: number;
  jobs: Array<[string, SerializableJobRecord]>;
  cronSchedules: Array<[string, CronSchedule]>;
  sequenceCounter: number;
  deduplicationIndex: Array<[string, string]>;
}
|
||||
|
||||
/** Request body for enqueueing a job (snake_case wire format). */
export const EnqueueRequestSchema = z.object({
  task_type: z.string().min(1).max(256),
  payload: JsonValueSchema,
  priority: z.number().int().min(0).max(100).default(0),
  // ISO 8601 timestamp; omitted = run immediately.
  run_at: z.iso.datetime().optional(),
  // NOTE(review): capped at 100 here, but the queue clamps maxAttempts to
  // 1000 internally — confirm which bound is intended.
  max_attempts: z.number().int().min(1).max(100).default(3),
  deduplication_id: z.string().max(256).optional(),
});

export type EnqueueRequest = z.infer<typeof EnqueueRequestSchema>;
|
||||
|
||||
/** Query params for dequeue; numeric params arrive as strings, hence z.coerce. */
export const DequeueQuerySchema = z.object({
  // Presumably a delimiter-separated list parsed downstream — confirm in the handler.
  task_types: z.string().optional(),
  limit: z.coerce.number().int().min(1).max(100).default(1),
  // Long-poll budget; 0 = return immediately.
  wait_time_ms: z.coerce.number().int().min(0).max(30000).default(0),
  // Lease duration: 1s .. 12h.
  visibility_timeout_ms: z.coerce.number().int().min(1000).max(43200000).optional(),
});

export type DequeueQuery = z.infer<typeof DequeueQuerySchema>;
|
||||
|
||||
/** Request body for acknowledging a leased job. */
export const AckRequestSchema = z.object({
  receipt: z.string().uuid(),
});

export type AckRequest = z.infer<typeof AckRequestSchema>;

/** Request body for negatively acknowledging a leased job. */
export const NackRequestSchema = z.object({
  receipt: z.string().uuid(),
  // Optional failure reason stored on the job.
  error: z.string().max(4096).optional(),
});

export type NackRequest = z.infer<typeof NackRequestSchema>;

/** Request body for changing a lease's visibility timeout (1s .. 12h). */
export const VisibilityRequestSchema = z.object({
  receipt: z.string().uuid(),
  timeout_ms: z.number().int().min(1000).max(43200000),
});

export type VisibilityRequest = z.infer<typeof VisibilityRequestSchema>;
|
||||
|
||||
/** Request body for creating or updating a cron schedule (idempotent by id). */
export const UpsertCronRequestSchema = z.object({
  id: z.string().min(1).max(256),
  task_type: z.string().min(1).max(256),
  payload: JsonValueSchema,
  cron_expression: z.string().min(1).max(256),
  enabled: z.boolean().default(true),
});

export type UpsertCronRequest = z.infer<typeof UpsertCronRequestSchema>;
|
||||
|
||||
/** Path params for retrying a dead-lettered job. */
export const RetryJobParamsSchema = z.object({
  job_id: z.string().uuid(),
});

export type RetryJobParams = z.infer<typeof RetryJobParamsSchema>;

/** Path params for deleting a job in any state. */
export const DeleteJobParamsSchema = z.object({
  job_id: z.string().uuid(),
});

export type DeleteJobParams = z.infer<typeof DeleteJobParamsSchema>;
|
||||
|
||||
/** Response body after enqueueing a job (snake_case wire format). */
export interface EnqueueResponse {
  job_id: string;
  task_type: string;
  priority: number;
  // Serialized timestamp of the effective run time.
  run_at: string;
  max_attempts: number;
  deduplication_id: string | null;
}

/** One leased job in a dequeue response, with its lease metadata. */
export interface LeasedJobResponse {
  job_id: string;
  task_type: string;
  // Decoded JSON payload; null when the bytes are not valid JSON.
  payload: JsonValue | null;
  priority: number;
  run_at: string;
  created_at: string;
  attempts: number;
  max_attempts: number;
  // Token required for ack/nack/visibility calls on this lease.
  receipt: string;
  visibility_deadline: string;
}

/** Response body for dequeue: zero or more leased jobs. */
export interface DequeueResponse {
  jobs: Array<LeasedJobResponse>;
}
|
||||
|
||||
/** Wire shape of combined queue + cron statistics (snake_case keys). */
export interface StatsResponse {
  queue: {
    ready: number;
    processing: number;
    scheduled: number;
    dead_letter: number;
  };
  cron: {
    total: number;
    enabled: number;
    disabled: number;
  };
}

/** Wire shape of a cron schedule. */
export interface CronResponse {
  id: string;
  task_type: string;
  cron_expression: string;
  enabled: boolean;
  last_run: string | null;
  next_run: string | null;
  created_at: string;
  updated_at: string;
}

/** Wire shape of a single job lookup. */
export interface JobResponse {
  job_id: string;
  task_type: string;
  // Decoded JSON payload; null when the bytes are not valid JSON.
  payload: JsonValue | null;
  priority: number;
  run_at: string;
  created_at: string;
  attempts: number;
  max_attempts: number;
  error: string | null;
  // JobStatus string value ('ready' | 'scheduled' | 'inflight' | 'dead_letter').
  status: string;
}
|
||||
35
packages/queue/src/types/JsonTypes.tsx
Normal file
35
packages/queue/src/types/JsonTypes.tsx
Normal file
@@ -0,0 +1,35 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import {z} from 'zod';
|
||||
|
||||
export type JsonPrimitive = string | number | boolean | null;
|
||||
export interface JsonObject {
|
||||
[key: string]: JsonValue;
|
||||
}
|
||||
|
||||
export interface JsonArray extends Array<JsonValue> {}
|
||||
|
||||
export type JsonValue = JsonPrimitive | JsonObject | JsonArray;
|
||||
|
||||
const JsonPrimitiveSchema = z.union([z.string(), z.number(), z.boolean(), z.null()]);
|
||||
|
||||
export const JsonValueSchema: z.ZodType<JsonValue> = z
|
||||
.union([JsonPrimitiveSchema, z.array(z.any()), z.record(z.string(), z.any())])
|
||||
.transform((val): JsonValue => val as JsonValue);
|
||||
40
packages/queue/src/types/QueueConfig.tsx
Normal file
40
packages/queue/src/types/QueueConfig.tsx
Normal file
@@ -0,0 +1,40 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
/** Tunables for the in-memory queue and its snapshot persistence. */
export interface QueueConfig {
  // Directory where the snapshot file is written.
  dataDir: string;
  // Interval of the periodic snapshot loop, and the time-based snapshot threshold (ms).
  snapshotEveryMs: number;
  // Snapshot after this many mutating operations (whichever threshold hits first).
  snapshotAfterOps: number;
  // Compression level for snapshots.
  // NOTE(review): named "Zstd" but consumers pass it to deflate — confirm algorithm.
  snapshotZstdLevel: number;
  // Lease duration used when the consumer does not specify one (ms).
  defaultVisibilityTimeoutMs: number;
  // Retry delay applied when a lease expires via visibility timeout (ms).
  visibilityTimeoutBackoffMs: number;
  // Presumably the upper bound on jobs per dequeue batch — confirm at call sites.
  maxReceiveBatch: number;
  // Presumably an internal command-channel capacity — confirm at call sites.
  commandBuffer: number;
}
|
||||
|
||||
/** Defaults for QueueConfig; override per deployment as needed. */
export const defaultQueueConfig: QueueConfig = {
  dataDir: './data/queue',
  snapshotEveryMs: 2000,
  snapshotAfterOps: 50000,
  snapshotZstdLevel: 3,
  // 30s default lease.
  defaultVisibilityTimeoutMs: 30000,
  // 10s delay before retrying a timed-out lease.
  visibilityTimeoutBackoffMs: 10000,
  maxReceiveBatch: 100,
  commandBuffer: 8192,
};
|
||||
5
packages/queue/tsconfig.json
Normal file
5
packages/queue/tsconfig.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"extends": "../../tsconfigs/package.json",
|
||||
"compilerOptions": {},
|
||||
"include": ["src/**/*"]
|
||||
}
|
||||
45
packages/queue/vitest.config.ts
Normal file
45
packages/queue/vitest.config.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright (C) 2026 Fluxer Contributors
|
||||
*
|
||||
* This file is part of Fluxer.
|
||||
*
|
||||
* Fluxer is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Fluxer is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
import path from 'node:path';
|
||||
import {fileURLToPath} from 'node:url';
|
||||
import tsconfigPaths from 'vite-tsconfig-paths';
|
||||
import {defineConfig} from 'vitest/config';
|
||||
|
||||
const __dirname = path.dirname(fileURLToPath(import.meta.url));
|
||||
|
||||
export default defineConfig({
|
||||
plugins: [
|
||||
tsconfigPaths({
|
||||
root: path.resolve(__dirname, '../..'),
|
||||
}),
|
||||
],
|
||||
test: {
|
||||
globals: true,
|
||||
environment: 'node',
|
||||
include: ['**/*.{test,spec}.{ts,tsx}'],
|
||||
exclude: ['node_modules', 'dist'],
|
||||
setupFiles: [],
|
||||
coverage: {
|
||||
provider: 'v8',
|
||||
reporter: ['text', 'json', 'html'],
|
||||
exclude: ['**/*.test.tsx', '**/*.spec.tsx', 'node_modules/'],
|
||||
},
|
||||
},
|
||||
});
|
||||
Reference in New Issue
Block a user