refactor progress

This commit is contained in:
Hampus Kraft
2026-02-17 12:22:36 +00:00
parent cb31608523
commit d5abd1a7e4
8257 changed files with 1190207 additions and 761040 deletions

View File

@@ -0,0 +1,86 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {LoggerInterface} from '@fluxer/logger/src/LoggerInterface';
import type {IWorkerService} from '@fluxer/worker/src/contracts/IWorkerService';
import type {WorkerJobOptions, WorkerJobPayload} from '@fluxer/worker/src/contracts/WorkerTypes';
import type {IQueueProvider} from '@fluxer/worker/src/providers/IQueueProvider';
/** Constructor options for {@link DirectWorkerService}. */
export interface DirectWorkerServiceOptions {
	queueProvider: IQueueProvider;
	logger: LoggerInterface;
}

/**
 * IWorkerService implementation that talks to a queue provider directly
 * (in-process) rather than over HTTP. Every operation is logged, and
 * provider errors are re-thrown unchanged after being logged.
 */
export class DirectWorkerService implements IWorkerService {
	private readonly queueProvider: IQueueProvider;
	private readonly logger: LoggerInterface;

	constructor({queueProvider, logger}: DirectWorkerServiceOptions) {
		this.queueProvider = queueProvider;
		this.logger = logger;
	}

	/**
	 * Enqueues a job of the given task type; scheduling options
	 * (runAt/maxAttempts/priority) are forwarded to the provider as-is.
	 */
	async addJob<TPayload extends WorkerJobPayload = WorkerJobPayload>(
		taskType: string,
		payload: TPayload,
		options?: WorkerJobOptions,
	): Promise<void> {
		const {runAt, maxAttempts, priority} = options ?? {};
		try {
			await this.queueProvider.enqueue(taskType, payload, {runAt, maxAttempts, priority});
			this.logger.debug({taskType, payload}, 'Job queued successfully via direct provider');
		} catch (error) {
			this.logger.error({error, taskType, payload}, 'Failed to queue job via direct provider');
			throw error;
		}
	}

	/**
	 * Cancels a pending job. Resolves true when the provider cancelled it,
	 * false when the job was not found (e.g. already processed).
	 */
	async cancelJob(jobId: string): Promise<boolean> {
		try {
			const cancelled = await this.queueProvider.cancelJob(jobId);
			if (!cancelled) {
				this.logger.debug({jobId}, 'Job not found (may have already been processed)');
				return cancelled;
			}
			this.logger.info({jobId}, 'Job cancelled successfully');
			return cancelled;
		} catch (error) {
			this.logger.error({error, jobId}, 'Failed to cancel job');
			throw error;
		}
	}

	/**
	 * Re-enqueues a job from the dead letter queue. Resolves true on success,
	 * false when the job is not in the dead letter queue.
	 */
	async retryDeadLetterJob(jobId: string): Promise<boolean> {
		try {
			const retried = await this.queueProvider.retryDeadLetterJob(jobId);
			if (!retried) {
				this.logger.debug({jobId}, 'Job not found in dead letter queue');
				return retried;
			}
			this.logger.info({jobId}, 'Dead letter job retried successfully');
			return retried;
		} catch (error) {
			this.logger.error({error, jobId}, 'Failed to retry dead letter job');
			throw error;
		}
	}
}

View File

@@ -0,0 +1,300 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {Config} from '@fluxer/api/src/Config';
import {addSpanEvent, setSpanAttributes, withSpan} from '@fluxer/api/src/telemetry/Tracing';
import type {WorkerJobPayload} from '@fluxer/worker/src/contracts/WorkerTypes';
import {ms} from 'itty-time';
// Memoized queue-service base URL, read from Config on first access.
let _queueBaseUrl: string | null = null;

/** Returns the queue service base URL, caching the first Config lookup. */
function getQueueBaseUrl(): string {
	_queueBaseUrl ??= Config.queue.baseUrl;
	return _queueBaseUrl;
}
function getRequestHeaders(): Record<string, string> {
const headers: Record<string, string> = {'Content-Type': 'application/json'};
if (Config.queue.authSecret) {
headers.Authorization = `Bearer ${Config.queue.authSecret}`;
}
return headers;
}
/**
 * Bearer Authorization header for queue-service requests, or undefined
 * when no auth secret is configured.
 */
function getAuthHeaders(): Record<string, string> | undefined {
	const secret = Config.queue.authSecret;
	return secret ? {Authorization: `Bearer ${secret}`} : undefined;
}
/** Wire shape of a job record as returned by the queue service HTTP API. */
interface QueueJob {
	id: string;
	task_type: string;
	payload: WorkerJobPayload;
	priority: number;
	run_at: string;
	created_at: string;
	attempts: number;
	max_attempts: number;
	error?: string | null;
	deduplication_id?: string | null;
}

/** A job leased to this worker; the receipt is required to ack/nack/extend it. */
interface LeasedQueueJob {
	receipt: string;
	// ISO timestamp; after it passes the lease expires and the job may be redelivered.
	visibility_deadline: string;
	job: QueueJob;
}

/**
 * HTTP client for the external queue service (enqueue/dequeue/ack/nack,
 * cron upsert, cancellation, dead-letter retry). Every request has a
 * 30-second timeout and, when configured, a bearer-token Authorization
 * header; most operations are wrapped in tracing spans.
 */
export class HttpWorkerQueue {
	/**
	 * fetch() with a 30-second overall timeout. The abort timer is owned by
	 * this method and always cleared, whether the request resolves or
	 * rejects. (Replaces the previous createTimeoutController helper that
	 * smuggled the timer id through an ad-hoc property cast on the
	 * AbortController.)
	 */
	private async fetchWithTimeout(input: string | URL | Request, init?: RequestInit): Promise<Response> {
		const controller = new AbortController();
		const timeoutId = setTimeout(() => controller.abort(), ms('30 seconds'));
		try {
			return await fetch(input, {...init, signal: controller.signal});
		} finally {
			clearTimeout(timeoutId);
		}
	}

	/**
	 * Enqueues a job and returns the server-assigned job id.
	 * @param taskType registered task name the worker runner dispatches on
	 * @param payload JSON-serializable job payload
	 * @param options optional scheduling (runAt), retry cap (maxAttempts,
	 *   default 5) and priority (default 0)
	 * @throws Error when the queue service responds with a non-2xx status
	 */
	async enqueue(
		taskType: string,
		payload: WorkerJobPayload,
		options?: {
			runAt?: Date;
			maxAttempts?: number;
			priority?: number;
		},
	): Promise<string> {
		return await withSpan(
			{
				name: 'queue.enqueue',
				attributes: {
					'queue.task_type': taskType,
					'queue.priority': options?.priority ?? 0,
					'queue.max_attempts': options?.maxAttempts ?? 5,
					'queue.scheduled': options?.runAt !== undefined,
					'net.peer.name': new URL(getQueueBaseUrl()).hostname,
				},
			},
			async () => {
				const body = {
					task_type: taskType,
					payload,
					priority: options?.priority ?? 0,
					run_at: options?.runAt?.toISOString(),
					max_attempts: options?.maxAttempts ?? 5,
				};
				const response = await this.fetchWithTimeout(`${getQueueBaseUrl()}/enqueue`, {
					method: 'POST',
					headers: getRequestHeaders(),
					body: JSON.stringify(body),
				});
				if (!response.ok) {
					const text = await response.text();
					throw new Error(`Failed to enqueue job: ${response.status} ${text}`);
				}
				const jobIdResult = (await response.json()) as {job_id: string};
				setSpanAttributes({'queue.job_id': jobIdResult.job_id});
				return jobIdResult.job_id;
			},
		);
	}

	/**
	 * Long-polls (up to 5s server-side wait) and leases up to `limit` jobs of
	 * the given task types. Returns an empty array when nothing is available.
	 * @throws Error when the queue service responds with a non-2xx status
	 */
	async dequeue(taskTypes: Array<string>, limit = 1): Promise<Array<LeasedQueueJob>> {
		return await withSpan(
			{
				name: 'queue.dequeue',
				attributes: {
					'queue.task_types': taskTypes.join(','),
					'queue.limit': limit,
					'queue.service': 'fluxer-queue',
				},
			},
			async () => {
				addSpanEvent('dequeue.start');
				const url = new URL(`${getQueueBaseUrl()}/dequeue`);
				url.searchParams.set('task_types', taskTypes.join(','));
				url.searchParams.set('limit', limit.toString());
				url.searchParams.set('wait_time_ms', '5000');
				const response = await this.fetchWithTimeout(url.toString(), {method: 'GET', headers: getAuthHeaders()});
				if (!response.ok) {
					const text = await response.text();
					throw new Error(`Failed to dequeue job: ${response.status} ${text}`);
				}
				addSpanEvent('dequeue.parse_response');
				const jobs = (await response.json()) as Array<LeasedQueueJob>;
				const jobCount = jobs?.length ?? 0;
				setSpanAttributes({
					'queue.jobs_returned': jobCount,
					'queue.empty': jobCount === 0,
				});
				addSpanEvent('dequeue.complete');
				return jobs ?? [];
			},
		);
	}

	/**
	 * Creates or updates a recurring (cron) job; `id` is the upsert key.
	 * @throws Error when the queue service responds with a non-2xx status
	 */
	async upsertCron(id: string, taskType: string, payload: WorkerJobPayload, cronExpression: string): Promise<void> {
		const response = await this.fetchWithTimeout(`${getQueueBaseUrl()}/cron`, {
			method: 'POST',
			headers: getRequestHeaders(),
			body: JSON.stringify({id, task_type: taskType, payload, cron_expression: cronExpression}),
		});
		if (!response.ok) {
			const text = await response.text();
			throw new Error(`Failed to upsert cron job: ${response.status} ${text}`);
		}
	}

	/** Acknowledges (completes) a leased job identified by its receipt. */
	async complete(receipt: string): Promise<void> {
		return await withSpan(
			{
				name: 'queue.complete',
				attributes: {
					'queue.receipt': receipt,
				},
			},
			async () => {
				const response = await this.fetchWithTimeout(`${getQueueBaseUrl()}/ack`, {
					method: 'POST',
					headers: getRequestHeaders(),
					body: JSON.stringify({receipt}),
				});
				if (!response.ok) {
					const text = await response.text();
					throw new Error(`Failed to complete job: ${response.status} ${text}`);
				}
			},
		);
	}

	/** Extends the visibility deadline of a leased job by `additionalMs`. */
	async extendVisibility(receipt: string, additionalMs: number): Promise<void> {
		return await withSpan(
			{
				name: 'queue.extend_visibility',
				attributes: {
					'queue.receipt': receipt,
					'queue.additional_ms': additionalMs,
				},
			},
			async () => {
				const response = await this.fetchWithTimeout(`${getQueueBaseUrl()}/extend`, {
					method: 'POST',
					headers: getRequestHeaders(),
					body: JSON.stringify({receipt, additional_ms: additionalMs}),
				});
				if (!response.ok) {
					const text = await response.text();
					throw new Error(`Failed to extend visibility: ${response.status} ${text}`);
				}
			},
		);
	}

	/** Negatively acknowledges a leased job, recording the error message. */
	async fail(receipt: string, error: string): Promise<void> {
		return await withSpan(
			{
				name: 'queue.fail',
				attributes: {
					'queue.receipt': receipt,
					'queue.error_message': error,
				},
			},
			async () => {
				const response = await this.fetchWithTimeout(`${getQueueBaseUrl()}/nack`, {
					method: 'POST',
					headers: getRequestHeaders(),
					body: JSON.stringify({receipt, error}),
				});
				if (!response.ok) {
					const text = await response.text();
					throw new Error(`Failed to fail job: ${response.status} ${text}`);
				}
			},
		);
	}

	/**
	 * Cancels a pending job. Returns false when the job is unknown (404),
	 * e.g. because it already ran; throws on any other error status.
	 */
	async cancelJob(jobId: string): Promise<boolean> {
		const response = await this.fetchWithTimeout(`${getQueueBaseUrl()}/job/${jobId}`, {
			method: 'DELETE',
			headers: getAuthHeaders(),
		});
		if (!response.ok) {
			const text = await response.text();
			if (response.status === 404) {
				return false;
			}
			throw new Error(`Failed to cancel job: ${response.status} ${text}`);
		}
		const result = (await response.json()) as {success: boolean};
		return result.success ?? true;
	}

	/**
	 * Re-enqueues a job from the dead letter queue. Returns false when the
	 * job is not in the DLQ (404); throws on any other error status.
	 */
	async retryDeadLetterJob(jobId: string): Promise<boolean> {
		const response = await this.fetchWithTimeout(`${getQueueBaseUrl()}/retry/${jobId}`, {
			method: 'POST',
			headers: getAuthHeaders(),
		});
		if (!response.ok) {
			const text = await response.text();
			if (response.status === 404) {
				return false;
			}
			throw new Error(`Failed to retry job: ${response.status} ${text}`);
		}
		return true;
	}
}

View File

@@ -0,0 +1,46 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {WorkerDependencies} from '@fluxer/api/src/worker/WorkerDependencies';
import {
clearWorkerDependencies as clearWorkerDependenciesBase,
getWorkerDependencies as getWorkerDependenciesBase,
hasWorkerDependencies as hasWorkerDependenciesBase,
setWorkerDependencies as setWorkerDependenciesBase,
} from '@fluxer/worker/src/context/WorkerContext';
/** Stores the fully-typed API worker dependency graph in the shared worker context. */
export function setWorkerDependencies(dependencies: WorkerDependencies): void {
setWorkerDependenciesBase(dependencies);
}
/**
 * Test-only variant of setWorkerDependencies that accepts a partial graph,
 * so tests can supply just the services they exercise.
 */
export function setWorkerDependenciesForTest(dependencies: Partial<WorkerDependencies>): void {
setWorkerDependenciesBase(dependencies);
}
/** Retrieves the stored dependency graph, narrowed to the API's WorkerDependencies type. */
export function getWorkerDependencies(): WorkerDependencies {
return getWorkerDependenciesBase<WorkerDependencies>();
}
/** True when a dependency graph has been stored in the worker context. */
export function hasWorkerDependencies(): boolean {
return hasWorkerDependenciesBase();
}
/** Removes any stored dependency graph from the worker context. */
export function clearWorkerDependencies(): void {
clearWorkerDependenciesBase();
}

View File

@@ -0,0 +1,406 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {AdminRepository} from '@fluxer/api/src/admin/AdminRepository';
import {AdminArchiveRepository} from '@fluxer/api/src/admin/repositories/AdminArchiveRepository';
import {SystemDmJobRepository} from '@fluxer/api/src/admin/repositories/SystemDmJobRepository';
import {Config} from '@fluxer/api/src/Config';
import {ChannelRepository} from '@fluxer/api/src/channel/ChannelRepository';
import {ChannelService} from '@fluxer/api/src/channel/services/ChannelService';
import {ConnectionRepository} from '@fluxer/api/src/connection/ConnectionRepository';
import {ConnectionService} from '@fluxer/api/src/connection/ConnectionService';
import {CsamEvidenceRetentionService} from '@fluxer/api/src/csam/CsamEvidenceRetentionService';
import {CsamScanJobService} from '@fluxer/api/src/csam/CsamScanJobService';
import {createEmailProvider} from '@fluxer/api/src/email/EmailProviderFactory';
import {FavoriteMemeRepository} from '@fluxer/api/src/favorite_meme/FavoriteMemeRepository';
import {GuildAuditLogService} from '@fluxer/api/src/guild/GuildAuditLogService';
import {GuildRepository} from '@fluxer/api/src/guild/repositories/GuildRepository';
import {ExpressionAssetPurger} from '@fluxer/api/src/guild/services/content/ExpressionAssetPurger';
import {GuildService} from '@fluxer/api/src/guild/services/GuildService';
import {AssetDeletionQueue} from '@fluxer/api/src/infrastructure/AssetDeletionQueue';
import {AvatarService} from '@fluxer/api/src/infrastructure/AvatarService';
import {CloudflarePurgeQueue, NoopPurgeQueue} from '@fluxer/api/src/infrastructure/CloudflarePurgeQueue';
import {DisabledLiveKitService} from '@fluxer/api/src/infrastructure/DisabledLiveKitService';
import {DiscriminatorService} from '@fluxer/api/src/infrastructure/DiscriminatorService';
import {EmbedService} from '@fluxer/api/src/infrastructure/EmbedService';
import {EntityAssetService} from '@fluxer/api/src/infrastructure/EntityAssetService';
import type {IGatewayService} from '@fluxer/api/src/infrastructure/IGatewayService';
import type {ILiveKitService} from '@fluxer/api/src/infrastructure/ILiveKitService';
import type {IMediaService} from '@fluxer/api/src/infrastructure/IMediaService';
import {InMemoryVoiceRoomStore} from '@fluxer/api/src/infrastructure/InMemoryVoiceRoomStore';
import type {IStorageService} from '@fluxer/api/src/infrastructure/IStorageService';
import type {IVoiceRoomStore} from '@fluxer/api/src/infrastructure/IVoiceRoomStore';
import {KVAccountDeletionQueueService} from '@fluxer/api/src/infrastructure/KVAccountDeletionQueueService';
import {KVActivityTracker} from '@fluxer/api/src/infrastructure/KVActivityTracker';
import {KVBulkMessageDeletionQueueService} from '@fluxer/api/src/infrastructure/KVBulkMessageDeletionQueueService';
import {LiveKitService} from '@fluxer/api/src/infrastructure/LiveKitService';
import type {SnowflakeService} from '@fluxer/api/src/infrastructure/SnowflakeService';
import {createStorageService} from '@fluxer/api/src/infrastructure/StorageServiceFactory';
import {UnfurlerService} from '@fluxer/api/src/infrastructure/UnfurlerService';
import {UserCacheService} from '@fluxer/api/src/infrastructure/UserCacheService';
import {VirusScanService} from '@fluxer/api/src/infrastructure/VirusScanService';
import {VoiceRoomStore} from '@fluxer/api/src/infrastructure/VoiceRoomStore';
import {InstanceConfigRepository} from '@fluxer/api/src/instance/InstanceConfigRepository';
import {InviteRepository} from '@fluxer/api/src/invite/InviteRepository';
import {InviteService} from '@fluxer/api/src/invite/InviteService';
import {Logger} from '@fluxer/api/src/Logger';
import {LimitConfigService} from '@fluxer/api/src/limits/LimitConfigService';
import {
getGatewayService,
getInjectedS3Service,
getKVClient,
getMediaService,
getWorkerService,
} from '@fluxer/api/src/middleware/ServiceRegistry';
import {ApplicationRepository} from '@fluxer/api/src/oauth/repositories/ApplicationRepository';
import {OAuth2TokenRepository} from '@fluxer/api/src/oauth/repositories/OAuth2TokenRepository';
import {PackRepository} from '@fluxer/api/src/pack/PackRepository';
import {PackService} from '@fluxer/api/src/pack/PackService';
import {ReadStateRepository} from '@fluxer/api/src/read_state/ReadStateRepository';
import {ReadStateService} from '@fluxer/api/src/read_state/ReadStateService';
import {ReportRepository} from '@fluxer/api/src/report/ReportRepository';
import {PaymentRepository} from '@fluxer/api/src/user/repositories/PaymentRepository';
import {UserContactChangeLogRepository} from '@fluxer/api/src/user/repositories/UserContactChangeLogRepository';
import {UserRepository} from '@fluxer/api/src/user/repositories/UserRepository';
import {UserContactChangeLogService} from '@fluxer/api/src/user/services/UserContactChangeLogService';
import {UserDeletionEligibilityService} from '@fluxer/api/src/user/services/UserDeletionEligibilityService';
import {UserHarvestRepository} from '@fluxer/api/src/user/UserHarvestRepository';
import {UserPermissionUtils} from '@fluxer/api/src/utils/UserPermissionUtils';
import {VoiceRepository} from '@fluxer/api/src/voice/VoiceRepository';
import {VoiceTopology} from '@fluxer/api/src/voice/VoiceTopology';
import {WebhookRepository} from '@fluxer/api/src/webhook/WebhookRepository';
import {KVCacheProvider} from '@fluxer/cache/src/providers/KVCacheProvider';
import {EmailI18nService} from '@fluxer/email/src/EmailI18nService';
import type {EmailConfig, UserBouncedEmailChecker} from '@fluxer/email/src/EmailProviderTypes';
import {EmailService} from '@fluxer/email/src/EmailService';
import type {IEmailService} from '@fluxer/email/src/IEmailService';
import {TestEmailService} from '@fluxer/email/src/TestEmailService';
import type {IKVProvider} from '@fluxer/kv_client/src/IKVProvider';
import {RateLimitService} from '@fluxer/rate_limit/src/RateLimitService';
import type {IWorkerService} from '@fluxer/worker/src/contracts/IWorkerService';
import Stripe from 'stripe';
// Lazily-created shared TestEmailService instance used in test mode.
let _workerTestEmailService: TestEmailService | null = null;

/** Returns the process-wide TestEmailService, creating it on first use. */
function getWorkerTestEmailService(): TestEmailService {
	_workerTestEmailService ??= new TestEmailService();
	return _workerTestEmailService;
}
/**
 * Complete dependency graph handed to worker tasks via the worker context.
 * Built once at startup by initializeWorkerDependencies().
 */
export interface WorkerDependencies {
// Core infrastructure.
kvClient: IKVProvider;
snowflakeService: SnowflakeService;
limitConfigService: LimitConfigService;
// Repositories (data access).
userRepository: UserRepository;
channelRepository: ChannelRepository;
guildRepository: GuildRepository;
favoriteMemeRepository: FavoriteMemeRepository;
applicationRepository: ApplicationRepository;
oauth2TokenRepository: OAuth2TokenRepository;
readStateRepository: ReadStateRepository;
adminRepository: AdminRepository;
reportRepository: ReportRepository;
paymentRepository: PaymentRepository;
userHarvestRepository: UserHarvestRepository;
adminArchiveRepository: AdminArchiveRepository;
systemDmJobRepository: SystemDmJobRepository;
// Null when voice is disabled in Config.
voiceRepository: VoiceRepository | null;
connectionRepository: ConnectionRepository;
connectionService: ConnectionService;
// Caching.
cacheService: KVCacheProvider;
userCacheService: UserCacheService;
// Storage and asset lifecycle.
storageService: IStorageService;
assetDeletionQueue: AssetDeletionQueue;
// No-op implementation when Cloudflare purging is disabled.
purgeQueue: CloudflarePurgeQueue | NoopPurgeQueue;
gatewayService: IGatewayService;
mediaService: IMediaService;
discriminatorService: DiscriminatorService;
avatarService: AvatarService;
virusScanService: VirusScanService;
rateLimitService: RateLimitService;
// TestEmailService in test mode, configured provider otherwise.
emailService: IEmailService;
inviteService: InviteService;
workerService: IWorkerService;
unfurlerService: UnfurlerService;
embedService: EmbedService;
readStateService: ReadStateService;
userPermissionUtils: UserPermissionUtils;
activityTracker: KVActivityTracker;
// Deletion queues and eligibility checks.
deletionQueueService: KVAccountDeletionQueueService;
bulkMessageDeletionQueueService: KVBulkMessageDeletionQueueService;
deletionEligibilityService: UserDeletionEligibilityService;
// Voice: inert in-memory/disabled fallbacks when voice is off.
voiceRoomStore: IVoiceRoomStore;
liveKitService: ILiveKitService;
voiceTopology: VoiceTopology | null;
channelService: ChannelService;
guildAuditLogService: GuildAuditLogService;
contactChangeLogService: UserContactChangeLogService;
csamEvidenceRetentionService: CsamEvidenceRetentionService;
// Null unless Stripe is enabled and a secret key is configured.
stripe: Stripe | null;
csamScanJobService: CsamScanJobService;
}
/**
 * Builds and wires the complete WorkerDependencies graph used by worker
 * tasks: repositories, caches, storage, email, optional voice services,
 * optional Stripe, and the composed channel/guild/invite services.
 *
 * Construction order matters: services later in the function depend on
 * instances created above them.
 *
 * @param snowflakeService already-initialized shared ID generator
 * @returns the fully-wired dependency graph
 */
export async function initializeWorkerDependencies(snowflakeService: SnowflakeService): Promise<WorkerDependencies> {
Logger.info('Initializing worker dependencies...');
const kvClient = getKVClient();
// Repositories: constructed up front with no arguments.
const userRepository = new UserRepository();
const channelRepository = new ChannelRepository();
const guildRepository = new GuildRepository();
const favoriteMemeRepository = new FavoriteMemeRepository();
const applicationRepository = new ApplicationRepository();
const oauth2TokenRepository = new OAuth2TokenRepository();
const readStateRepository = new ReadStateRepository();
const adminRepository = new AdminRepository();
const adminArchiveRepository = new AdminArchiveRepository();
const systemDmJobRepository = new SystemDmJobRepository();
const reportRepository = new ReportRepository();
const paymentRepository = new PaymentRepository();
const userHarvestRepository = new UserHarvestRepository();
const contactChangeLogRepository = new UserContactChangeLogRepository();
const contactChangeLogService = new UserContactChangeLogService(contactChangeLogRepository);
const connectionRepository = new ConnectionRepository();
// Cache and limit configuration; limit config is loaded eagerly below.
const cacheService = new KVCacheProvider({client: kvClient});
const instanceConfigRepository = new InstanceConfigRepository();
// Second KV client handed to LimitConfigService — presumably a dedicated
// subscriber connection; confirm against getKVClient() semantics.
const limitConfigSubscriber = getKVClient();
const limitConfigService = new LimitConfigService(instanceConfigRepository, cacheService, limitConfigSubscriber);
await limitConfigService.initialize();
const userCacheService = new UserCacheService(cacheService, userRepository);
// Storage and asset lifecycle.
const storageService = createStorageService({s3Service: getInjectedS3Service()});
const csamEvidenceRetentionService = new CsamEvidenceRetentionService(storageService);
const assetDeletionQueue = new AssetDeletionQueue(kvClient);
// CDN purge queue is a no-op unless Cloudflare purging is enabled.
const purgeQueue = Config.cloudflare.purgeEnabled ? new CloudflarePurgeQueue(kvClient) : new NoopPurgeQueue();
const gatewayService = getGatewayService();
const connectionService = new ConnectionService(connectionRepository, gatewayService, null);
const mediaService = getMediaService();
const discriminatorService = new DiscriminatorService(userRepository, cacheService, limitConfigService);
const avatarService = new AvatarService(storageService, mediaService, limitConfigService);
const entityAssetService = new EntityAssetService(
storageService,
mediaService,
assetDeletionQueue,
limitConfigService,
);
const virusScanService = new VirusScanService(cacheService);
const rateLimitService = new RateLimitService(cacheService);
// Expression packs.
const packRepository = new PackRepository();
const packAssetPurger = new ExpressionAssetPurger(assetDeletionQueue);
const packService = new PackService(
packRepository,
guildRepository,
avatarService,
snowflakeService,
packAssetPurger,
userRepository,
userCacheService,
limitConfigService,
);
// Email: TestEmailService in test mode, otherwise the configured provider.
const emailConfig: EmailConfig = {
enabled: Config.email.enabled,
fromEmail: Config.email.fromEmail,
fromName: Config.email.fromName,
appBaseUrl: Config.endpoints.webApp,
marketingBaseUrl: Config.endpoints.marketing,
};
// Reports an address as bounced when the owning user record says so.
const bouncedEmailChecker: UserBouncedEmailChecker = {
isEmailBounced: async (email: string) => {
const user = await userRepository.findByEmail(email);
return user?.emailBounced ?? false;
},
};
const emailI18n = new EmailI18nService();
const emailProvider = createEmailProvider(Config.email);
const emailService: IEmailService = Config.dev.testModeEnabled
? getWorkerTestEmailService()
: new EmailService(emailConfig, emailI18n, emailProvider, bouncedEmailChecker);
// Services layered on the shared worker service and gateway.
const workerService = getWorkerService();
const guildAuditLogService = new GuildAuditLogService(guildRepository, snowflakeService, workerService);
const unfurlerService = new UnfurlerService(cacheService, mediaService);
const embedService = new EmbedService(channelRepository, cacheService, unfurlerService, mediaService, workerService);
const readStateService = new ReadStateService(readStateRepository, gatewayService);
const userPermissionUtils = new UserPermissionUtils(userRepository, guildRepository);
const activityTracker = new KVActivityTracker(kvClient);
const deletionQueueService = new KVAccountDeletionQueueService(kvClient, userRepository);
const bulkMessageDeletionQueueService = new KVBulkMessageDeletionQueueService(kvClient);
const deletionEligibilityService = new UserDeletionEligibilityService(kvClient);
const csamScanJobService = new CsamScanJobService();
// Voice is optional: real services when enabled, inert fallbacks otherwise.
let voiceRepository: VoiceRepository | null = null;
let voiceTopology: VoiceTopology | null = null;
let voiceRoomStore: IVoiceRoomStore;
let liveKitService: ILiveKitService;
if (Config.voice.enabled) {
voiceRepository = new VoiceRepository();
voiceTopology = new VoiceTopology(voiceRepository, null);
await voiceTopology.initialize();
voiceRoomStore = new VoiceRoomStore(kvClient);
liveKitService = new LiveKitService(voiceTopology);
Logger.info('Voice services initialized');
} else {
voiceRoomStore = new InMemoryVoiceRoomStore();
liveKitService = new DisabledLiveKitService();
}
// High-level composed services: channel, then guild, then invite.
const inviteRepository = new InviteRepository();
const webhookRepository = new WebhookRepository();
const channelService = new ChannelService(
channelRepository,
userRepository,
guildRepository,
packService,
userCacheService,
embedService,
readStateService,
cacheService,
storageService,
gatewayService,
mediaService,
avatarService,
workerService,
virusScanService,
snowflakeService,
rateLimitService,
purgeQueue,
favoriteMemeRepository,
guildAuditLogService,
voiceRoomStore,
liveKitService,
inviteRepository,
webhookRepository,
limitConfigService,
undefined,
);
const guildService = new GuildService(
guildRepository,
channelRepository,
inviteRepository,
channelService,
userCacheService,
gatewayService,
entityAssetService,
avatarService,
assetDeletionQueue,
userRepository,
mediaService,
cacheService,
snowflakeService,
rateLimitService,
workerService,
webhookRepository,
guildAuditLogService,
limitConfigService,
undefined,
);
const inviteService = new InviteService(
inviteRepository,
guildService,
channelService,
gatewayService,
guildAuditLogService,
userRepository,
packRepository,
packService,
limitConfigService,
);
// Stripe is only constructed when enabled and a secret key is present.
let stripe: Stripe | null = null;
if (Config.stripe.enabled && Config.stripe.secretKey) {
stripe = new Stripe(Config.stripe.secretKey, {
apiVersion: '2026-01-28.clover',
// Fetch-based HTTP client in test mode — presumably so tests can
// intercept outgoing HTTP; confirm.
httpClient: Config.dev.testModeEnabled ? Stripe.createFetchHttpClient() : undefined,
});
Logger.info('Stripe initialized');
}
Logger.info('Worker dependencies initialized successfully');
return {
kvClient,
snowflakeService,
limitConfigService,
userRepository,
channelRepository,
guildRepository,
favoriteMemeRepository,
applicationRepository,
oauth2TokenRepository,
readStateRepository,
adminRepository,
reportRepository,
paymentRepository,
userHarvestRepository,
adminArchiveRepository,
systemDmJobRepository,
voiceRepository,
connectionRepository,
connectionService,
cacheService,
userCacheService,
storageService,
assetDeletionQueue,
purgeQueue,
gatewayService,
mediaService,
discriminatorService,
avatarService,
virusScanService,
rateLimitService,
emailService,
inviteService,
workerService,
unfurlerService,
embedService,
readStateService,
userPermissionUtils,
activityTracker,
deletionQueueService,
bulkMessageDeletionQueueService,
deletionEligibilityService,
voiceRoomStore,
liveKitService,
voiceTopology,
channelService,
guildAuditLogService,
contactChangeLogService,
csamEvidenceRetentionService,
stripe,
csamScanJobService,
};
}
/**
 * Shuts down the worker dependency graph.
 * NOTE(review): currently log-only — no per-service teardown happens here;
 * the caller stops the runner/metrics/snowflake services itself.
 */
export async function shutdownWorkerDependencies(_deps: WorkerDependencies): Promise<void> {
Logger.info('Shutting down worker dependencies...');
Logger.info('Worker dependencies shut down successfully');
}

View File

@@ -0,0 +1,135 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {getMetricsService, initializeMetricsService} from '@fluxer/api/src/infrastructure/MetricsService';
import {SnowflakeService} from '@fluxer/api/src/infrastructure/SnowflakeService';
import {Logger} from '@fluxer/api/src/Logger';
import {getKVClient} from '@fluxer/api/src/middleware/ServiceRegistry';
import {initializeSearch} from '@fluxer/api/src/SearchFactory';
import {HttpWorkerQueue} from '@fluxer/api/src/worker/HttpWorkerQueue';
import {setWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {initializeWorkerDependencies, shutdownWorkerDependencies} from '@fluxer/api/src/worker/WorkerDependencies';
import {WorkerMetricsCollector} from '@fluxer/api/src/worker/WorkerMetricsCollector';
import {WorkerRunner} from '@fluxer/api/src/worker/WorkerRunner';
import {workerTasks} from '@fluxer/api/src/worker/WorkerTaskRegistry';
import {setupGracefulShutdown} from '@fluxer/hono/src/Server';
import {captureException, flushSentry as flush} from '@fluxer/sentry/src/Sentry';
import {ms} from 'itty-time';
// Number of jobs the WorkerRunner processes concurrently.
const WORKER_CONCURRENCY = 20;
/**
 * Registers (upserts) all recurring worker jobs with the queue service.
 * Each cron job's id is its task type. Failures are logged but not
 * re-thrown: the worker still starts if cron registration fails.
 */
async function registerCronJobs(queue: HttpWorkerQueue): Promise<void> {
	// taskType (also used as the cron id) paired with its cron expression.
	const schedules: ReadonlyArray<readonly [string, string]> = [
		['processAssetDeletionQueue', '0 */5 * * * *'],
		['processCloudflarePurgeQueue', '0 */2 * * * *'],
		['processPendingBulkMessageDeletions', '0 */10 * * * *'],
		['processInactivityDeletions', '0 0 */6 * * *'],
		['expireAttachments', '0 0 */12 * * *'],
		['cleanupCsamEvidence', '0 0 3 * * *'],
		['csamScanConsumer', '* * * * * *'],
		['syncDiscoveryIndex', '0 */15 * * * *'],
	];
	try {
		for (const [taskType, cronExpression] of schedules) {
			await queue.upsertCron(taskType, taskType, {}, cronExpression);
		}
		Logger.info('Cron jobs registered successfully');
	} catch (error) {
		Logger.error({error}, 'Failed to register cron jobs');
	}
}
/**
 * Entrypoint for the worker backend. Boots metrics, the shared snowflake
 * service, the full dependency graph, cron registration, the metrics
 * collector and the job runner; installs graceful-shutdown and crash
 * handlers. Exits non-zero when startup fails or a crash forces shutdown.
 */
export async function startWorkerMain(): Promise<void> {
	Logger.info('Starting worker backend...');
	initializeMetricsService();
	Logger.info('MetricsService initialized');
	const kvClient = getKVClient();
	const snowflakeService = new SnowflakeService(kvClient);
	await snowflakeService.initialize();
	Logger.info('Shared SnowflakeService initialized');
	// Build and publish the dependency graph before any task can run.
	const dependencies = await initializeWorkerDependencies(snowflakeService);
	setWorkerDependencies(dependencies);
	const queue = new HttpWorkerQueue();
	await registerCronJobs(queue);
	const metricsCollector = new WorkerMetricsCollector({
		kvClient: dependencies.kvClient,
		metricsService: getMetricsService(),
		assetDeletionQueue: dependencies.assetDeletionQueue,
		purgeQueue: dependencies.purgeQueue,
		bulkMessageDeletionQueue: dependencies.bulkMessageDeletionQueueService,
		accountDeletionQueue: dependencies.deletionQueueService,
	});
	const runner = new WorkerRunner({
		tasks: workerTasks,
		concurrency: WORKER_CONCURRENCY,
	});
	try {
		// Search is best-effort: the worker still runs without it.
		try {
			await initializeSearch();
			Logger.info('Search initialised for worker backend');
		} catch (error) {
			Logger.warn({err: error}, 'Search initialisation failed; continuing without search');
		}
		metricsCollector.start();
		Logger.info('WorkerMetricsCollector started');
		await runner.start();
		Logger.info(`Worker runner started with ${WORKER_CONCURRENCY} workers`);
		const shutdown = async (): Promise<void> => {
			Logger.info('Shutting down worker backend...');
			metricsCollector.stop();
			await runner.stop();
			await shutdownWorkerDependencies(dependencies);
			await snowflakeService.shutdown();
		};
		setupGracefulShutdown(shutdown, {logger: Logger, timeoutMs: 30000});
		process.on('uncaughtException', async (error) => {
			Logger.error({err: error}, 'Uncaught Exception');
			captureException(error);
			await flush(ms('2 seconds'));
			await shutdown();
			// An uncaught exception is a crash: exit non-zero so supervisors
			// restart the process. (Previously exited 0, masking the failure.)
			process.exit(1);
		});
		process.on('unhandledRejection', async (reason: unknown) => {
			Logger.error({err: reason}, 'Unhandled Rejection at Promise');
			captureException(reason instanceof Error ? reason : new Error(String(reason)));
			await flush(ms('2 seconds'));
			// Safety net: force-exit if shutdown hangs; unref() keeps the timer
			// from holding the process open on its own.
			setTimeout(() => process.exit(1), ms('5 seconds')).unref();
			await shutdown();
		});
	} catch (error: unknown) {
		Logger.error({err: error}, 'Failed to start worker backend');
		captureException(error instanceof Error ? error : new Error(String(error)));
		await flush(ms('2 seconds'));
		process.exit(1);
	}
}

View File

@@ -0,0 +1,163 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {AssetDeletionQueue} from '@fluxer/api/src/infrastructure/AssetDeletionQueue';
import type {IPurgeQueue} from '@fluxer/api/src/infrastructure/CloudflarePurgeQueue';
import type {IMetricsService} from '@fluxer/api/src/infrastructure/IMetricsService';
import type {KVAccountDeletionQueueService} from '@fluxer/api/src/infrastructure/KVAccountDeletionQueueService';
import type {KVBulkMessageDeletionQueueService} from '@fluxer/api/src/infrastructure/KVBulkMessageDeletionQueueService';
import {Logger} from '@fluxer/api/src/Logger';
import type {IKVProvider} from '@fluxer/kv_client/src/IKVProvider';
import {ms} from 'itty-time';
// Constructor dependencies for WorkerMetricsCollector.
interface WorkerMetricsCollectorOptions {
  kvClient: IKVProvider;
  metricsService: IMetricsService;
  assetDeletionQueue: AssetDeletionQueue;
  purgeQueue: IPurgeQueue;
  bulkMessageDeletionQueue: KVBulkMessageDeletionQueueService;
  accountDeletionQueue: KVAccountDeletionQueueService;
  // Reporting period in milliseconds; defaults to 30 seconds when omitted.
  reportIntervalMs?: number;
}
/**
 * Periodically samples KV-backed queue depths and KV connectivity, reporting
 * them through the metrics service as gauges plus an accumulated error
 * counter. Runs one immediate sample on start, then one per interval.
 */
export class WorkerMetricsCollector {
  private readonly kvClient: IKVProvider;
  private readonly metricsService: IMetricsService;
  private readonly assetDeletionQueue: AssetDeletionQueue;
  private readonly purgeQueue: IPurgeQueue;
  private readonly bulkMessageDeletionQueue: KVBulkMessageDeletionQueueService;
  private readonly accountDeletionQueue: KVAccountDeletionQueueService;
  private readonly reportIntervalMs: number;
  // Active interval timer; null whenever the collector is stopped.
  private timer: ReturnType<typeof setInterval> | null = null;
  // KV failures observed since the last report; flushed as an error counter.
  private kvErrorCount = 0;

  constructor(options: WorkerMetricsCollectorOptions) {
    this.kvClient = options.kvClient;
    this.metricsService = options.metricsService;
    this.assetDeletionQueue = options.assetDeletionQueue;
    this.purgeQueue = options.purgeQueue;
    this.bulkMessageDeletionQueue = options.bulkMessageDeletionQueue;
    this.accountDeletionQueue = options.accountDeletionQueue;
    this.reportIntervalMs = options.reportIntervalMs ?? ms('30 seconds');
  }

  /** Begins collection: one immediate sample, then one per interval. Idempotent. */
  start(): void {
    if (this.timer !== null) {
      return;
    }
    Logger.info({intervalMs: this.reportIntervalMs}, 'Starting WorkerMetricsCollector');
    this.collectAndReport().catch((err) => {
      Logger.error({err}, 'Initial metrics collection failed');
    });
    this.timer = setInterval(() => {
      this.collectAndReport().catch((err) => {
        Logger.error({err}, 'Metrics collection failed');
      });
    }, this.reportIntervalMs);
  }

  /** Stops the periodic collection; safe to call when already stopped. */
  stop(): void {
    if (this.timer === null) {
      return;
    }
    clearInterval(this.timer);
    this.timer = null;
    Logger.info('Stopped WorkerMetricsCollector');
  }

  // Gathers queue sizes and KV health concurrently, then reports both.
  private async collectAndReport(): Promise<void> {
    const [queueSizes, kvConnected] = await Promise.all([
      this.collectKVQueueSizes(),
      this.collectKVConnectionStatus(),
    ]);
    this.reportKVQueueSizes(queueSizes);
    this.reportKVHealthMetrics(kvConnected);
  }

  // Reads all four queue depths concurrently; on failure, counts the error
  // and reports zeros rather than throwing.
  private async collectKVQueueSizes(): Promise<{
    assetDeletion: number;
    cloudflarePurge: number;
    bulkMessageDeletion: number;
    accountDeletion: number;
  }> {
    try {
      const sizes = await Promise.all([
        this.assetDeletionQueue.getQueueSize(),
        this.purgeQueue.getQueueSize(),
        this.bulkMessageDeletionQueue.getQueueSize(),
        this.accountDeletionQueue.getQueueSize(),
      ]);
      return {
        assetDeletion: sizes[0],
        cloudflarePurge: sizes[1],
        bulkMessageDeletion: sizes[2],
        accountDeletion: sizes[3],
      };
    } catch (err) {
      this.kvErrorCount++;
      Logger.error({err}, 'Failed to collect KV queue sizes');
      return {assetDeletion: 0, cloudflarePurge: 0, bulkMessageDeletion: 0, accountDeletion: 0};
    }
  }

  // Emits one gauge per queue, in a fixed order.
  private reportKVQueueSizes(sizes: {
    assetDeletion: number;
    cloudflarePurge: number;
    bulkMessageDeletion: number;
    accountDeletion: number;
  }): void {
    const gauges: Array<[string, number]> = [
      ['worker.kv_queue.asset_deletion', sizes.assetDeletion],
      ['worker.kv_queue.cloudflare_purge', sizes.cloudflarePurge],
      ['worker.kv_queue.bulk_message_deletion', sizes.bulkMessageDeletion],
      ['worker.kv_queue.account_deletion', sizes.accountDeletion],
    ];
    for (const [name, value] of gauges) {
      this.metricsService.gauge({name, value});
    }
  }

  // Pings KV; a thrown error is counted and treated as "disconnected".
  private async collectKVConnectionStatus(): Promise<boolean> {
    try {
      return await this.kvClient.health();
    } catch (err) {
      this.kvErrorCount++;
      Logger.error({err}, 'KV health check failed');
      return false;
    }
  }

  // Reports connection status and flushes the accumulated error count.
  private reportKVHealthMetrics(isConnected: boolean): void {
    this.metricsService.gauge({
      name: 'kv.connection.status',
      value: isConnected ? 1 : 0,
    });
    if (this.kvErrorCount > 0) {
      this.metricsService.counter({
        name: 'kv.command.error',
        value: this.kvErrorCount,
      });
      this.kvErrorCount = 0;
    }
  }
}

View File

@@ -0,0 +1,181 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {randomUUID} from 'node:crypto';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerService} from '@fluxer/api/src/middleware/ServiceRegistry';
import {addSpanEvent, setSpanAttributes, withSpan} from '@fluxer/api/src/telemetry/Tracing';
import type {HttpWorkerQueue} from '@fluxer/api/src/worker/HttpWorkerQueue';
import {HttpWorkerQueue as HttpWorkerQueueClass} from '@fluxer/api/src/worker/HttpWorkerQueue';
import type {IWorkerService} from '@fluxer/worker/src/contracts/IWorkerService';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {ms} from 'itty-time';
// Construction options for WorkerRunner.
interface WorkerRunnerOptions {
  // Task handlers keyed by task type.
  tasks: Record<string, WorkerTaskHandler>;
  // Defaults to a random `worker-<uuid>` identifier when omitted.
  workerId?: string;
  // Task types this runner leases; defaults to Object.keys(tasks).
  taskTypes?: Array<string>;
  // Number of concurrent polling loops; defaults to 1.
  concurrency?: number;
}
/**
 * Pull-based job runner: leases jobs from the HTTP worker queue and executes
 * the matching handler from `tasks`, using `concurrency` independent polling
 * loops that share one abort signal.
 */
export class WorkerRunner {
  private readonly tasks: Record<string, WorkerTaskHandler>;
  // Stable identifier for this runner instance, used in logs and spans.
  private readonly workerId: string;
  private readonly taskTypes: Array<string>;
  private readonly concurrency: number;
  private readonly queue: HttpWorkerQueue;
  private readonly workerService: IWorkerService;
  private running = false;
  private abortController: AbortController | null = null;
  constructor(options: WorkerRunnerOptions) {
    this.tasks = options.tasks;
    this.workerId = options.workerId ?? `worker-${randomUUID()}`;
    this.taskTypes = options.taskTypes ?? Object.keys(options.tasks);
    this.concurrency = options.concurrency ?? 1;
    this.queue = new HttpWorkerQueueClass();
    this.workerService = getWorkerService();
  }
  /**
   * Spawns the polling loops. Idempotent while running. The loops are
   * intentionally fire-and-forget: this method resolves immediately and a
   * loop failure is only logged, never rethrown to the caller.
   */
  async start(): Promise<void> {
    if (this.running) {
      Logger.warn({workerId: this.workerId}, 'Worker already running');
      return;
    }
    this.running = true;
    this.abortController = new AbortController();
    Logger.info({workerId: this.workerId, taskTypes: this.taskTypes, concurrency: this.concurrency}, 'Worker starting');
    const workers = Array.from({length: this.concurrency}, (_, i) => this.workerLoop(i, this.abortController!.signal));
    Promise.all(workers).catch((error) => {
      Logger.error({workerId: this.workerId, error}, 'Worker loop failed unexpectedly');
    });
  }
  /**
   * Signals all loops to stop, then waits a fixed 5-second grace period.
   * NOTE(review): the loop promises themselves are not awaited, so a handler
   * running longer than the grace period may still be in flight when this
   * resolves — confirm that is acceptable at shutdown.
   */
  async stop(): Promise<void> {
    if (!this.running) {
      return;
    }
    this.running = false;
    this.abortController?.abort();
    await new Promise((resolve) => setTimeout(resolve, ms('5 seconds')));
    Logger.info({workerId: this.workerId}, 'Worker stopped');
  }
  // One polling loop: lease a single job, process it, repeat until aborted.
  private async workerLoop(workerIndex: number, signal: AbortSignal): Promise<void> {
    Logger.info({workerId: this.workerId, workerIndex}, 'Worker loop started');
    while (!signal.aborted) {
      try {
        const leasedJobs = await this.queue.dequeue(this.taskTypes, 1);
        if (!leasedJobs || leasedJobs.length === 0) {
          // NOTE(review): an empty result re-enters dequeue with no delay —
          // this assumes HttpWorkerQueue.dequeue blocks/long-polls when the
          // queue is empty; otherwise this loop would spin. TODO confirm.
          continue;
        }
        const leasedJob = leasedJobs[0]!;
        const job = leasedJob.job;
        Logger.info(
          {
            workerId: this.workerId,
            workerIndex,
            jobId: job.id,
            taskType: job.task_type,
            attempts: job.attempts,
            receipt: leasedJob.receipt,
          },
          'Processing job',
        );
        const succeeded = await this.processJob(leasedJob);
        if (succeeded) {
          Logger.info({workerId: this.workerId, workerIndex, jobId: job.id}, 'Job completed successfully');
        }
      } catch (error) {
        // Dequeue or unexpected processing errors back off for one second.
        Logger.error({workerId: this.workerId, workerIndex, error}, 'Worker loop error');
        await this.sleep(ms('1 second'));
      }
    }
    Logger.info({workerId: this.workerId, workerIndex}, 'Worker loop stopped');
  }
  /**
   * Executes one leased job inside a tracing span. Returns true when the
   * handler succeeded and the job was completed on the queue; false when it
   * failed and was marked failed (retry/dead-letter policy is the queue's).
   */
  private async processJob(leasedJob: {
    receipt: string;
    job: {id: string; task_type: string; payload: unknown; attempts: number};
  }): Promise<boolean> {
    return await withSpan(
      {
        name: 'worker.process_job',
        attributes: {
          'worker.id': this.workerId,
          'job.id': leasedJob.job.id,
          'job.task_type': leasedJob.job.task_type,
          'job.attempts': leasedJob.job.attempts,
        },
      },
      async () => {
        const task = this.tasks[leasedJob.job.task_type];
        if (!task) {
          // Unknown task types bubble up to the loop's catch and back off.
          throw new Error(`Unknown task: ${leasedJob.job.task_type}`);
        }
        addSpanEvent('job.execution.start');
        try {
          // Handlers get a job-scoped logger and may enqueue follow-up jobs.
          await task(leasedJob.job.payload as never, {
            logger: Logger.child({jobId: leasedJob.job.id}),
            addJob: this.workerService.addJob.bind(this.workerService),
          });
          addSpanEvent('job.execution.success');
          setSpanAttributes({'job.status': 'success'});
          await this.queue.complete(leasedJob.receipt);
          return true;
        } catch (error) {
          Logger.error({jobId: leasedJob.job.id, error}, 'Job failed');
          setSpanAttributes({
            'job.status': 'failed',
            'job.error': error instanceof Error ? error.message : String(error),
          });
          addSpanEvent('job.execution.failed', {
            error: error instanceof Error ? error.message : String(error),
          });
          await this.queue.fail(leasedJob.receipt, String(error));
          return false;
        }
      },
    );
  }
  // Resolves after `ms` milliseconds. Note the parameter shadows the `ms`
  // helper imported from itty-time within this method body.
  private async sleep(ms: number): Promise<void> {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
}

View File

@@ -0,0 +1,79 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {Logger} from '@fluxer/api/src/Logger';
import {HttpWorkerQueue} from '@fluxer/api/src/worker/HttpWorkerQueue';
import type {IWorkerService} from '@fluxer/worker/src/contracts/IWorkerService';
import type {WorkerJobOptions, WorkerJobPayload} from '@fluxer/worker/src/contracts/WorkerTypes';
/**
 * IWorkerService implementation that submits jobs to the HTTP worker queue.
 */
export class WorkerService implements IWorkerService {
  private readonly queue: HttpWorkerQueue;

  constructor() {
    this.queue = new HttpWorkerQueue();
  }

  /**
   * Enqueues a job of `taskType` with the given payload. Optional scheduling
   * options are forwarded only when explicitly provided. Rethrows on failure.
   */
  async addJob<TPayload extends WorkerJobPayload = WorkerJobPayload>(
    taskType: string,
    payload: TPayload,
    options?: WorkerJobOptions,
  ): Promise<void> {
    try {
      await this.queue.enqueue(taskType, payload, {
        ...(options?.runAt === undefined ? {} : {runAt: options.runAt}),
        ...(options?.maxAttempts === undefined ? {} : {maxAttempts: options.maxAttempts}),
        ...(options?.priority === undefined ? {} : {priority: options.priority}),
      });
      Logger.debug({taskType, payload}, 'Job queued successfully');
    } catch (error) {
      Logger.error({error, taskType, payload}, 'Failed to queue job');
      throw error;
    }
  }

  /**
   * Attempts to cancel a queued job. Returns true when the job was found and
   * cancelled, false when it no longer exists. Rethrows queue errors.
   */
  async cancelJob(jobId: string): Promise<boolean> {
    try {
      const wasCancelled = await this.queue.cancelJob(jobId);
      if (!wasCancelled) {
        Logger.debug({jobId}, 'Job not found (may have already been processed)');
        return false;
      }
      Logger.info({jobId}, 'Job cancelled successfully');
      return true;
    } catch (error) {
      Logger.error({error, jobId}, 'Failed to cancel job');
      throw error;
    }
  }

  /**
   * Moves a dead-lettered job back onto the live queue. Returns true when the
   * job was found and requeued, false otherwise. Rethrows queue errors.
   */
  async retryDeadLetterJob(jobId: string): Promise<boolean> {
    try {
      const wasRetried = await this.queue.retryDeadLetterJob(jobId);
      if (!wasRetried) {
        Logger.debug({jobId}, 'Job not found in dead letter queue');
        return false;
      }
      Logger.info({jobId}, 'Dead letter job retried successfully');
      return true;
    } catch (error) {
      Logger.error({error, jobId}, 'Failed to retry dead letter job');
      throw error;
    }
  }
}

View File

@@ -0,0 +1,73 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import applicationProcessDeletion from '@fluxer/api/src/worker/tasks/ApplicationProcessDeletion';
import batchGuildAuditLogMessageDeletes from '@fluxer/api/src/worker/tasks/BatchGuildAuditLogMessageDeletes';
import bulkDeleteUserMessages from '@fluxer/api/src/worker/tasks/BulkDeleteUserMessages';
import cleanupCsamEvidence from '@fluxer/api/src/worker/tasks/CleanupCsamEvidence';
import csamScanConsumer from '@fluxer/api/src/worker/tasks/CsamScanConsumerWorker';
import deleteUserMessagesInGuildByTime from '@fluxer/api/src/worker/tasks/DeleteUserMessagesInGuildByTime';
import expireAttachments from '@fluxer/api/src/worker/tasks/ExpireAttachments';
import extractEmbeds from '@fluxer/api/src/worker/tasks/ExtractEmbeds';
import handleMentions from '@fluxer/api/src/worker/tasks/HandleMentions';
import harvestGuildData from '@fluxer/api/src/worker/tasks/HarvestGuildData';
import harvestUserData from '@fluxer/api/src/worker/tasks/HarvestUserData';
import indexChannelMessages from '@fluxer/api/src/worker/tasks/IndexChannelMessages';
import indexGuildMembers from '@fluxer/api/src/worker/tasks/IndexGuildMembers';
import messageShred from '@fluxer/api/src/worker/tasks/MessageShred';
import processAssetDeletionQueue from '@fluxer/api/src/worker/tasks/ProcessAssetDeletionQueue';
import processCloudflarePurgeQueue from '@fluxer/api/src/worker/tasks/ProcessCloudflarePurgeQueue';
import processInactivityDeletions from '@fluxer/api/src/worker/tasks/ProcessInactivityDeletions';
import processPendingBulkMessageDeletions from '@fluxer/api/src/worker/tasks/ProcessPendingBulkMessageDeletions';
import refreshSearchIndex from '@fluxer/api/src/worker/tasks/RefreshSearchIndex';
import revalidateUserConnections from '@fluxer/api/src/worker/tasks/RevalidateUserConnections';
import {sendScheduledMessage} from '@fluxer/api/src/worker/tasks/SendScheduledMessage';
import {sendSystemDm} from '@fluxer/api/src/worker/tasks/SendSystemDm';
import syncDiscoveryIndex from '@fluxer/api/src/worker/tasks/SyncDiscoveryIndex';
import userProcessPendingDeletion from '@fluxer/api/src/worker/tasks/UserProcessPendingDeletion';
import userProcessPendingDeletions from '@fluxer/api/src/worker/tasks/UserProcessPendingDeletions';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
/**
 * Registry of every worker task handler, keyed by task type.
 *
 * NOTE: key order is significant — WorkerRunner defaults its taskTypes list
 * to Object.keys(workerTasks), so do not reorder keys casually (this is why
 * `cleanupCsamEvidence` stays where it is rather than in alphabetical order).
 */
export const workerTasks: Record<string, WorkerTaskHandler> = {
  applicationProcessDeletion,
  batchGuildAuditLogMessageDeletes,
  bulkDeleteUserMessages,
  csamScanConsumer,
  deleteUserMessagesInGuildByTime,
  expireAttachments,
  extractEmbeds,
  handleMentions,
  harvestGuildData,
  harvestUserData,
  indexChannelMessages,
  indexGuildMembers,
  messageShred,
  processAssetDeletionQueue,
  cleanupCsamEvidence,
  processCloudflarePurgeQueue,
  processInactivityDeletions,
  processPendingBulkMessageDeletions,
  refreshSearchIndex,
  revalidateUserConnections,
  sendScheduledMessage,
  sendSystemDm,
  syncDiscoveryIndex,
  userProcessPendingDeletion,
  userProcessPendingDeletions,
};

View File

@@ -0,0 +1,152 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {MessageID, UserID} from '@fluxer/api/src/BrandedTypes';
import {createMessageID, createUserID} from '@fluxer/api/src/BrandedTypes';
import {SCHEDULED_MESSAGE_TTL_SECONDS} from '@fluxer/api/src/channel/services/ScheduledMessageService';
import {createRequestCache} from '@fluxer/api/src/middleware/RequestCacheMiddleware';
import {ScheduledMessageRepository} from '@fluxer/api/src/user/repositories/ScheduledMessageRepository';
import type {WorkerDependencies} from '@fluxer/api/src/worker/WorkerDependencies';
// Minimal structured-logging surface required by the executor (subset of the
// application logger: message first, optional structured extras second).
export interface WorkerLogger {
  debug(message: string, extra?: object): void;
  info(message: string, extra?: object): void;
  warn(message: string, extra?: object): void;
  error(message: string, extra?: object): void;
}
// Payload of the sendScheduledMessage task. The IDs are decimal strings
// parsed with BigInt; expectedScheduledAt is a date string used as a
// stale-job guard against the stored record's scheduledAt.
export interface SendScheduledMessageParams {
  userId: string;
  scheduledMessageId: string;
  expectedScheduledAt: string;
}
/**
 * Executes a `sendScheduledMessage` worker job: validates the payload, checks
 * the stored scheduled message is still pending and matches the job's
 * expected timestamp, then validates, sends, and deletes the record.
 * Unsendable messages are marked invalid rather than retried.
 */
export class ScheduledMessageExecutor {
  constructor(
    private readonly deps: WorkerDependencies,
    private readonly logger: WorkerLogger,
    // Injectable for tests; defaults to a fresh repository instance.
    private readonly scheduledMessageRepository: ScheduledMessageRepository = new ScheduledMessageRepository(),
  ) {}
  /**
   * Runs the job. Malformed payloads, missing records, already-processed
   * records, and timestamp mismatches all short-circuit with a log entry and
   * no error; send/validation failures mark the record invalid.
   */
  async execute(params: SendScheduledMessageParams): Promise<void> {
    const userId = this.parseUserID(params.userId);
    const scheduledMessageId = this.parseMessageID(params.scheduledMessageId);
    if (!userId || !scheduledMessageId) {
      this.logger.warn('Malformed scheduled message job payload', {payload: params});
      return;
    }
    const expectedScheduledAt = this.parseScheduledAt(params.expectedScheduledAt);
    if (!expectedScheduledAt) {
      this.logger.warn('Invalid expectedScheduledAt for scheduled message job', {payload: params});
      return;
    }
    const scheduledMessage = await this.scheduledMessageRepository.getScheduledMessage(userId, scheduledMessageId);
    if (!scheduledMessage) {
      this.logger.info('Scheduled message not found, skipping', {userId, scheduledMessageId});
      return;
    }
    if (scheduledMessage.status !== 'pending') {
      this.logger.info('Scheduled message already processed', {
        scheduledMessageId,
        status: scheduledMessage.status,
      });
      return;
    }
    // Stale-job guard: compare ISO timestamps — presumably the record's
    // scheduledAt changes when rescheduled, orphaning this older job.
    if (scheduledMessage.scheduledAt.toISOString() !== expectedScheduledAt.toISOString()) {
      this.logger.info('Scheduled message time mismatch, skipping stale job', {
        scheduledMessageId,
        expected: expectedScheduledAt.toISOString(),
        actual: scheduledMessage.scheduledAt.toISOString(),
      });
      return;
    }
    const user = await this.deps.userRepository.findUnique(userId);
    if (!user) {
      await this.markInvalid(userId, scheduledMessageId, 'User not found');
      return;
    }
    const messageRequest = scheduledMessage.parseToMessageRequest();
    const requestCache = createRequestCache();
    try {
      await this.deps.channelService.messages.validateMessageCanBeSent({
        user,
        channelId: scheduledMessage.channelId,
        data: messageRequest,
      });
      await this.deps.channelService.messages.sendMessage({
        user,
        channelId: scheduledMessage.channelId,
        data: messageRequest,
        requestCache,
      });
      // Success: delete the record so the message cannot be sent twice.
      await this.scheduledMessageRepository.deleteScheduledMessage(userId, scheduledMessageId);
      this.logger.info('Scheduled message sent successfully', {scheduledMessageId, userId});
    } catch (error) {
      // Any validation/send failure marks the record invalid (no retry).
      const reason = error instanceof Error ? error.message : 'Failed to send scheduled message';
      this.logger.warn('Marking scheduled message invalid', {scheduledMessageId, userId, reason});
      await this.markInvalid(userId, scheduledMessageId, reason);
    } finally {
      requestCache.clear();
    }
  }
  // Marks the record invalid with a TTL; swallows repository failures (with a
  // log) so a secondary error does not mask the original failure path.
  private async markInvalid(userId: UserID, scheduledMessageId: MessageID, reason: string): Promise<void> {
    try {
      await this.scheduledMessageRepository.markInvalid(
        userId,
        scheduledMessageId,
        reason,
        SCHEDULED_MESSAGE_TTL_SECONDS,
      );
    } catch (error) {
      this.logger.error('Failed to mark scheduled message invalid', {error, scheduledMessageId});
    }
  }
  // Returns null when the string is not a valid bigint user id.
  private parseUserID(value: string): UserID | null {
    try {
      return createUserID(BigInt(value));
    } catch {
      return null;
    }
  }
  // Returns null when the string is not a valid bigint message id.
  private parseMessageID(value: string): MessageID | null {
    try {
      return createMessageID(BigInt(value));
    } catch {
      return null;
    }
  }
  // Returns null for unparseable date strings.
  private parseScheduledAt(value: string): Date | null {
    const date = new Date(value);
    return Number.isNaN(date.getTime()) ? null : date;
  }
}

View File

@@ -0,0 +1,199 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {type ChannelID, createGuildID, createUserID, type GuildID, type UserID} from '@fluxer/api/src/BrandedTypes';
import type {MessageRequest} from '@fluxer/api/src/channel/MessageTypes';
import {createRequestCache, type RequestCache} from '@fluxer/api/src/middleware/RequestCacheMiddleware';
import {getUserSearchService} from '@fluxer/api/src/SearchFactory';
import {collectSystemDmTargets, type SystemDmTargetFilters} from '@fluxer/api/src/system_dm/TargetFinder';
import {UserChannelService} from '@fluxer/api/src/user/services/UserChannelService';
import type {WorkerDependencies} from '@fluxer/api/src/worker/WorkerDependencies';
// Minimal structured-logging surface required by the executor (subset of the
// application logger: message first, optional structured extras second).
export interface WorkerLogger {
  debug(message: string, extra?: object): void;
  info(message: string, extra?: object): void;
  warn(message: string, extra?: object): void;
  error(message: string, extra?: object): void;
}
// Payload of the sendSystemDm task: the job record id as a decimal string
// (parsed with BigInt).
export interface SendSystemDmParams {
  job_id: string;
}
/**
 * Executes an approved system-DM broadcast job: resolves the target
 * recipients, opens (or reuses) a DM channel with each one, sends the job's
 * content, and persists per-recipient progress on the job record so counts
 * survive interruptions.
 */
export class SystemDmExecutor {
  // User id 0 is the reserved system account that authors these DMs.
  private readonly systemUserId = createUserID(0n);
  private readonly userChannelService: UserChannelService;

  constructor(
    private readonly deps: WorkerDependencies,
    private readonly logger: WorkerLogger,
  ) {
    this.userChannelService = new UserChannelService(
      this.deps.userRepository,
      this.deps.userRepository,
      this.deps.userRepository,
      this.deps.channelService,
      this.deps.channelRepository,
      this.deps.gatewayService,
      this.deps.mediaService,
      this.deps.snowflakeService,
      this.deps.userPermissionUtils,
      this.deps.limitConfigService,
    );
  }

  /**
   * Runs the job identified by params.job_id. Only jobs in the 'approved'
   * state are processed; the job is moved to 'running', then to 'completed'
   * (or 'failed' when a required service/user is unavailable up front).
   * Per-recipient failures are counted and recorded, never thrown.
   */
  async execute(params: SendSystemDmParams): Promise<void> {
    const jobId = this.parseJobId(params.job_id);
    if (jobId === null) {
      this.logger.warn('Invalid system DM job id', {payload: params});
      return;
    }
    const job = await this.deps.systemDmJobRepository.getJob(jobId);
    if (!job) {
      this.logger.warn('System DM job missing', {jobId: jobId.toString()});
      return;
    }
    if (job.status !== 'approved') {
      this.logger.warn('Skipping system DM job in unexpected state', {jobId: jobId.toString(), status: job.status});
      return;
    }
    await this.deps.systemDmJobRepository.patchJob(jobId, {status: 'running', updated_at: new Date()});
    const userSearchService = getUserSearchService();
    if (!userSearchService) {
      await this.failJob(jobId, 'User search service unavailable');
      return;
    }
    const systemUser = await this.deps.userRepository.findUnique(this.systemUserId);
    if (!systemUser) {
      await this.failJob(jobId, 'System user not found');
      return;
    }
    const filters: SystemDmTargetFilters = {
      ...(job.registration_start != null && {registrationStart: job.registration_start}),
      ...(job.registration_end != null && {registrationEnd: job.registration_end}),
      excludedGuildIds: this.convertExcludedGuildIds(job.excluded_guild_ids),
    };
    const recipients = await collectSystemDmTargets(
      {userRepository: this.deps.userRepository, userSearchService},
      filters,
    );
    if (recipients.length === 0) {
      await this.completeJob(jobId, job.sent_count, job.failed_count, job.last_error);
      return;
    }
    const requestCache = createRequestCache();
    // Resume counters from the job record so reruns don't reset progress.
    let sentCount = job.sent_count;
    let failedCount = job.failed_count;
    let lastError = job.last_error;
    try {
      for (const recipientId of recipients) {
        try {
          // BUG FIX: ensureDmChannel used to run outside this try/catch, so a
          // single channel-creation failure aborted the whole batch, skipped
          // the remaining recipients, and left the job stuck in 'running'.
          // Treat it as a per-recipient failure instead.
          const channelId = await this.ensureDmChannel(recipientId, requestCache);
          const messageRequest: MessageRequest = {
            content: job.content,
          };
          await this.deps.channelService.messages.sendMessage({
            user: systemUser,
            channelId,
            data: messageRequest,
            requestCache,
          });
          sentCount += 1;
        } catch (error) {
          failedCount += 1;
          lastError = error instanceof Error ? error.message : 'Failed to send system DM';
          this.logger.warn('System DM send failed', {jobId: jobId.toString(), error});
        }
        // Persist progress after every recipient so a crash can resume counts.
        await this.deps.systemDmJobRepository.patchJob(jobId, {
          sent_count: sentCount,
          failed_count: failedCount,
          last_error: lastError,
          updated_at: new Date(),
        });
      }
      await this.completeJob(jobId, sentCount, failedCount, lastError);
    } finally {
      // BUG FIX: previously cleared only on the success path after completion.
      requestCache.clear();
    }
  }

  // Opens (or reuses) the DM channel between the system user and recipient.
  private async ensureDmChannel(recipientId: UserID, requestCache: RequestCache): Promise<ChannelID> {
    const channel = await this.userChannelService.ensureDmOpenForBothUsers({
      userId: this.systemUserId,
      recipientId,
      userCacheService: this.deps.userCacheService,
      requestCache,
    });
    return channel.id;
  }

  // Converts the stored string guild ids to branded GuildIDs, logging and
  // skipping any value that is not a valid bigint.
  private convertExcludedGuildIds(value?: ReadonlySet<string>): Set<GuildID> {
    const result = new Set<GuildID>();
    if (!value) {
      return result;
    }
    for (const id of value) {
      try {
        result.add(createGuildID(BigInt(id)));
      } catch (error) {
        this.logger.warn('Failed to convert excluded guild ID', {
          guildId: id,
          error: error instanceof Error ? error.message : String(error),
        });
      }
    }
    return result;
  }

  // Marks the job completed with final counters.
  private async completeJob(jobId: bigint, sentCount: number, failedCount: number, lastError: string | null): Promise<void> {
    await this.deps.systemDmJobRepository.patchJob(jobId, {
      status: 'completed',
      sent_count: sentCount,
      failed_count: failedCount,
      last_error: lastError,
      updated_at: new Date(),
    });
  }

  // Marks the job failed with a terminal reason.
  private async failJob(jobId: bigint, reason: string): Promise<void> {
    await this.deps.systemDmJobRepository.patchJob(jobId, {
      status: 'failed',
      last_error: reason,
      updated_at: new Date(),
    });
  }

  // Returns null when the string is not a valid bigint.
  private parseJobId(value: string): bigint | null {
    try {
      return BigInt(value);
    } catch {
      return null;
    }
  }
}

View File

@@ -0,0 +1,88 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {MessageID} from '@fluxer/api/src/BrandedTypes';
import {channelIdToUserId} from '@fluxer/api/src/BrandedTypes';
import type {GatewayDispatchEvent} from '@fluxer/api/src/constants/Gateway';
import type {IGatewayService} from '@fluxer/api/src/infrastructure/IGatewayService';
import type {Channel} from '@fluxer/api/src/models/Channel';
import {ChannelTypes} from '@fluxer/constants/src/ChannelConstants';
// Collaborators required by ChannelEventDispatcher.
interface ChannelEventDispatcherDeps {
  gatewayService: IGatewayService;
}
/**
 * Routes gateway dispatch events to the correct audience for a channel:
 * personal-notes channels go to the owning user's presence stream, guild
 * channels fan out to the whole guild, and DMs go to each recipient.
 */
export class ChannelEventDispatcher {
  constructor(private readonly deps: ChannelEventDispatcherDeps) {}

  /** Dispatches one gateway event to everyone who can see the channel. */
  async dispatchToChannel(channel: Channel, event: GatewayDispatchEvent, data: unknown): Promise<void> {
    if (channel.type === ChannelTypes.DM_PERSONAL_NOTES) {
      // A personal-notes channel belongs to exactly one user.
      await this.deps.gatewayService.dispatchPresence({
        userId: channelIdToUserId(channel.id),
        event,
        data,
      });
      return;
    }
    if (channel.guildId) {
      await this.deps.gatewayService.dispatchGuild({
        guildId: channel.guildId,
        event,
        data,
      });
      return;
    }
    // Plain/group DM: deliver to each recipient in order.
    for (const userId of channel.recipientIds) {
      await this.deps.gatewayService.dispatchPresence({userId, event, data});
    }
  }

  /** Announces a bulk message deletion; no-op when the id list is empty. */
  async dispatchBulkDelete(channel: Channel, messageIds: Array<MessageID>): Promise<void> {
    if (messageIds.length === 0) {
      return;
    }
    const ids = messageIds.map((id) => id.toString());
    await this.dispatchToChannel(channel, 'MESSAGE_DELETE_BULK', {
      channel_id: channel.id.toString(),
      ids,
    });
  }

  /** Announces an edited message to the channel's audience. */
  async dispatchMessageUpdate(channel: Channel, messageData: unknown): Promise<void> {
    await this.dispatchToChannel(channel, 'MESSAGE_UPDATE', messageData);
  }

  /** Announces a single message deletion; content/author are optional extras. */
  async dispatchMessageDelete(
    channel: Channel,
    messageId: MessageID,
    content?: string,
    authorId?: string,
  ): Promise<void> {
    await this.dispatchToChannel(channel, 'MESSAGE_DELETE', {
      channel_id: channel.id.toString(),
      id: messageId.toString(),
      content,
      author_id: authorId,
    });
  }
}

View File

@@ -0,0 +1,154 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {ChannelID, MessageID, UserID} from '@fluxer/api/src/BrandedTypes';
import {createChannelID} from '@fluxer/api/src/BrandedTypes';
import type {IChannelRepository} from '@fluxer/api/src/channel/IChannelRepository';
import {purgeMessageAttachments} from '@fluxer/api/src/channel/services/message/MessageHelpers';
import type {IPurgeQueue} from '@fluxer/api/src/infrastructure/CloudflarePurgeQueue';
import type {IGatewayService} from '@fluxer/api/src/infrastructure/IGatewayService';
import type {IStorageService} from '@fluxer/api/src/infrastructure/IStorageService';
import {Logger} from '@fluxer/api/src/Logger';
import type {Message} from '@fluxer/api/src/models/Message';
import {ChannelEventDispatcher} from '@fluxer/api/src/worker/services/ChannelEventDispatcher';
import {chunkArray} from '@fluxer/api/src/worker/tasks/utils/MessageDeletion';
import {snowflakeToDate} from '@fluxer/snowflake/src/Snowflake';
// Collaborators required by MessageDeletionService.
interface MessageDeletionServiceDeps {
  channelRepository: IChannelRepository;
  gatewayService: IGatewayService;
  storageService: IStorageService;
  purgeQueue: IPurgeQueue;
}
// Options for deleteUserMessagesBulk.
interface BulkDeleteOptions {
  // Only delete messages created at or before this epoch-ms timestamp (default: no cutoff).
  beforeTimestamp?: number;
  // Invoked with the running total after each channel is processed.
  onProgress?: (deleted: number) => void;
}
// A message paired with the identifiers needed to delete it.
interface MessageWithChannel {
  channelId: ChannelID;
  messageId: MessageID;
  message: Message;
}
/**
 * Deletes every message a user authored across all channels, purging
 * attachments and emitting MESSAGE_DELETE_BULK gateway events as it goes.
 */
export class MessageDeletionService {
  private readonly eventDispatcher: ChannelEventDispatcher;
  // Page size when walking the author's message index.
  private readonly FETCH_BATCH_SIZE = 100;
  // Messages removed per bulk-delete call (and per gateway event).
  private readonly DELETE_BATCH_SIZE = 100;
  constructor(private readonly deps: MessageDeletionServiceDeps) {
    this.eventDispatcher = new ChannelEventDispatcher({gatewayService: deps.gatewayService});
  }
  /**
   * Deletes the user's messages created at or before `beforeTimestamp`.
   *
   * @param userId - author whose messages are deleted
   * @param options - optional cutoff timestamp (epoch ms) and progress callback
   * @returns the total number of messages deleted
   */
  async deleteUserMessagesBulk(userId: UserID, options: BulkDeleteOptions = {}): Promise<number> {
    const {beforeTimestamp = Number.POSITIVE_INFINITY, onProgress} = options;
    Logger.debug({userId, beforeTimestamp}, 'Starting bulk user message deletion');
    const messagesByChannel = await this.collectUserMessages(userId, beforeTimestamp);
    let totalDeleted = 0;
    for (const [channelIdStr, messages] of messagesByChannel.entries()) {
      const deleted = await this.deleteMessagesInChannel(channelIdStr, messages);
      totalDeleted += deleted;
      // Report the running total after each channel completes.
      onProgress?.(totalDeleted);
    }
    Logger.debug({userId, totalDeleted}, 'Bulk user message deletion completed');
    return totalDeleted;
  }
  /**
   * Walks the author's message index with keyset pagination and groups the
   * qualifying messages by channel.
   *
   * Messages newer than `beforeTimestamp` (per their snowflake creation time)
   * are skipped. Each candidate is re-fetched and its author re-checked
   * before inclusion, so stale index entries are ignored.
   */
  private async collectUserMessages(
    userId: UserID,
    beforeTimestamp: number,
  ): Promise<Map<string, Array<MessageWithChannel>>> {
    const messagesByChannel = new Map<string, Array<MessageWithChannel>>();
    let lastMessageId: MessageID | undefined;
    while (true) {
      const messageRefs = await this.deps.channelRepository.listMessagesByAuthor(
        userId,
        this.FETCH_BATCH_SIZE,
        lastMessageId,
      );
      if (messageRefs.length === 0) {
        break;
      }
      for (const {channelId, messageId} of messageRefs) {
        // Creation time is encoded in the message snowflake.
        const messageTimestamp = snowflakeToDate(messageId).getTime();
        if (messageTimestamp > beforeTimestamp) {
          continue;
        }
        const message = await this.deps.channelRepository.getMessage(channelId, messageId);
        if (message && message.authorId === userId) {
          const channelIdStr = channelId.toString();
          if (!messagesByChannel.has(channelIdStr)) {
            messagesByChannel.set(channelIdStr, []);
          }
          messagesByChannel.get(channelIdStr)!.push({channelId, messageId, message});
        }
      }
      // Keyset cursor: continue after the last ref seen, even if it was filtered out.
      lastMessageId = messageRefs[messageRefs.length - 1]!.messageId;
    }
    return messagesByChannel;
  }
  /**
   * Deletes the collected messages for one channel in DELETE_BATCH_SIZE
   * batches: attachments are purged first, then the rows are bulk-deleted,
   * then MESSAGE_DELETE_BULK is dispatched for the batch.
   *
   * @returns how many messages were deleted (0 if the channel no longer exists)
   */
  private async deleteMessagesInChannel(channelIdStr: string, messages: Array<MessageWithChannel>): Promise<number> {
    if (messages.length === 0) {
      return 0;
    }
    const channelId = createChannelID(BigInt(channelIdStr));
    const channel = await this.deps.channelRepository.findUnique(channelId);
    if (!channel) {
      Logger.debug({channelId: channelIdStr}, 'Channel not found, skipping messages');
      return 0;
    }
    let deleted = 0;
    const batches = chunkArray(messages, this.DELETE_BATCH_SIZE);
    for (const batch of batches) {
      const messageIds = batch.map((m: MessageWithChannel) => m.messageId);
      const messageObjects = batch.map((m: MessageWithChannel) => m.message);
      // Purge attachment storage/CDN entries before the rows disappear.
      await Promise.all(
        messageObjects.map((message: Message) =>
          purgeMessageAttachments(message, this.deps.storageService, this.deps.purgeQueue),
        ),
      );
      await this.deps.channelRepository.bulkDeleteMessages(channelId, messageIds);
      await this.eventDispatcher.dispatchBulkDelete(channel, messageIds);
      deleted += batch.length;
    }
    return deleted;
  }
}

View File

@@ -0,0 +1,162 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {applicationIdToUserId, createApplicationID, type GuildID} from '@fluxer/api/src/BrandedTypes';
import {mapGuildMemberToResponse} from '@fluxer/api/src/guild/GuildModel';
import {Logger} from '@fluxer/api/src/Logger';
import {createRequestCache} from '@fluxer/api/src/middleware/RequestCacheMiddleware';
import {remapAuthorMessagesToDeletedUser} from '@fluxer/api/src/oauth/ApplicationMessageAuthorAnonymization';
import {chunkArray} from '@fluxer/api/src/worker/tasks/utils/MessageDeletion';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {
DELETED_USER_DISCRIMINATOR,
DELETED_USER_GLOBAL_NAME,
DELETED_USER_USERNAME,
UserFlags,
} from '@fluxer/constants/src/UserConstants';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Task payload: applicationId is a stringified snowflake.
const PayloadSchema = z.object({
  applicationId: z.string(),
});
// Guilds processed per parallel batch when fanning out member updates.
const CHUNK_SIZE = 50;
/**
 * Worker task: finalizes deletion of an application and its bot user.
 *
 * Order of operations:
 *  1. Re-attribute the bot's messages to the shared "Deleted User" account
 *     (done before the deleted-flag check, so retried jobs still converge).
 *  2. Scrub the bot user's profile (username/global name/discriminator) and
 *     set the DELETED flag, refreshing the user cache.
 *  3. Dispatch GUILD_MEMBER_UPDATE for the bot to every guild it belongs to,
 *     in chunks of CHUNK_SIZE with a 100ms pause between chunks.
 *  4. Delete the application row itself.
 *
 * Idempotent: a missing application or an already-deleted bot user
 * short-circuits (the latter still deletes the application row).
 */
const applicationProcessDeletion: WorkerTaskHandler = async (payload, helpers) => {
  const validated = PayloadSchema.parse(payload);
  helpers.logger.debug({payload: validated}, 'Processing applicationProcessDeletion task');
  const applicationId = createApplicationID(BigInt(validated.applicationId));
  // The bot user's id is derived deterministically from the application id.
  const botUserId = applicationIdToUserId(applicationId);
  const {
    userRepository,
    guildRepository,
    channelRepository,
    applicationRepository,
    userCacheService,
    gatewayService,
    snowflakeService,
  } = getWorkerDependencies();
  Logger.debug({applicationId, botUserId}, 'Starting application deletion');
  try {
    const application = await applicationRepository.getApplication(applicationId);
    if (!application) {
      Logger.warn({applicationId}, 'Application not found, skipping deletion (already deleted)');
      return;
    }
    const botUser = await userRepository.findUniqueAssert(botUserId);
    // Re-point the bot's authored messages at the anonymized deleted-user account.
    const replacementAuthorId = await remapAuthorMessagesToDeletedUser({
      originalAuthorId: botUserId,
      channelRepository,
      userRepository,
      snowflakeService,
    });
    if (botUser.flags & UserFlags.DELETED) {
      // Retry path: profile already scrubbed; just remove the application row.
      Logger.info(
        {
          applicationId,
          botUserId,
          replacementAuthorId: replacementAuthorId?.toString() ?? null,
        },
        'Bot user already marked as deleted, skipping profile update',
      );
      await applicationRepository.deleteApplication(applicationId);
      return;
    }
    const updatedBotUser = await userRepository.patchUpsert(
      botUserId,
      {
        username: DELETED_USER_USERNAME,
        global_name: DELETED_USER_GLOBAL_NAME,
        discriminator: DELETED_USER_DISCRIMINATOR,
        flags: botUser.flags | UserFlags.DELETED,
      },
      botUser.toRow(),
    );
    await userCacheService.setUserPartialResponseFromUser(updatedBotUser);
    Logger.debug({applicationId, botUserId}, 'Updated bot user to deleted state');
    const guildIds = await userRepository.getUserGuildIds(botUserId);
    Logger.debug({applicationId, botUserId, guildCount: guildIds.length}, 'Found guilds bot is member of');
    const chunks = chunkArray(guildIds, CHUNK_SIZE);
    let processedGuilds = 0;
    for (const chunk of chunks) {
      await Promise.all(
        chunk.map(async (guildId: GuildID) => {
          try {
            const member = await guildRepository.getMember(guildId, botUserId);
            if (!member) {
              Logger.debug({botUserId, guildId}, 'Member not found in guild, skipping');
              return;
            }
            const requestCache = createRequestCache();
            const botMemberResponse = await mapGuildMemberToResponse(member, userCacheService, requestCache);
            await gatewayService.dispatchGuild({
              guildId,
              event: 'GUILD_MEMBER_UPDATE',
              data: {
                guild_id: guildId.toString(),
                ...botMemberResponse,
              },
            });
            Logger.debug({botUserId, guildId}, 'Dispatched GUILD_MEMBER_UPDATE for bot');
          } catch (error) {
            // Per-guild failures are logged but do not abort the overall deletion.
            Logger.error({error, botUserId, guildId}, 'Failed to dispatch guild member update');
          }
        }),
      );
      processedGuilds += chunk.length;
      if (processedGuilds < guildIds.length) {
        // Brief pause between chunks to throttle gateway fanout.
        await new Promise((resolve) => setTimeout(resolve, 100));
      }
      // NOTE(review): this progress line logs once per chunk; hoist it out of
      // the loop if a single summary entry is preferred.
      Logger.info(
        {applicationId, botUserId, processedGuilds, totalGuilds: guildIds.length},
        'Application deletion: dispatched guild updates',
      );
    }
    Logger.debug({applicationId, botUserId, totalGuilds: guildIds.length}, 'Completed guild member updates');
    Logger.debug({applicationId}, 'Deleting application from database');
    await applicationRepository.deleteApplication(applicationId);
    Logger.info({applicationId, botUserId, guildCount: guildIds.length}, 'Application deletion completed successfully');
  } catch (error) {
    Logger.error({error, applicationId, botUserId}, 'Failed to delete application');
    throw error;
  }
};
export default applicationProcessDeletion;

View File

@@ -0,0 +1,60 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createGuildID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Task payload: guildId is a stringified snowflake.
const PayloadSchema = z.object({
  guildId: z.string(),
});
// Upper bound on audit-log entries considered per run.
const BATCH_LIMIT = 250;
/**
 * Worker task: collapses consecutive message-delete audit log entries for a
 * guild into batched entries, keeping the audit log compact.
 */
const batchGuildAuditLogMessageDeletes: WorkerTaskHandler = async (payload, helpers) => {
  const parsed = PayloadSchema.parse(payload);
  helpers.logger.debug({payload: parsed}, 'Processing batchGuildAuditLogMessageDeletes task');
  const guildId = createGuildID(BigInt(parsed.guildId));
  const guildIdStr = guildId.toString();
  const {guildAuditLogService} = getWorkerDependencies();
  try {
    const {deletedLogIds, createdLogs} = await guildAuditLogService.batchRecentMessageDeleteLogs(guildId, BATCH_LIMIT);
    if (deletedLogIds.length === 0) {
      Logger.debug({guildId: guildIdStr}, 'No consecutive message delete audit logs found to batch');
      return;
    }
    Logger.info(
      {
        guildId: guildIdStr,
        deletedCount: deletedLogIds.length,
        createdCount: createdLogs.length,
      },
      'Batched consecutive message delete audit logs',
    );
  } catch (error) {
    Logger.error({error, guildId: guildIdStr}, 'Failed to batch guild audit log message deletes');
    throw error;
  }
};
export default batchGuildAuditLogMessageDeletes;

View File

@@ -0,0 +1,125 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createUserID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {withSpan} from '@fluxer/api/src/telemetry/Tracing';
import {MessageDeletionService} from '@fluxer/api/src/worker/services/MessageDeletionService';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {recordCounter, recordHistogram} from '@fluxer/telemetry/src/Metrics';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Task payload: userId is a stringified snowflake; scheduledAt (epoch ms)
// caps deletion to messages created when the job was scheduled or earlier.
const PayloadSchema = z.object({
  userId: z.string(),
  scheduledAt: z.number().optional(),
});
/**
 * Worker task: bulk-deletes all of a user's messages, then clears the user's
 * pending-bulk-deletion columns. The run is traced and reported via
 * counters/histograms; failures record an error counter and rethrow so the
 * job can be retried.
 */
const bulkDeleteUserMessages: WorkerTaskHandler = async (payload, helpers) => {
  const validated = PayloadSchema.parse(payload);
  helpers.logger.debug({payload: validated}, 'Processing bulkDeleteUserMessages task');
  const userId = createUserID(BigInt(validated.userId));
  const userIdStr = validated.userId;
  const start = Date.now();
  return await withSpan(
    {
      name: 'fluxer.worker.bulk_delete_user_messages',
      attributes: {user_id: userIdStr},
    },
    async () => {
      try {
        // Absent scheduledAt means no cutoff: delete everything.
        const scheduledAtMs = validated.scheduledAt ?? Number.POSITIVE_INFINITY;
        const {channelRepository, gatewayService, userRepository, storageService, purgeQueue} = getWorkerDependencies();
        const user = await userRepository.findUniqueAssert(userId);
        // Idempotency guard: the marker is cleared on completion, so a re-run
        // of a finished job is a no-op.
        if (!user.pendingBulkMessageDeletionAt) {
          Logger.debug({userId}, 'User has no pending bulk message deletion, skipping (already completed)');
          return;
        }
        const deletionService = new MessageDeletionService({
          channelRepository,
          gatewayService,
          storageService,
          purgeQueue,
        });
        const totalDeleted = await deletionService.deleteUserMessagesBulk(userId, {
          beforeTimestamp: scheduledAtMs,
          onProgress: (deleted) => helpers.logger.debug(`Deleted ${deleted} messages so far`),
        });
        // Clear all pending-deletion bookkeeping now that the work is done.
        await userRepository.patchUpsert(
          userId,
          {
            pending_bulk_message_deletion_at: null,
            pending_bulk_message_deletion_channel_count: null,
            pending_bulk_message_deletion_message_count: null,
          },
          user.toRow(),
        );
        const duration = Date.now() - start;
        recordCounter({
          name: 'fluxer.worker.messages.bulk_deleted',
          dimensions: {
            status: 'success',
            user_id: userIdStr,
          },
        });
        recordHistogram({
          name: 'fluxer.worker.bulk_delete.duration',
          valueMs: duration,
          dimensions: {
            user_id: userIdStr,
          },
        });
        // NOTE(review): a message *count* is fed through the ms-valued
        // histogram field here — confirm valueMs is the intended channel.
        recordHistogram({
          name: 'fluxer.worker.bulk_delete.count',
          valueMs: totalDeleted,
          dimensions: {
            user_id: userIdStr,
          },
        });
        Logger.debug({userId, totalDeleted}, 'Bulk message deletion completed');
      } catch (error) {
        recordCounter({
          name: 'fluxer.worker.messages.bulk_deleted',
          dimensions: {
            status: 'error',
            user_id: userIdStr,
            error_type: error instanceof Error ? error.name : 'unknown',
          },
        });
        throw error;
      }
    },
  );
};
export default bulkDeleteUserMessages;

View File

@@ -0,0 +1,34 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
/**
 * Scheduled worker task: purges CSAM evidence that has passed its retention
 * window. Errors are logged and rethrown so the scheduler can retry.
 */
const cleanupCsamEvidence: WorkerTaskHandler = async () => {
  const {csamEvidenceRetentionService} = getWorkerDependencies();
  try {
    await csamEvidenceRetentionService.cleanupExpired();
  } catch (error) {
    Logger.error({error}, 'CSAM evidence retention cleanup failed');
    throw error;
  }
};
export default cleanupCsamEvidence;

View File

@@ -0,0 +1,126 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {Config} from '@fluxer/api/src/Config';
import type {CsamScanQueueEntry, CsamScanResultMessage, PhotoDnaMatchResult} from '@fluxer/api/src/csam/CsamTypes';
import {PhotoDnaMatchService} from '@fluxer/api/src/csam/PhotoDnaMatchService';
import {Logger} from '@fluxer/api/src/Logger';
import {recordCsamQueueDepth, recordCsamQueueProcessed} from '@fluxer/api/src/telemetry/CsamTelemetry';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
// Distributed-lock and queue keys for the CSAM scan consumer.
const LOCK_KEY = 'csam:scan:consumer:lock';
const LOCK_TTL_SECONDS = 5;
const QUEUE_KEY = 'csam:scan:queue';
// Max scan requests drained per tick.
const BATCH_SIZE = 5;
/**
 * Worker task: drains up to BATCH_SIZE pending scan requests from the KV
 * queue, runs their hashes through PhotoDNA (when enabled), and publishes a
 * result message on `csam:result:<requestId>` for each request.
 *
 * A short-TTL lock ensures only one worker consumes per tick; the lock is
 * always released in `finally`. Malformed queue entries are dropped with a
 * warning. When PhotoDNA is disabled or no hashes are present, every request
 * gets an immediate "no match" result.
 */
const csamScanConsumer: WorkerTaskHandler = async (_payload, _helpers) => {
  const deps = getWorkerDependencies();
  const kvProvider = deps.kvClient;
  const cacheService = deps.cacheService;
  // Single-consumer guard: skip this tick if another worker holds the lock.
  const lockToken = await cacheService.acquireLock(LOCK_KEY, LOCK_TTL_SECONDS);
  if (!lockToken) {
    return;
  }
  const entries: Array<CsamScanQueueEntry> = [];
  // Publishes one result message to the requester's dedicated channel.
  const publishResult = async (requestId: string, result: CsamScanResultMessage): Promise<void> => {
    await kvProvider.publish(`csam:result:${requestId}`, JSON.stringify(result));
  };
  try {
    const rawEntries = await kvProvider.lpop(QUEUE_KEY, BATCH_SIZE);
    for (const raw of rawEntries) {
      try {
        entries.push(JSON.parse(raw) as CsamScanQueueEntry);
      } catch {
        // Malformed entries are dropped; the requester will time out waiting.
        Logger.warn({raw}, 'Failed to parse CSAM scan queue entry');
      }
    }
    if (entries.length === 0) {
      return;
    }
    const queueDepth = await kvProvider.llen(QUEUE_KEY);
    recordCsamQueueDepth({depth: queueDepth});
    if (!Config.photoDna.enabled) {
      // Scanning disabled: report "no match" for every request.
      for (const entry of entries) {
        await publishResult(entry.requestId, {isMatch: false});
      }
      recordCsamQueueProcessed({status: 'success', batchSize: entries.length});
      return;
    }
    const allHashes = entries.flatMap((entry) => entry.hashes);
    if (allHashes.length === 0) {
      for (const entry of entries) {
        await publishResult(entry.requestId, {isMatch: false});
      }
      recordCsamQueueProcessed({status: 'success', batchSize: entries.length});
      return;
    }
    let matchResult: PhotoDnaMatchResult;
    try {
      const matchService = new PhotoDnaMatchService();
      matchResult = await matchService.matchHashes(allHashes);
    } catch (error) {
      Logger.error({error}, 'PhotoDNA match service failed');
      for (const entry of entries) {
        await publishResult(entry.requestId, {
          isMatch: false,
          error: 'PhotoDNA service error',
        });
      }
      recordCsamQueueProcessed({status: 'error', batchSize: entries.length});
      return;
    }
    // NOTE(review): hashes from all batched entries are matched together and
    // the aggregate result is sent to every entry — a hit on any one request
    // flags the whole batch. Confirm this coarse granularity is intended; a
    // per-request mapping would require per-hash results from the service.
    for (const entry of entries) {
      await publishResult(entry.requestId, {
        isMatch: matchResult.isMatch,
        matchResult: matchResult.isMatch ? matchResult : undefined,
      });
    }
    recordCsamQueueProcessed({status: 'success', batchSize: entries.length});
  } catch (error) {
    Logger.error({error}, 'CSAM scan consumer failed');
    if (entries.length > 0) {
      recordCsamQueueProcessed({status: 'error', batchSize: entries.length});
    }
    throw error;
  } finally {
    // Always release so other workers can consume the next tick.
    await cacheService.releaseLock(LOCK_KEY, lockToken);
  }
};
export default csamScanConsumer;

View File

@@ -0,0 +1,62 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createGuildID, createUserID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Task payload: ids are stringified snowflakes; days is the ban "delete
// message history" window (0-7, matching the schema bounds).
const PayloadSchema = z.object({
  guildId: z.string(),
  userId: z.string(),
  days: z.number().min(0).max(7),
});
/**
 * Worker task: removes a banned user's messages in a guild going back the
 * given number of days. Delegates the actual deletion to the channel service.
 */
const deleteUserMessagesInGuildByTime: WorkerTaskHandler = async (payload, helpers) => {
  const validated = PayloadSchema.parse(payload);
  helpers.logger.debug({payload: validated}, 'Processing deleteUserMessagesInGuildByTime task');
  const guildId = createGuildID(BigInt(validated.guildId));
  const userId = createUserID(BigInt(validated.userId));
  const {days} = validated;
  const logContext = {guildId: guildId.toString(), userId: userId.toString(), days};
  Logger.debug(logContext, 'Starting time-based message deletion for guild ban');
  try {
    const {channelService} = getWorkerDependencies();
    await channelService.deleteUserMessagesInGuild({guildId, userId, days});
    Logger.debug(logContext, 'Time-based message deletion completed successfully');
  } catch (error) {
    Logger.error({...logContext, error}, 'Failed to delete user messages in guild');
    throw error;
  }
};
export default deleteUserMessagesInGuildByTime;

View File

@@ -0,0 +1,127 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {AttachmentDecayRepository} from '@fluxer/api/src/attachment/AttachmentDecayRepository';
import {Config} from '@fluxer/api/src/Config';
import {makeAttachmentCdnKey, makeAttachmentCdnUrl} from '@fluxer/api/src/channel/services/message/MessageHelpers';
import {getMetricsService} from '@fluxer/api/src/infrastructure/MetricsService';
import {Logger} from '@fluxer/api/src/Logger';
import {getExpiryBucket} from '@fluxer/api/src/utils/AttachmentDecay';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
// How many daily expiry buckets behind today to re-scan, so rows missed by
// earlier runs (downtime, partial failures) are still drained.
const BUCKET_LOOKBACK_DAYS = 3;
// Page size when fetching a bucket's expired rows.
const FETCH_LIMIT = 200;
/**
 * Drains the attachment-decay expiry buckets for today and the previous
 * BUCKET_LOOKBACK_DAYS days (UTC), queueing each genuinely expired attachment
 * for S3/CDN deletion and pruning its expiry-index row.
 *
 * Stale index rows are removed WITHOUT queueing a deletion when:
 *  - the attachment metadata no longer exists (orphaned row), or
 *  - the metadata's expires_at is later than the row's (the lifetime was
 *    extended after the row was written; a newer row covers the real expiry).
 *
 * No-op when attachment decay is disabled in config.
 *
 * @param now - clock injection point; defaults to the current time.
 */
export async function processExpiredAttachments(now = new Date()): Promise<void> {
  if (!Config.attachmentDecayEnabled) {
    Logger.info('Attachment decay disabled; skipping expireAttachments task');
    return;
  }
  const {assetDeletionQueue} = getWorkerDependencies();
  const repo = new AttachmentDecayRepository();
  const metrics = getMetricsService();
  let totalQueued = 0;
  let totalDeletedRows = 0;
  for (let offset = 0; offset <= BUCKET_LOOKBACK_DAYS; offset++) {
    // Bucket key for `offset` days before `now`, computed in UTC.
    const bucketDate = new Date(Date.UTC(now.getUTCFullYear(), now.getUTCMonth(), now.getUTCDate() - offset));
    const bucket = getExpiryBucket(bucketDate);
    // Drain page by page; processed rows are deleted as we go, so each fetch
    // returns a fresh page until the bucket is empty.
    while (true) {
      const expired = await repo.fetchExpiredByBucket(bucket, now, FETCH_LIMIT);
      if (expired.length === 0) break;
      for (const row of expired) {
        const metadata = await repo.fetchById(row.attachment_id);
        if (!metadata) {
          // Orphaned index row: the attachment record is already gone.
          await repo.deleteRecords({
            expiry_bucket: row.expiry_bucket,
            expires_at: row.expires_at,
            attachment_id: row.attachment_id,
          });
          totalDeletedRows++;
          continue;
        }
        if (metadata.expires_at > row.expires_at) {
          // TTL was extended after this row was written; a later row will
          // handle the real expiry, so just drop the stale row.
          await repo.deleteRecords({
            expiry_bucket: row.expiry_bucket,
            expires_at: row.expires_at,
            attachment_id: row.attachment_id,
          });
          totalDeletedRows++;
          continue;
        }
        const s3Key = makeAttachmentCdnKey(metadata.channel_id, metadata.attachment_id, metadata.filename);
        const cdnUrl = makeAttachmentCdnUrl(metadata.channel_id, metadata.attachment_id, metadata.filename);
        // Deletion is asynchronous: queue it, then drop the index row.
        await assetDeletionQueue.queueDeletion({
          s3Key,
          cdnUrl,
          reason: 'attachment-decay-expired',
        });
        await repo.deleteRecords({
          expiry_bucket: row.expiry_bucket,
          expires_at: row.expires_at,
          attachment_id: row.attachment_id,
        });
        metrics.counter({
          name: 'attachment.expired',
          dimensions: {
            channel_id: metadata.channel_id.toString(),
            action: 'expiry',
          },
        });
        // Negative delta records reclaimed bytes. NOTE(review): assumes the
        // counter implementation accepts negative values — confirm.
        metrics.counter({
          name: 'attachment.storage.bytes',
          dimensions: {
            channel_id: metadata.channel_id.toString(),
            action: 'expiry',
          },
          value: -Number(metadata.size_bytes),
        });
        totalQueued++;
        totalDeletedRows++;
      }
    }
  }
  Logger.info(
    {
      queuedForDeletion: totalQueued,
      expiryRowsRemoved: totalDeletedRows,
      lookbackDays: BUCKET_LOOKBACK_DAYS,
    },
    'Processed attachment decay expiry buckets',
  );
}
// Scheduled worker entry point using the default clock.
const expireAttachments: WorkerTaskHandler = async () => {
  await processExpiredAttachments();
};
export default expireAttachments;

View File

@@ -0,0 +1,391 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {ChannelID, GuildID, MessageID} from '@fluxer/api/src/BrandedTypes';
import {createChannelID, createGuildID, createMessageID} from '@fluxer/api/src/BrandedTypes';
import type {ChannelRepository} from '@fluxer/api/src/channel/ChannelRepository';
import {mapMessageToResponse} from '@fluxer/api/src/channel/MessageMappers';
import type {MessageEmbed, MessageEmbedChild} from '@fluxer/api/src/database/types/MessageTypes';
import type {EmbedService} from '@fluxer/api/src/infrastructure/EmbedService';
import type {IGatewayService} from '@fluxer/api/src/infrastructure/IGatewayService';
import type {IMediaService} from '@fluxer/api/src/infrastructure/IMediaService';
import type {UserCacheService} from '@fluxer/api/src/infrastructure/UserCacheService';
import {Logger} from '@fluxer/api/src/Logger';
import {createRequestCache} from '@fluxer/api/src/middleware/RequestCacheMiddleware';
import type {Channel} from '@fluxer/api/src/models/Channel';
import {Embed} from '@fluxer/api/src/models/Embed';
import {Message} from '@fluxer/api/src/models/Message';
import * as UnfurlerUtils from '@fluxer/api/src/utils/UnfurlerUtils';
import {ChannelEventDispatcher} from '@fluxer/api/src/worker/services/ChannelEventDispatcher';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {ICacheService} from '@fluxer/cache/src/ICacheService';
import {MessageFlags} from '@fluxer/constants/src/ChannelConstants';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Task payload: channelId/messageId are stringified snowflakes; guildId is
// null/omitted for DM channels; isNSFWAllowed is forwarded to the embed
// service when unfurling URLs.
const PayloadSchema = z.object({
  channelId: z.string(),
  messageId: z.string(),
  guildId: z.string().nullable().optional(),
  isNSFWAllowed: z.boolean().optional(),
});
// The Normalized* shapes below materialize every optional embed field as an
// explicit null (or documented default) so embeds can be compared
// structurally via JSON serialization.
interface NormalizedEmbedAuthor {
  name: string | null;
  url: string | null;
  icon_url: string | null;
}
interface NormalizedEmbedField {
  name: string | null;
  value: string | null;
  // Defaults to false when unset on the source embed.
  inline: boolean;
}
interface NormalizedEmbedMedia {
  url: string | null;
  content_type: string | null;
  content_hash: string | null;
  width: number | null;
  height: number | null;
  description: string | null;
  placeholder: string | null;
  duration: number | null;
  // Defaults to 0 when unset on the source embed.
  flags: number;
}
interface NormalizedEmbedChild {
  type: string | null;
  title: string | null;
  description: string | null;
  url: string | null;
  // ISO-8601 string (source timestamps are normalized via Date#toISOString).
  timestamp: string | null;
  color: number | null;
  author: NormalizedEmbedAuthor | null;
  provider: NormalizedEmbedAuthor | null;
  thumbnail: NormalizedEmbedMedia | null;
  image: NormalizedEmbedMedia | null;
  video: NormalizedEmbedMedia | null;
  footer: {text: string | null; icon_url: string | null} | null;
  fields: Array<NormalizedEmbedField>;
  nsfw: boolean | null;
}
// Top-level embeds additionally carry their normalized children.
interface NormalizedEmbed extends NormalizedEmbedChild {
  children: Array<NormalizedEmbedChild>;
}
/**
 * Maps an embed author/provider to a fully null-filled comparison shape.
 * Providers carry no icon_url, hence the `in` check before reading it.
 */
function normalizeEmbedAuthor(
  author?: MessageEmbed['author'] | MessageEmbed['provider'],
): NormalizedEmbedAuthor | null {
  if (author == null) {
    return null;
  }
  const normalized: NormalizedEmbedAuthor = {
    name: author.name ?? null,
    url: author.url ?? null,
    icon_url: null,
  };
  if ('icon_url' in author) {
    normalized.icon_url = author.icon_url ?? null;
  }
  return normalized;
}
/**
 * Maps an embed media object (image/video/thumbnail) to a null-filled
 * comparison shape; flags default to 0 rather than null.
 */
function normalizeEmbedMedia(media?: MessageEmbed['image']): NormalizedEmbedMedia | null {
  if (!media) {
    return null;
  }
  const {url, content_type, content_hash, width, height, description, placeholder, duration, flags} = media;
  return {
    url: url ?? null,
    content_type: content_type ?? null,
    content_hash: content_hash ?? null,
    width: width ?? null,
    height: height ?? null,
    description: description ?? null,
    placeholder: placeholder ?? null,
    duration: duration ?? null,
    flags: flags ?? 0,
  };
}
function normalizeEmbedChildForComparison(embed: MessageEmbed | MessageEmbedChild): NormalizedEmbedChild {
return {
type: embed.type ?? null,
title: embed.title ?? null,
description: embed.description ?? null,
url: embed.url ?? null,
timestamp: embed.timestamp ? new Date(embed.timestamp).toISOString() : null,
color: embed.color ?? null,
author: normalizeEmbedAuthor(embed.author),
provider: normalizeEmbedAuthor(embed.provider),
thumbnail: normalizeEmbedMedia(embed.thumbnail ?? undefined),
image: normalizeEmbedMedia(embed.image ?? undefined),
video: normalizeEmbedMedia(embed.video ?? undefined),
footer: embed.footer
? {
text: embed.footer.text ?? null,
icon_url: embed.footer.icon_url ?? null,
}
: null,
fields: (embed.fields ?? []).map((field) => ({
name: field.name ?? null,
value: field.value ?? null,
inline: field.inline ?? false,
})),
nsfw: embed.nsfw ?? null,
};
}
/** Normalizes a top-level embed, including all of its children, for structural comparison. */
function normalizeEmbedForComparison(embed: MessageEmbed): NormalizedEmbed {
	const normalizedChildren = (embed.children ?? []).map((child) => normalizeEmbedChildForComparison(child));
	return {
		...normalizeEmbedChildForComparison(embed),
		children: normalizedChildren,
	};
}
/**
 * Structural equality check between two embed lists. Each list is normalized
 * first (so absent-vs-null and timestamp formatting differences don't matter),
 * then compared via JSON serialization of the normalized forms.
 */
function areEmbedsEquivalent(existingEmbeds: Array<MessageEmbed>, newEmbeds: Array<MessageEmbed>): boolean {
	if (existingEmbeds.length !== newEmbeds.length) {
		return false;
	}
	const serialize = (embeds: Array<MessageEmbed>): string =>
		JSON.stringify(embeds.map((embed) => normalizeEmbedForComparison(embed)));
	return serialize(existingEmbeds) === serialize(newEmbeds);
}
/**
 * Splits `urls` into those whose embeds are already cached and those that still
 * need unfurling.
 *
 * Cache lookups are independent reads, so they run concurrently via Promise.all
 * instead of one awaited round-trip per URL; the results are then processed in
 * the original URL order so embed ordering stays stable.
 *
 * @param urls URLs extracted from the message content, in appearance order.
 * @param cacheService Cache used to look up previously unfurled embeds.
 * @returns Cached embeds keyed by URL, plus the URLs that missed the cache.
 */
async function partitionUrlsByCache(
	urls: Array<string>,
	cacheService: ICacheService,
): Promise<{cachedEmbedsByUrl: Map<string, Array<MessageEmbed>>; urlsToUnfurl: Array<string>}> {
	const cachedEmbedsByUrl = new Map<string, Array<MessageEmbed>>();
	const urlsToUnfurl: Array<string> = [];
	// Fetch all cache entries in parallel; results line up with `urls` by index.
	const cachedResults = await Promise.all(
		urls.map((url) => cacheService.get<Array<MessageEmbed>>(`url-embed:${url}`)),
	);
	urls.forEach((url, index) => {
		const cached = cachedResults[index];
		if (cached && cached.length > 0) {
			cachedEmbedsByUrl.set(url, cached);
			Logger.debug({url, embedCount: cached.length}, 'Using cached embed(s) for URL');
		} else {
			urlsToUnfurl.push(url);
		}
	});
	return {cachedEmbedsByUrl, urlsToUnfurl};
}
/**
 * Unfurls each URL concurrently via the embed service. URLs that yield embeds
 * are cached and collected into the result map; failures are logged and skipped
 * so a single bad URL cannot fail the whole batch.
 */
async function unfurlUrls(
	urlsToUnfurl: Array<string>,
	embedService: EmbedService,
	isNSFWAllowed: boolean,
): Promise<Map<string, Array<MessageEmbed>>> {
	const unfurledEmbedsByUrl = new Map<string, Array<MessageEmbed>>();
	const unfurlSingleUrl = async (url: string): Promise<void> => {
		try {
			const embeds = await embedService.processUrl(url, isNSFWAllowed);
			if (embeds.length === 0) {
				return;
			}
			unfurledEmbedsByUrl.set(
				url,
				embeds.map((embed) => embed.toMessageEmbed()),
			);
			await embedService.cacheEmbeds(url, embeds);
		} catch (error) {
			Logger.error({error, url}, 'Failed to unfurl URL');
		}
	};
	await Promise.all(urlsToUnfurl.map(unfurlSingleUrl));
	return unfurledEmbedsByUrl;
}
/**
 * Flattens cached and freshly-unfurled embeds into one list ordered by the
 * position of each URL in the message content. Freshly unfurled results take
 * precedence over cached ones for the same URL; URLs with no embeds contribute
 * nothing.
 */
function buildOrderedEmbeds(
	urls: Array<string>,
	cachedEmbedsByUrl: Map<string, Array<MessageEmbed>>,
	unfurledEmbedsByUrl: Map<string, Array<MessageEmbed>>,
): Array<MessageEmbed> {
	const allEmbedsByUrl = new Map([...cachedEmbedsByUrl, ...unfurledEmbedsByUrl]);
	return urls.flatMap((url) => allEmbedsByUrl.get(url) ?? []);
}
/**
 * Persists `orderedEmbeds` onto the message if it still exists and its embeds
 * actually changed. Returns the re-fetched message after the write, or null when
 * nothing was written (message deleted, or embeds already equivalent).
 */
async function updateMessageEmbeds(
	channelRepository: ChannelRepository,
	channelId: ChannelID,
	messageId: MessageID,
	orderedEmbeds: Array<MessageEmbed>,
): Promise<Message | null> {
	const freshMessage = await channelRepository.getMessage(channelId, messageId);
	if (freshMessage == null) {
		Logger.debug({messageId}, 'Message no longer exists, skipping embed update');
		return null;
	}
	const existingEmbeds = (freshMessage.embeds ?? []).map((embed) => embed.toMessageEmbed());
	if (areEmbedsEquivalent(existingEmbeds, orderedEmbeds)) {
		Logger.debug({messageId}, 'Embeds unchanged, skipping update');
		return null;
	}
	// An empty embed list is stored as null rather than an empty array.
	const updatedRow = {
		...freshMessage.toRow(),
		embeds: orderedEmbeds.length > 0 ? orderedEmbeds : null,
	};
	await channelRepository.updateEmbeds(new Message(updatedRow));
	return channelRepository.getMessage(channelId, messageId);
}
/** Dependencies and data needed to broadcast a MESSAGE_UPDATE after embed processing. */
interface DispatchEmbedUpdateParams {
	latestMessage: Message;
	orderedEmbeds: Array<MessageEmbed>;
	channel: Channel;
	guildId: GuildID | null;
	userCacheService: UserCacheService;
	mediaService: IMediaService;
	gatewayService: IGatewayService;
}
/**
 * Builds the response payload for the updated message and broadcasts a
 * MESSAGE_UPDATE event so connected clients see the new embeds.
 *
 * Fix: when `orderedEmbeds` is empty, the fallback `latestMessage.embeds` may be
 * null (embeds are persisted as null when cleared), which previously caused a
 * null dereference on the subsequent `.map`; it is now defaulted to an empty
 * array.
 */
async function dispatchEmbedUpdate({
	latestMessage,
	orderedEmbeds,
	channel,
	guildId,
	userCacheService,
	mediaService,
	gatewayService,
}: DispatchEmbedUpdateParams): Promise<void> {
	const requestCache = createRequestCache();
	const embedObjects =
		orderedEmbeds.length > 0 ? orderedEmbeds.map((e) => new Embed(e)) : (latestMessage.embeds ?? []);
	const messageWithUpdatedEmbeds = new Message({
		...latestMessage.toRow(),
		embeds: embedObjects.map((e) => e.toMessageEmbed()),
	});
	const messageData = await mapMessageToResponse({
		message: messageWithUpdatedEmbeds,
		userCacheService,
		requestCache,
		mediaService,
	});
	const eventDispatcher = new ChannelEventDispatcher({gatewayService});
	// NOTE(review): guild-wide dispatch happens only when the caller resolved a guildId
	// but the channel row itself has none — presumably a fallback for channels whose
	// guildId is not hydrated. Confirm this is not meant to be `channel.guildId` un-negated.
	if (guildId && !channel.guildId) {
		await gatewayService.dispatchGuild({
			guildId,
			event: 'MESSAGE_UPDATE',
			data: messageData,
		});
	} else {
		await eventDispatcher.dispatchMessageUpdate(channel, messageData);
	}
	Logger.debug({messageId: latestMessage.id.toString()}, 'Dispatched MESSAGE_UPDATE after embed processing');
}
/**
 * Worker task: extract URLs from a message's content, unfurl them into embeds,
 * persist the result on the message, and broadcast a MESSAGE_UPDATE.
 *
 * Unfurl results are cached per URL; individual URL failures are logged and
 * skipped inside unfurlUrls. The task rethrows unexpected errors so the worker
 * queue can retry.
 */
const extractEmbeds: WorkerTaskHandler = async (payload, helpers) => {
	const validated = PayloadSchema.parse(payload);
	helpers.logger.debug({payload: validated}, 'Processing extractEmbeds task');
	const {channelRepository, userCacheService, mediaService, gatewayService, embedService, cacheService} =
		getWorkerDependencies();
	const messageId = createMessageID(BigInt(validated.messageId));
	const channelId = createChannelID(BigInt(validated.channelId));
	const message = await channelRepository.getMessage(channelId, messageId);
	if (!message || !message.content) {
		Logger.debug({messageId}, 'Skipping extractEmbeds: message not found or no content');
		return;
	}
	const channel = await channelRepository.findUnique(channelId);
	if (!channel) {
		Logger.debug({channelId}, 'Skipping extractEmbeds: channel not found');
		return;
	}
	// Prefer the payload's guild id; the literal string 'null' means "no guild".
	const guildId =
		validated.guildId && validated.guildId !== 'null' ? createGuildID(BigInt(validated.guildId)) : channel.guildId;
	const urls = UnfurlerUtils.extractURLs(message.content);
	if (urls.length === 0) {
		Logger.debug({messageId}, 'Skipping extractEmbeds: no URLs found');
		return;
	}
	const {cachedEmbedsByUrl, urlsToUnfurl} = await partitionUrlsByCache(urls, cacheService);
	// NOTE(review): when every URL hits the cache we return without attaching the cached
	// embeds to this message — presumably they were attached at send time. Confirm a
	// brand-new message whose URLs are all cached is populated elsewhere.
	if (urlsToUnfurl.length === 0) {
		Logger.debug({messageId}, 'Skipping extractEmbeds: all URLs already cached');
		return;
	}
	try {
		const isNSFWAllowed = validated.isNSFWAllowed ?? false;
		const unfurledEmbedsByUrl = await unfurlUrls(urlsToUnfurl, embedService, isNSFWAllowed);
		if (unfurledEmbedsByUrl.size === 0) {
			Logger.debug({messageId: messageId.toString()}, 'No URLs were successfully unfurled');
			return;
		}
		// Merge cached + fresh embeds back into the order the URLs appear in the content.
		const orderedEmbeds = buildOrderedEmbeds(urls, cachedEmbedsByUrl, unfurledEmbedsByUrl);
		const latestMessage = await updateMessageEmbeds(channelRepository, channelId, messageId, orderedEmbeds);
		if (!latestMessage) {
			// Message deleted, or embeds already up to date; nothing to dispatch.
			return;
		}
		if (!(message.flags & MessageFlags.SUPPRESS_EMBEDS)) {
			await dispatchEmbedUpdate({
				latestMessage,
				orderedEmbeds,
				channel,
				guildId,
				userCacheService,
				mediaService,
				gatewayService,
			});
		} else {
			Logger.debug({messageId: messageId.toString()}, 'Skipping MESSAGE_UPDATE dispatch due to SUPPRESS_EMBEDS flag');
		}
		Logger.debug(
			{messageId: messageId.toString(), embedCount: unfurledEmbedsByUrl.size},
			'Handled extractEmbeds successfully',
		);
	} catch (error) {
		Logger.error({error, messageId: messageId.toString()}, 'Failed to process embeds');
		// Rethrow so the worker queue's retry policy applies.
		throw error;
	}
};
export default extractEmbeds;

View File

@@ -0,0 +1,160 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createChannelID, createGuildID, createMessageID, createUserID, type UserID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Payload contract for the handleMentions worker task; all IDs are stringified snowflakes.
const PayloadSchema = z.object({
	channelId: z.string(),
	messageId: z.string(),
	authorId: z.string(),
	guildId: z.string().optional(),
	mentionHere: z.boolean().optional(),
});
/**
 * Worker task: resolve which users a message's mentions should notify, then
 * update their read state — bulk mention-count increments, push-badge cache
 * invalidation, and (guild messages only) recent-mention records.
 *
 * Guild channels resolve mentions via gateway RPCs; DMs just use the message's
 * mentioned user ids minus the author.
 */
const handleMentions: WorkerTaskHandler = async (payload, helpers) => {
	const validated = PayloadSchema.parse(payload);
	helpers.logger.debug({payload: validated}, 'Processing handleMentions task');
	const {userRepository, channelRepository, readStateService, gatewayService} = getWorkerDependencies();
	const authorId = createUserID(BigInt(validated.authorId));
	const channelId = createChannelID(BigInt(validated.channelId));
	const messageId = createMessageID(BigInt(validated.messageId));
	const guildId = validated.guildId ? createGuildID(BigInt(validated.guildId)) : null;
	const mentionHere = validated.mentionHere ?? false;
	const message = await channelRepository.getMessage(channelId, messageId);
	if (!message) {
		Logger.debug({messageId}, 'handleMentions: Message not found, skipping');
		return;
	}
	const channel = await channelRepository.findUnique(channelId);
	if (!channel) {
		Logger.debug({channelId}, 'handleMentions: Channel not found, skipping');
		return;
	}
	let mentionedUserIds: Array<UserID>;
	if (channel.guildId) {
		// NOTE(review): @here appears to be encoded as mentionEveryone + mentionHere, so a
		// plain @everyone is "mentionEveryone without mentionHere" — confirm with the enqueuer.
		const isEveryoneMention = message.mentionEveryone && !mentionHere;
		const roleIds = Array.from(message.mentionedRoleIds);
		const userIds = Array.from(message.mentionedUserIds);
		if (isEveryoneMention || mentionHere) {
			// Broad mention: one combined RPC resolves everyone/here plus role/user mentions.
			mentionedUserIds = await gatewayService.resolveAllMentions({
				guildId: channel.guildId,
				channelId,
				authorId,
				mentionEveryone: isEveryoneMention,
				mentionHere,
				roleIds,
				userIds,
			});
			Logger.debug(
				{
					channelId,
					guildId: channel.guildId,
					mentionedCount: mentionedUserIds.length,
					everyoneMention: isEveryoneMention,
					hereMention: mentionHere,
					roleCount: message.mentionedRoleIds.size,
					userCount: message.mentionedUserIds.size,
				},
				'Resolved all mentions via combined RPC',
			);
		} else {
			// Targeted mentions only: resolve role and user mentions in parallel.
			const [roleMentionedUserIds, userMentionedUserIds] = await Promise.all([
				roleIds.length > 0
					? gatewayService.getUsersToMentionByRoles({
							guildId: channel.guildId,
							channelId,
							roleIds,
							authorId,
						})
					: Promise.resolve([]),
				userIds.length > 0
					? gatewayService.getUsersToMentionByUserIds({
							guildId: channel.guildId,
							channelId,
							userIds,
							authorId,
						})
					: Promise.resolve([]),
			]);
			mentionedUserIds = [...roleMentionedUserIds, ...userMentionedUserIds];
			Logger.debug(
				{
					channelId,
					guildId: channel.guildId,
					roleMentionedCount: roleMentionedUserIds.length,
					userMentionedCount: userMentionedUserIds.length,
					roleCount: message.mentionedRoleIds.size,
					userCount: message.mentionedUserIds.size,
				},
				'Resolved role and user mentions via dedicated RPC methods',
			);
		}
	} else {
		// DM/group channel: notify mentioned users directly, never the author themself.
		mentionedUserIds = Array.from(message.mentionedUserIds).filter((userId) => userId !== authorId);
		Logger.debug({channelId, userMentionCount: mentionedUserIds.length}, 'Handled DM user mentions');
	}
	// A user mentioned via both a role and directly should only be counted once.
	const uniqueUserIds = Array.from(new Set(mentionedUserIds));
	if (uniqueUserIds.length === 0) {
		Logger.debug({channelId, guildId}, 'No users to mention, skipping read state updates');
		return;
	}
	await readStateService.bulkIncrementMentionCounts(uniqueUserIds.map((userId) => ({userId, channelId})));
	await Promise.all(uniqueUserIds.map((userId) => gatewayService.invalidatePushBadgeCount({userId})));
	// Recent-mention records are only kept for guild messages.
	if (guildId != null) {
		await userRepository.createRecentMentions(
			uniqueUserIds.map((userId) => ({
				user_id: userId,
				channel_id: channelId,
				message_id: messageId,
				guild_id: guildId,
				is_everyone: message.mentionEveryone,
				is_role: message.mentionedRoleIds.size > 0,
			})),
		);
	}
	Logger.debug(
		{
			channelId,
			guildId,
			totalMentioned: uniqueUserIds.length,
			everyoneMentions: message.mentionEveryone ? 1 : 0,
			roleMentions: message.mentionedRoleIds.size,
			userMentions: message.mentionedUserIds.size,
		},
		'Handled all mentions',
	);
};
export default handleMentions;

View File

@@ -0,0 +1,308 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import fs from 'node:fs';
import os from 'node:os';
import path from 'node:path';
import {createGuildID, type MessageID} from '@fluxer/api/src/BrandedTypes';
import {Config} from '@fluxer/api/src/Config';
import {Logger} from '@fluxer/api/src/Logger';
import {
appendAssetToArchive,
buildHashedAssetKey,
buildSimpleAssetKey,
getAnimatedAssetExtension,
getEmojiExtension,
} from '@fluxer/api/src/worker/utils/AssetArchiveHelpers';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {ChannelTypes} from '@fluxer/constants/src/ChannelConstants';
import {snowflakeToDate} from '@fluxer/snowflake/src/Snowflake';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import archiver from 'archiver';
import {ms} from 'itty-time';
import {z} from 'zod';
// Payload contract for the harvestGuildData worker task; IDs are stringified snowflakes.
const PayloadSchema = z.object({
	guildId: z.string(),
	archiveId: z.string(),
	requestedBy: z.string(),
});
// Page size and per-channel cap for message harvesting.
const MESSAGE_BATCH_SIZE = 100;
const MESSAGE_LIMIT_PER_CHANNEL = 1000;
// Progress checkpoints (percent) written to the admin archive record.
const INITIAL_PROGRESS = 5;
const METADATA_PROGRESS = 25;
const MESSAGE_PROGRESS_MAX = 75;
const ZIP_PROGRESS = 90;
const COMPLETE_PROGRESS = 100;
/**
 * Worker task: build a downloadable zip archive of a guild's data for an admin
 * archive request.
 *
 * Pipeline: mark the archive record started → collect metadata (roles, members,
 * channels, emojis, stickers) → page through each text channel's messages →
 * write guild.json, per-channel messages, and guild assets into a zip → upload
 * it to the harvests bucket → mark the record completed. Progress percentages
 * are written to the archive record at each checkpoint; any error marks the
 * record failed and rethrows for the queue's retry policy.
 */
const harvestGuildData: WorkerTaskHandler = async (payload, helpers) => {
	const validated = PayloadSchema.parse(payload);
	helpers.logger.debug({payload}, 'Processing harvestGuildData task');
	const guildId = createGuildID(BigInt(validated.guildId));
	const archiveId = BigInt(validated.archiveId);
	const guildIdString = guildId.toString();
	const {guildRepository, channelRepository, adminArchiveRepository, storageService} = getWorkerDependencies();
	const adminArchive = await adminArchiveRepository.findBySubjectAndArchiveId('guild', guildId, archiveId);
	if (!adminArchive) {
		throw new Error('Admin archive record not found for guild');
	}
	// Small facade over the repository so progress updates read uniformly below.
	const progress = {
		markStarted: () => adminArchiveRepository.markAsStarted(adminArchive, 'Starting guild archive'),
		updateProgress: (percent: number, step: string) =>
			adminArchiveRepository.updateProgress(adminArchive, percent, step),
		markCompleted: (storageKey: string, fileSize: bigint, expiresAt: Date) =>
			adminArchiveRepository.markAsCompleted(adminArchive, storageKey, fileSize, expiresAt),
		markFailed: (message: string) => adminArchiveRepository.markAsFailed(adminArchive, message),
	};
	try {
		await progress.markStarted();
		const guild = await guildRepository.findUnique(guildId);
		if (!guild) {
			throw new Error(`Guild ${guildId.toString()} not found`);
		}
		await progress.updateProgress(INITIAL_PROGRESS, 'Collecting guild metadata');
		// Metadata collections are independent reads, fetched in parallel.
		const [roles, members, channels, emojis, stickers] = await Promise.all([
			guildRepository.listRoles(guildId),
			guildRepository.listMembers(guildId),
			channelRepository.channelData.listGuildChannels(guildId),
			guildRepository.listEmojis(guildId),
			guildRepository.listStickers(guildId),
		]);
		await progress.updateProgress(METADATA_PROGRESS, 'Harvesting channel messages');
		const channelMessages: Record<string, Array<unknown>> = {};
		let processedChannels = 0;
		for (const channel of channels) {
			// Only plain text channels are harvested.
			if (channel.type !== ChannelTypes.GUILD_TEXT) {
				continue;
			}
			const messagesForChannel: Array<{
				id: string;
				author_id: string;
				timestamp: string;
				content: string | null;
			}> = [];
			let beforeMessageId: MessageID | undefined;
			// Page backwards through history. The cap is checked before each fetch and the
			// whole batch is appended, so the result can exceed MESSAGE_LIMIT_PER_CHANNEL
			// by up to MESSAGE_BATCH_SIZE - 1 entries.
			while (messagesForChannel.length < MESSAGE_LIMIT_PER_CHANNEL) {
				const batch = await channelRepository.listMessages(channel.id, beforeMessageId, MESSAGE_BATCH_SIZE);
				if (batch.length === 0) break;
				for (const message of batch) {
					// Rows without an author are omitted from the archive.
					if (message.authorId == null) {
						continue;
					}
					messagesForChannel.push({
						id: message.id.toString(),
						author_id: message.authorId.toString(),
						// Creation time is derived from the snowflake id.
						timestamp: snowflakeToDate(message.id).toISOString(),
						content: message.content ?? null,
					});
				}
				beforeMessageId = batch[batch.length - 1]!.id;
			}
			channelMessages[channel.id.toString()] = messagesForChannel;
			processedChannels++;
			// Scale progress linearly between the metadata and message checkpoints.
			const progressPercent = Math.min(
				METADATA_PROGRESS +
					Math.floor((processedChannels / Math.max(channels.length, 1)) * (MESSAGE_PROGRESS_MAX - METADATA_PROGRESS)),
				MESSAGE_PROGRESS_MAX,
			);
			await progress.updateProgress(progressPercent, `Harvested ${processedChannels}/${channels.length} channels`);
		}
		await progress.updateProgress(MESSAGE_PROGRESS_MAX, 'Downloading guild assets');
		// Snapshot of the guild's metadata, serialized as guild.json in the archive.
		const payloadJson = {
			guild: {
				id: guild.id.toString(),
				name: guild.name,
				owner_id: guild.ownerId.toString(),
				features: Array.from(guild.features),
				verification_level: guild.verificationLevel,
				default_message_notifications: guild.defaultMessageNotifications,
				explicit_content_filter: guild.explicitContentFilter,
				created_at: snowflakeToDate(guild.id).toISOString(),
			},
			roles: roles.map((role) => ({
				id: role.id.toString(),
				name: role.name,
				color: role.color,
				position: role.position,
				permissions: role.permissions.toString(),
				mentionable: role.isMentionable,
				hoist: role.isHoisted,
			})),
			members: members.map((member) => ({
				user_id: member.userId.toString(),
				joined_at: member.joinedAt.toISOString(),
				nickname: member.nickname,
				role_ids: Array.from(member.roleIds).map((id) => id.toString()),
				avatar_hash: member.avatarHash,
				banner_hash: member.bannerHash,
			})),
			emojis: emojis.map((emoji) => ({
				id: emoji.id.toString(),
				name: emoji.name,
				animated: emoji.isAnimated,
				creator_id: emoji.creatorId.toString(),
			})),
			stickers: stickers.map((sticker) => ({
				id: sticker.id.toString(),
				name: sticker.name,
				description: sticker.description,
				animated: sticker.animated,
				tags: sticker.tags,
				creator_id: sticker.creatorId.toString(),
			})),
			channels: channels.map((channel) => ({
				id: channel.id.toString(),
				name: channel.name,
				type: channel.type,
				parent_id: channel.parentId?.toString() ?? null,
				topic: channel.topic,
				nsfw: channel.isNsfw,
				position: channel.position,
				last_message_id: channel.lastMessageId?.toString() ?? null,
			})),
		};
		const payloadBuffer = Buffer.from(JSON.stringify(payloadJson, null, 2), 'utf-8');
		await progress.updateProgress(ZIP_PROGRESS, 'Creating archive');
		// The zip is staged in a temp dir and cleaned up in the finally block below.
		const tempDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'fluxer-guild-archive-'));
		const zipPath = path.join(tempDir, `guild-${guildId}.zip`);
		let output: fs.WriteStream | null = null;
		try {
			output = fs.createWriteStream(zipPath);
			const archive = archiver('zip', {zlib: {level: 9}});
			archive.pipe(output);
			archive.append(payloadBuffer, {name: 'guild.json'});
			// Top-level guild imagery; entries with no hash are skipped.
			const guildAssetEntries = [
				{hash: guild.iconHash, prefix: 'icons', fileName: 'icon'},
				{hash: guild.bannerHash, prefix: 'banners', fileName: 'banner'},
				{hash: guild.splashHash, prefix: 'splashes', fileName: 'splash'},
				{hash: guild.embedSplashHash, prefix: 'embed-splashes', fileName: 'embed-splash'},
			];
			for (const entry of guildAssetEntries) {
				if (!entry.hash) {
					continue;
				}
				const assetExtension = getAnimatedAssetExtension(entry.hash);
				const assetArchiveName = `assets/guild/${entry.fileName}.${assetExtension}`;
				const assetStorageKey = buildHashedAssetKey(entry.prefix, guildIdString, entry.hash);
				await appendAssetToArchive({
					archive,
					storageService,
					storageKey: assetStorageKey,
					archiveName: assetArchiveName,
					label: `guild ${entry.fileName}`,
					subjectId: guildIdString,
				});
			}
			for (const emoji of emojis) {
				const emojiId = emoji.id.toString();
				const emojiArchiveName = `assets/guild/emojis/${emojiId}.${getEmojiExtension(emoji.isAnimated)}`;
				const emojiStorageKey = buildSimpleAssetKey('emojis', emojiId);
				await appendAssetToArchive({
					archive,
					storageService,
					storageKey: emojiStorageKey,
					archiveName: emojiArchiveName,
					label: `emoji ${emojiId}`,
					subjectId: guildIdString,
				});
			}
			for (const sticker of stickers) {
				const stickerId = sticker.id.toString();
				const stickerExtension = sticker.animated ? 'gif' : 'png';
				const stickerArchiveName = `assets/guild/stickers/${stickerId}.${stickerExtension}`;
				const stickerStorageKey = buildSimpleAssetKey('stickers', stickerId);
				await appendAssetToArchive({
					archive,
					storageService,
					storageKey: stickerStorageKey,
					archiveName: stickerArchiveName,
					label: `sticker ${stickerId}`,
					subjectId: guildIdString,
				});
			}
			for (const [channelId, messages] of Object.entries(channelMessages)) {
				archive.append(JSON.stringify(messages, null, 2), {name: `channels/${channelId}/messages.json`});
			}
			await archive.finalize();
			// Wait for the zip to flush to disk before reading it back.
			// NOTE(review): the 'close' listener is attached after finalize() resolves —
			// confirm the event cannot fire before the listener is registered.
			await new Promise<void>((resolve, reject) => {
				output!.on('close', resolve);
				output!.on('error', reject);
			});
			const zipBuffer = await fs.promises.readFile(zipPath);
			const expiresAt = new Date(Date.now() + ms('1 year'));
			const storageKey = `archives/guilds/${guildId}/${archiveId}/guild-archive.zip`;
			await storageService.uploadObject({
				bucket: Config.s3.buckets.harvests,
				key: storageKey,
				body: zipBuffer,
				contentType: 'application/zip',
				expiresAt,
			});
			await progress.markCompleted(storageKey, BigInt(zipBuffer.length), expiresAt);
			await progress.updateProgress(COMPLETE_PROGRESS, 'Completed');
		} finally {
			// Always release the stream and remove the temp staging directory.
			if (output && !output.destroyed) {
				output.destroy();
			}
			await fs.promises.rm(tempDir, {recursive: true, force: true});
		}
	} catch (error) {
		Logger.error({error, guildId, archiveId}, 'Failed to harvest guild data');
		await progress.markFailed(error instanceof Error ? error.message : String(error));
		throw error;
	}
};
export default harvestGuildData;

View File

@@ -0,0 +1,840 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import fs from 'node:fs';
import os from 'node:os';
import path from 'node:path';
import type {AdminArchive} from '@fluxer/api/src/admin/models/AdminArchiveModel';
import {type ChannelID, createUserID, type GuildID, type MessageID, type UserID} from '@fluxer/api/src/BrandedTypes';
import {Config} from '@fluxer/api/src/Config';
import {makeAttachmentCdnUrl} from '@fluxer/api/src/channel/services/message/MessageHelpers';
import type {IStorageService} from '@fluxer/api/src/infrastructure/IStorageService';
import {Logger} from '@fluxer/api/src/Logger';
import type {Application} from '@fluxer/api/src/models/Application';
import type {AuthSession} from '@fluxer/api/src/models/AuthSession';
import type {Channel} from '@fluxer/api/src/models/Channel';
import type {FavoriteMeme} from '@fluxer/api/src/models/FavoriteMeme';
import type {GiftCode} from '@fluxer/api/src/models/GiftCode';
import type {Guild} from '@fluxer/api/src/models/Guild';
import type {GuildMember} from '@fluxer/api/src/models/GuildMember';
import type {MfaBackupCode} from '@fluxer/api/src/models/MfaBackupCode';
import type {Payment} from '@fluxer/api/src/models/Payment';
import type {PushSubscription} from '@fluxer/api/src/models/PushSubscription';
import type {Relationship} from '@fluxer/api/src/models/Relationship';
import type {SavedMessage} from '@fluxer/api/src/models/SavedMessage';
import type {User} from '@fluxer/api/src/models/User';
import type {UserGuildSettings} from '@fluxer/api/src/models/UserGuildSettings';
import type {UserSettings} from '@fluxer/api/src/models/UserSettings';
import type {WebAuthnCredential} from '@fluxer/api/src/models/WebAuthnCredential';
import {resolveSessionClientInfo} from '@fluxer/api/src/utils/UserAgentUtils';
import {
appendAssetToArchive,
buildHashedAssetKey,
getAnimatedAssetExtension,
} from '@fluxer/api/src/worker/utils/AssetArchiveHelpers';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {snowflakeToDate} from '@fluxer/snowflake/src/Snowflake';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import archiver from 'archiver';
import {ms} from 'itty-time';
import {z} from 'zod';
// Payload contract for the user-data harvest worker task; IDs are stringified snowflakes.
// adminRequestedBy is present only when an admin (not the user) requested the harvest.
const PayloadSchema = z.object({
	userId: z.string(),
	harvestId: z.string(),
	adminRequestedBy: z.string().optional(),
});
/** One harvested message, serialized for the archive JSON. */
interface HarvestedMessage {
	id: string;
	timestamp: string;
	content: string;
	attachments: Array<string>;
}
/** A single harvested message paired with the channel it belongs to. */
interface ChannelHarvestResult {
	channelId: string;
	messageData: HarvestedMessage;
}
/** A guild membership; member/guild are null when they could not be resolved. */
interface GuildMembershipEntry {
	member: GuildMember | null;
	guild: Guild | null;
	guildId: GuildID;
}
/** Harvested messages grouped per channel plus the overall count. */
interface HarvestMessageResult {
	channelMessagesMap: Map<string, Array<HarvestedMessage>>;
	totalMessages: number;
}
/** Everything needed to assemble the user-data JSON document. */
interface UserDataJsonParams {
	user: User;
	userId: UserID;
	authSessions: Array<AuthSession>;
	relationships: Array<Relationship>;
	userNotes: Map<UserID, string>;
	userSettings: UserSettings | null;
	guildMemberships: Array<GuildMembershipEntry>;
	guildSettings: Array<UserGuildSettings | null>;
	savedMessages: Array<SavedMessage>;
	privateChannels: Array<Channel>;
	favoriteMemes: Array<FavoriteMeme>;
	pushSubscriptions: Array<PushSubscription>;
	webAuthnCredentials: Array<WebAuthnCredential>;
	mfaBackupCodes: Array<MfaBackupCode>;
	createdGiftCodes: Array<GiftCode>;
	payments: Array<Payment>;
	oauthClients: Array<Application>;
	pinnedDms: Array<{channel_id: bigint; sort_order: number}>;
	authorizedIps: Array<{ip: string}>;
	activityData: {last_active_at: Date | null; last_active_ip: string | null};
}
/** Inputs for building and uploading the harvest zip archive. */
interface ArchiveParams {
	userId: UserID;
	harvestId: bigint;
	isAdminArchive: boolean;
	userDataJsonBuffer: Buffer;
	user: User;
	channelMessagesMap: Map<string, Array<HarvestedMessage>>;
	payments: Array<Payment>;
	oauthClients: Array<Application>;
	authorizedIps: Array<{ip: string}>;
	activityData: {last_active_at: Date | null; last_active_ip: string | null};
	storageService: IStorageService;
}
/** The uploaded archive's content, storage location, and lifetime. */
interface ArchiveResult {
	zipBuffer: Buffer;
	storageKey: string;
	expiresAt: Date;
	downloadUrl: string;
}
// How many messages are hydrated concurrently per batch during harvesting.
const CONCURRENT_MESSAGE_LIMIT = 10;
// Progress checkpoints (percent) reported while building the harvest.
const INITIAL_PROGRESS = 5;
const MESSAGES_PROGRESS_MAX = 55;
const METADATA_PROGRESS = 60;
const ZIP_PROGRESS = 70;
const COMPLETE_PROGRESS = 100;
// Harvest archives expire this long after creation.
const ZIP_EXPIRY_MS = ms('7 days');
/** Serializes a Payment row into the snake_case shape used in the harvest JSON. */
function mapPayment(payment: Payment) {
	const {
		checkoutSessionId,
		amountCents,
		currency,
		status,
		subscriptionId,
		paymentIntentId,
		productType,
		isGift,
		giftCode,
		createdAt,
		completedAt,
	} = payment;
	return {
		checkout_session_id: checkoutSessionId,
		amount_cents: amountCents,
		currency,
		status,
		subscription_id: subscriptionId,
		payment_intent_id: paymentIntentId,
		product_type: productType,
		is_gift: isGift,
		gift_code: giftCode,
		created_at: createdAt.toISOString(),
		completed_at: completedAt?.toISOString() ?? null,
	};
}
/** Serializes an OAuth application owned by the user for the harvest JSON. */
function mapOAuthApplication(app: Application) {
	const {applicationId, name, oauth2RedirectUris} = app;
	return {
		application_id: applicationId.toString(),
		name,
		redirect_uris: [...oauth2RedirectUris],
	};
}
/** Bundles IP-authorization and activity-tracking data for the harvest JSON. */
function mapSecurityData(params: {
	authorizedIps: Array<{ip: string}>;
	activityData: {last_active_at: Date | null; last_active_ip: string | null};
}) {
	const {authorizedIps, activityData} = params;
	const lastActiveAt = activityData.last_active_at;
	return {
		authorized_ips: authorizedIps,
		activity_tracking: {
			last_active_at: lastActiveAt ? lastActiveAt.toISOString() : null,
			last_active_ip: activityData.last_active_ip,
		},
	};
}
/**
 * Loads every message authored by the user (capped at 100k refs) and hydrates
 * them in small concurrent batches, grouping the results per channel.
 *
 * Failures to hydrate an individual message are logged and that message is
 * dropped, so one bad row cannot abort the whole harvest.
 *
 * @param channelRepository Minimal repository surface this step depends on.
 * @param userId Author whose messages are harvested.
 * @param startTime Epoch ms used only for elapsed-time logging.
 */
async function harvestMessages(
	channelRepository: {
		listMessagesByAuthor: (
			userId: UserID,
			limit: number,
		) => Promise<Array<{channelId: ChannelID; messageId: MessageID}>>;
		getMessage: (
			channelId: ChannelID,
			messageId: MessageID,
		) => Promise<{content: string | null; attachments?: Array<{id: bigint; filename: string}>} | null>;
	},
	userId: UserID,
	startTime: number,
): Promise<HarvestMessageResult> {
	const channelMessagesMap = new Map<string, Array<HarvestedMessage>>();
	Logger.debug('Fetching all user messages');
	const startFetchTime = Date.now();
	// Fetch lightweight (channelId, messageId) refs first; bodies are hydrated below.
	const messageRefs = await channelRepository.listMessagesByAuthor(userId, 100000);
	Logger.debug(
		{
			totalMessages: messageRefs.length,
			fetchElapsed: Date.now() - startFetchTime,
			totalElapsed: Date.now() - startTime,
		},
		'All messages retrieved',
	);
	if (messageRefs.length === 0) {
		return {channelMessagesMap, totalMessages: 0};
	}
	const messages: Array<ChannelHarvestResult> = [];
	// Hydrate CONCURRENT_MESSAGE_LIMIT messages at a time to bound repository load.
	for (let i = 0; i < messageRefs.length; i += CONCURRENT_MESSAGE_LIMIT) {
		const batch = messageRefs.slice(i, i + CONCURRENT_MESSAGE_LIMIT);
		const batchPromises = batch.map(async ({channelId, messageId}): Promise<ChannelHarvestResult | null> => {
			try {
				const message = await channelRepository.getMessage(channelId, messageId);
				if (!message) {
					Logger.warn(
						{channelId: channelId.toString(), messageId: messageId.toString()},
						'Message not found during harvest',
					);
					return null;
				}
				// Creation time is derived from the message's snowflake id.
				const timestamp = snowflakeToDate(messageId);
				const attachments: Array<string> = [];
				if (message.attachments) {
					for (const attachment of message.attachments) {
						const attachmentUrl = makeAttachmentCdnUrl(channelId, attachment.id, attachment.filename);
						attachments.push(attachmentUrl);
					}
				}
				return {
					channelId: channelId.toString(),
					messageData: {
						id: messageId.toString(),
						timestamp: timestamp.toISOString(),
						content: message.content ?? '',
						attachments,
					},
				};
			} catch (error) {
				Logger.error(
					{error, channelId: channelId.toString(), messageId: messageId.toString()},
					'Failed to process message during harvest',
				);
				return null;
			}
		});
		const batchResults = await Promise.all(batchPromises);
		for (const result of batchResults) {
			if (result !== null) {
				messages.push(result);
			}
		}
	}
	// Group the hydrated messages by channel for the per-channel archive files.
	for (const {channelId, messageData} of messages) {
		if (!channelMessagesMap.has(channelId)) {
			channelMessagesMap.set(channelId, []);
		}
		channelMessagesMap.get(channelId)!.push(messageData);
	}
	return {channelMessagesMap, totalMessages: messages.length};
}
/**
 * Assembles the `user.json` document included in a user-data export.
 *
 * Converts internal models into a JSON-safe snake_case shape: bigint IDs
 * become strings, Dates become ISO-8601 strings (null when absent), and Sets
 * become arrays. Only data visible in the params object is exported.
 */
function buildUserDataJson(params: UserDataJsonParams) {
  const {
    user,
    userId,
    authSessions,
    relationships,
    userNotes,
    userSettings,
    guildMemberships,
    guildSettings,
    savedMessages,
    privateChannels,
    favoriteMemes,
    pushSubscriptions,
    webAuthnCredentials,
    mfaBackupCodes,
    createdGiftCodes,
    payments,
    oauthClients,
    pinnedDms,
    authorizedIps,
    activityData,
  } = params;
  return {
    // Core account record.
    user: {
      id: user.id.toString(),
      username: user.username,
      discriminator: user.discriminator,
      bot: user.isBot,
      system: user.isSystem,
      email: user.email,
      email_verified: user.emailVerified,
      email_bounced: user.emailBounced,
      phone: user.phone,
      avatar_hash: user.avatarHash,
      // Hashes prefixed 'a_' denote animated assets served as GIF; others as PNG.
      avatar_url: user.avatarHash
        ? `${Config.endpoints.media}/avatars/${userId}/${user.avatarHash}.${user.avatarHash.startsWith('a_') ? 'gif' : 'png'}`
        : null,
      banner_hash: user.bannerHash,
      banner_url: user.bannerHash
        ? `${Config.endpoints.media}/banners/${userId}/${user.bannerHash}.${user.bannerHash.startsWith('a_') ? 'gif' : 'png'}`
        : null,
      bio: user.bio,
      pronouns: user.pronouns,
      accent_color: user.accentColor,
      date_of_birth: user.dateOfBirth,
      locale: user.locale,
      flags: user.flags.toString(),
      premium_type: user.premiumType,
      premium_since: user.premiumSince?.toISOString() ?? null,
      premium_until: user.premiumUntil?.toISOString() ?? null,
      premium_lifetime_sequence: user.premiumLifetimeSequence,
      stripe_customer_id: user.stripeCustomerId,
      stripe_subscription_id: user.stripeSubscriptionId,
      terms_agreed_at: user.termsAgreedAt?.toISOString() ?? null,
      privacy_agreed_at: user.privacyAgreedAt?.toISOString() ?? null,
      last_active_at: user.lastActiveAt?.toISOString() ?? null,
      // Account creation time is derived from the snowflake ID, not a column.
      created_at: snowflakeToDate(user.id).toISOString(),
      mfa_enabled: user.authenticatorTypes.size > 0,
      authenticator_types: Array.from(user.authenticatorTypes),
    },
    auth_sessions: authSessions.map((session) => {
      const {clientOs, clientPlatform} = resolveSessionClientInfo({
        userAgent: session.clientUserAgent,
        isDesktopClient: session.clientIsDesktop,
      });
      return {
        created_at: session.createdAt.toISOString(),
        approx_last_used_at: session.approximateLastUsedAt?.toISOString() ?? null,
        client_ip: session.clientIp,
        client_os: clientOs,
        client_user_agent: session.clientUserAgent,
        client_platform: clientPlatform,
      };
    }),
    relationships: relationships.map((rel) => ({
      target_user_id: rel.targetUserId.toString(),
      type: rel.type,
      nickname: rel.nickname,
      since: rel.since?.toISOString() ?? null,
    })),
    notes: Array.from(userNotes.entries()).map(([targetUserId, note]) => ({
      target_user_id: targetUserId.toString(),
      note,
    })),
    user_settings: userSettings
      ? {
          locale: userSettings.locale,
          theme: userSettings.theme,
          status: userSettings.status,
          custom_status: userSettings.customStatus
            ? {
                text: userSettings.customStatus.text,
                emoji_id: userSettings.customStatus.emojiId?.toString() ?? null,
                emoji_name: userSettings.customStatus.emojiName,
                emoji_animated: userSettings.customStatus.emojiAnimated,
                expires_at: userSettings.customStatus.expiresAt?.toISOString() ?? null,
              }
            : null,
          developer_mode: userSettings.developerMode,
          message_display_compact: userSettings.compactMessageDisplay,
          animate_emoji: userSettings.animateEmoji,
          animate_stickers: userSettings.animateStickers,
          gif_auto_play: userSettings.gifAutoPlay,
          render_embeds: userSettings.renderEmbeds,
          render_reactions: userSettings.renderReactions,
          render_spoilers: userSettings.renderSpoilers,
          inline_attachment_media: userSettings.inlineAttachmentMedia,
          inline_embed_media: userSettings.inlineEmbedMedia,
          explicit_content_filter: userSettings.explicitContentFilter,
          friend_source_flags: userSettings.friendSourceFlags,
          default_guilds_restricted: userSettings.defaultGuildsRestricted,
          bot_default_guilds_restricted: userSettings.botDefaultGuildsRestricted,
          restricted_guilds: Array.from(userSettings.restrictedGuilds).map((id) => id.toString()),
          bot_restricted_guilds: Array.from(userSettings.botRestrictedGuilds).map((id) => id.toString()),
          guild_positions: userSettings.guildPositions.map((id) => id.toString()),
          guild_folders: userSettings.guildFolders,
          afk_timeout: userSettings.afkTimeout,
          time_format: userSettings.timeFormat,
        }
      : null,
    // The filter guarantees `member` is non-null, which justifies the
    // non-null assertions in the map below.
    guild_memberships: guildMemberships
      .filter((gm) => gm.member !== null)
      .map(({member, guild, guildId}) => ({
        guild_id: guildId.toString(),
        guild_name: guild?.name ?? null,
        joined_at: member!.joinedAt.toISOString(),
        nick: member!.nickname,
        avatar_hash: member!.avatarHash,
        avatar_url: member!.avatarHash
          ? `${Config.endpoints.media}/guilds/${guildId}/users/${userId}/avatars/${member!.avatarHash}`
          : null,
        banner_hash: member!.bannerHash,
        banner_url: member!.bannerHash
          ? `${Config.endpoints.media}/guilds/${guildId}/users/${userId}/banners/${member!.bannerHash}`
          : null,
        role_ids: Array.from(member!.roleIds).map((id) => id.toString()),
      })),
    user_guild_settings: guildSettings
      .filter((settings) => settings !== null)
      .map((settings) => ({
        guild_id: settings!.guildId.toString(),
        message_notifications: settings!.messageNotifications,
        muted: settings!.muted,
        mobile_push: settings!.mobilePush,
        suppress_everyone: settings!.suppressEveryone,
        suppress_roles: settings!.suppressRoles,
        hide_muted_channels: settings!.hideMutedChannels,
      })),
    saved_messages: savedMessages.map((msg) => ({
      channel_id: msg.channelId.toString(),
      message_id: msg.messageId.toString(),
      saved_at: msg.savedAt.toISOString(),
    })),
    private_channels: privateChannels.map((channel) => ({
      channel_id: channel.id.toString(),
      type: channel.type,
      name: channel.name,
      icon_hash: channel.iconHash,
      owner_id: channel.ownerId?.toString() ?? null,
      recipient_ids: Array.from(channel.recipientIds).map((id) => id.toString()),
      last_message_id: channel.lastMessageId?.toString() ?? null,
    })),
    favorite_memes: favoriteMemes.map((meme) => ({
      meme_id: meme.id.toString(),
      name: meme.name,
      alt_text: meme.altText,
      tags: meme.tags,
      filename: meme.filename,
      content_type: meme.contentType,
      size: meme.size.toString(),
      width: meme.width,
      height: meme.height,
      duration: meme.duration,
    })),
    push_subscriptions: pushSubscriptions.map((sub) => ({
      subscription_id: sub.subscriptionId,
      endpoint: sub.endpoint,
      user_agent: sub.userAgent,
    })),
    webauthn_credentials: webAuthnCredentials.map((cred) => ({
      credential_id: cred.credentialId,
      name: cred.name,
      transports: cred.transports ? Array.from(cred.transports) : [],
      created_at: cred.createdAt.toISOString(),
      last_used_at: cred.lastUsedAt?.toISOString() ?? null,
    })),
    // Only aggregate counts are exported; the backup codes themselves are
    // deliberately never included.
    mfa_backup_codes: {
      total_count: mfaBackupCodes.length,
      consumed_count: mfaBackupCodes.filter((code) => code.consumed).length,
      remaining_count: mfaBackupCodes.filter((code) => !code.consumed).length,
    },
    gift_codes_created: createdGiftCodes.map((gift) => ({
      code: gift.code,
      duration_months: gift.durationMonths,
      created_at: gift.createdAt.toISOString(),
      redeemed_by_user_id: gift.redeemedByUserId?.toString() ?? null,
      redeemed_at: gift.redeemedAt?.toISOString() ?? null,
      stripe_payment_intent_id: gift.stripePaymentIntentId,
    })),
    payments: payments.map(mapPayment),
    oauth_applications: oauthClients.map(mapOAuthApplication),
    pinned_dms: pinnedDms.map((pin) => ({
      channel_id: pin.channel_id.toString(),
      sort_order: pin.sort_order,
    })),
    // Spreads the security-related keys (derived from authorized IPs and
    // activity tracking) directly into the top-level document.
    ...mapSecurityData({authorizedIps, activityData}),
  };
}
/**
 * Builds the export ZIP in a temp directory, uploads it to the harvests
 * bucket with an expiry, and returns the archive bytes, storage key, expiry,
 * and a presigned download URL. Temp files are removed in all cases.
 */
async function createAndUploadArchive(params: ArchiveParams): Promise<ArchiveResult> {
  const {
    userId,
    harvestId,
    isAdminArchive,
    userDataJsonBuffer,
    user,
    channelMessagesMap,
    payments,
    oauthClients,
    authorizedIps,
    activityData,
    storageService,
  } = params;
  const userIdString = userId.toString();
  const tempDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'fluxer-harvest-'));
  const zipPath = path.join(tempDir, `user-data-${userId}.zip`);
  let output: fs.WriteStream | null = null;
  try {
    const outputStream = fs.createWriteStream(zipPath);
    output = outputStream;
    // BUG FIX: register 'close'/'error' listeners BEFORE finalize(). The
    // previous code attached them only after `await archive.finalize()`, so
    // if the stream had already closed by then the promise never settled and
    // the task hung forever.
    const outputClosed = new Promise<void>((resolve, reject) => {
      outputStream.on('close', resolve);
      outputStream.on('error', reject);
    });
    const archive = archiver('zip', {zlib: {level: 9}});
    archive.pipe(outputStream);
    archive.append(userDataJsonBuffer, {name: 'user.json'});
    if (user.avatarHash) {
      const avatarArchiveName = `assets/user/avatar.${getAnimatedAssetExtension(user.avatarHash)}`;
      const avatarStorageKey = buildHashedAssetKey('avatars', userIdString, user.avatarHash);
      await appendAssetToArchive({
        archive,
        storageService,
        storageKey: avatarStorageKey,
        archiveName: avatarArchiveName,
        label: 'user avatar',
        subjectId: userIdString,
      });
    }
    if (user.bannerHash) {
      const bannerArchiveName = `assets/user/banner.${getAnimatedAssetExtension(user.bannerHash)}`;
      const bannerStorageKey = buildHashedAssetKey('banners', userIdString, user.bannerHash);
      await appendAssetToArchive({
        archive,
        storageService,
        storageKey: bannerStorageKey,
        archiveName: bannerArchiveName,
        label: 'user banner',
        subjectId: userIdString,
      });
    }
    for (const [channelId, messages] of channelMessagesMap.entries()) {
      // ISO-8601 timestamps sort chronologically under lexicographic compare.
      messages.sort((a, b) => a.timestamp.localeCompare(b.timestamp));
      const messagesJson = JSON.stringify(messages, null, 2);
      archive.append(messagesJson, {name: `channels/${channelId}/messages.json`});
    }
    archive.append(JSON.stringify(payments.map(mapPayment), null, 2), {
      name: 'payments/payment_history.json',
    });
    archive.append(JSON.stringify({applications: oauthClients.map(mapOAuthApplication)}, null, 2), {
      name: 'integrations/oauth.json',
    });
    archive.append(JSON.stringify(mapSecurityData({authorizedIps, activityData}), null, 2), {
      name: 'account/security.json',
    });
    await archive.finalize();
    await outputClosed;
    const zipBuffer = await fs.promises.readFile(zipPath);
    const storageKey = `exports/${userId}/${harvestId}/user-data.zip`;
    // Admin archives are retained for a year; user exports use the standard TTL.
    const expiresAt = new Date(Date.now() + (isAdminArchive ? ms('1 year') : ZIP_EXPIRY_MS));
    await storageService.uploadObject({
      bucket: Config.s3.buckets.harvests,
      key: storageKey,
      body: zipBuffer,
      contentType: 'application/zip',
      expiresAt: expiresAt,
    });
    // NOTE(review): the presigned URL always uses ZIP_EXPIRY_MS, even for
    // admin archives whose stored object lives longer — confirm intended.
    const downloadUrl = await storageService.getPresignedDownloadURL({
      bucket: Config.s3.buckets.harvests,
      key: storageKey,
      expiresIn: ZIP_EXPIRY_MS / 1000,
    });
    return {zipBuffer, storageKey, expiresAt, downloadUrl};
  } finally {
    // Best-effort cleanup of the stream and temp directory even on failure.
    if (output && !output.destroyed) {
      output.destroy();
    }
    await fs.promises.rm(tempDir, {recursive: true, force: true});
  }
}
/**
 * Worker task that builds a full user-data export ("harvest"): gathers the
 * user's messages and account metadata, zips them with media assets, uploads
 * the archive, and records progress on the harvest (or admin-archive) row.
 *
 * Idempotent: exits early if the harvest is already marked completed.
 */
const harvestUserData: WorkerTaskHandler = async (payload, helpers) => {
  const validated = PayloadSchema.parse(payload);
  helpers.logger.debug({payload}, 'Processing harvestUserData task');
  const startTime = Date.now();
  const userId = createUserID(BigInt(validated.userId));
  const harvestId = BigInt(validated.harvestId);
  Logger.info({userId, harvestId, startTime: new Date(startTime).toISOString()}, 'Task started');
  const {
    channelRepository,
    guildRepository,
    userRepository,
    userHarvestRepository,
    adminArchiveRepository,
    favoriteMemeRepository,
    paymentRepository,
    applicationRepository,
    storageService,
    emailService,
  } = getWorkerDependencies();
  // Admin-requested archives and user-requested harvests are stored in
  // different repositories but follow the same lifecycle.
  const adminRequestedBy = validated.adminRequestedBy ? BigInt(validated.adminRequestedBy) : null;
  const isAdminArchive = adminRequestedBy !== null;
  const existingHarvest = isAdminArchive
    ? await adminArchiveRepository.findBySubjectAndArchiveId('user', userId, harvestId)
    : await userHarvestRepository.findByUserAndHarvestId(userId, harvestId);
  if (isAdminArchive && !existingHarvest) {
    throw new Error(`Admin archive ${harvestId.toString()} for user ${userId.toString()} not found`);
  }
  if (existingHarvest?.completedAt) {
    Logger.info(
      {userId, harvestId, completedAt: existingHarvest.completedAt},
      'Harvest already completed, skipping (idempotent early bailout)',
    );
    return;
  }
  const adminArchive = isAdminArchive ? (existingHarvest as AdminArchive) : null;
  // Uniform progress interface that routes lifecycle updates to whichever
  // repository backs this run (admin archive vs user harvest).
  const progressReporter = {
    markAsStarted: () =>
      isAdminArchive && adminArchive
        ? adminArchiveRepository.markAsStarted(adminArchive)
        : userHarvestRepository.markAsStarted(userId, harvestId),
    updateProgress: (progressPercent: number, progressStep: string) =>
      isAdminArchive && adminArchive
        ? adminArchiveRepository.updateProgress(adminArchive, progressPercent, progressStep)
        : userHarvestRepository.updateProgress(userId, harvestId, progressPercent, progressStep),
    markAsCompleted: (storageKey: string, fileSize: bigint, expiresAt: Date) =>
      isAdminArchive && adminArchive
        ? adminArchiveRepository.markAsCompleted(adminArchive, storageKey, fileSize, expiresAt)
        : userHarvestRepository.markAsCompleted(userId, harvestId, storageKey, fileSize, expiresAt),
    markAsFailed: (message: string) =>
      isAdminArchive && adminArchive
        ? adminArchiveRepository.markAsFailed(adminArchive, message)
        : userHarvestRepository.markAsFailed(userId, harvestId, message),
    // Only user-requested harvests email the requester on completion.
    shouldSendEmail: !isAdminArchive,
  };
  try {
    await progressReporter.markAsStarted();
    Logger.debug({userId, harvestId, elapsed: Date.now() - startTime}, 'Starting user data harvest');
    await progressReporter.updateProgress(INITIAL_PROGRESS, 'Harvesting messages');
    Logger.debug({elapsed: Date.now() - startTime}, 'Set progress to INITIAL_PROGRESS');
    const {channelMessagesMap, totalMessages} = await harvestMessages(channelRepository, userId, startTime);
    if (totalMessages > 0) {
      // Progress scales with message count but is capped so the later phases
      // still have headroom.
      const progress = Math.min(INITIAL_PROGRESS + Math.floor((totalMessages / 10000) * 50), MESSAGES_PROGRESS_MAX);
      await progressReporter.updateProgress(progress, `Harvested ${totalMessages} messages`);
    }
    Logger.debug(
      {
        userId,
        harvestId,
        channelCount: channelMessagesMap.size,
        totalMessages,
        elapsed: Date.now() - startTime,
      },
      'Harvested all messages',
    );
    await progressReporter.updateProgress(METADATA_PROGRESS, 'Collecting user metadata');
    Logger.debug({elapsed: Date.now() - startTime}, 'Starting metadata collection');
    const user = await userRepository.findUnique(userId);
    if (!user) {
      throw new Error(`User ${userId} not found`);
    }
    // Fetch every category of user-owned data in parallel.
    const [
      authSessions,
      relationships,
      userNotes,
      userSettings,
      guildIds,
      savedMessages,
      privateChannels,
      favoriteMemes,
      pushSubscriptions,
      webAuthnCredentials,
      mfaBackupCodes,
      createdGiftCodes,
      payments,
      oauthClients,
      pinnedDms,
      authorizedIps,
      activityData,
    ] = await Promise.all([
      userRepository.listAuthSessions(userId) as Promise<Array<AuthSession>>,
      userRepository.listRelationships(userId) as Promise<Array<Relationship>>,
      userRepository.getUserNotes(userId) as Promise<Map<UserID, string>>,
      userRepository.findSettings(userId) as Promise<UserSettings | null>,
      userRepository.getUserGuildIds(userId) as Promise<Array<GuildID>>,
      userRepository.listSavedMessages(userId, 1000) as Promise<Array<SavedMessage>>,
      userRepository.listPrivateChannels(userId) as Promise<Array<Channel>>,
      favoriteMemeRepository.findByUserId(userId) as Promise<Array<FavoriteMeme>>,
      userRepository.listPushSubscriptions(userId) as Promise<Array<PushSubscription>>,
      userRepository.listWebAuthnCredentials(userId) as Promise<Array<WebAuthnCredential>>,
      userRepository.listMfaBackupCodes(userId) as Promise<Array<MfaBackupCode>>,
      userRepository.findGiftCodesByCreator(userId) as Promise<Array<GiftCode>>,
      paymentRepository.findPaymentsByUserId(userId) as Promise<Array<Payment>>,
      applicationRepository.listApplicationsByOwner(userId) as Promise<Array<Application>>,
      userRepository.getPinnedDmsWithDetails(userId) as Promise<Array<{channel_id: bigint; sort_order: number}>>,
      userRepository.getAuthorizedIps(userId) as Promise<Array<{ip: string}>>,
      userRepository.getActivityTracking(userId) as Promise<{
        last_active_at: Date | null;
        last_active_ip: string | null;
      }>,
    ]);
    const guilds = await guildRepository.listGuilds(guildIds);
    const guildsMap = new Map(guilds.map((guild) => [guild.id.toString(), guild]));
    const guildMemberships = await Promise.all(
      guildIds.map(async (guildId: GuildID) => {
        const member = await guildRepository.getMember(guildId, userId);
        const guild = guildsMap.get(guildId.toString()) ?? null;
        return {member, guild, guildId};
      }),
    );
    const guildSettings = await Promise.all(
      guildIds.map((guildId: GuildID) => userRepository.findGuildSettings(userId, guildId)),
    );
    const userData = buildUserDataJson({
      user,
      userId,
      authSessions,
      relationships,
      userNotes,
      userSettings,
      guildMemberships,
      guildSettings,
      savedMessages,
      privateChannels,
      favoriteMemes,
      pushSubscriptions,
      webAuthnCredentials,
      mfaBackupCodes,
      createdGiftCodes,
      payments,
      oauthClients,
      pinnedDms,
      authorizedIps,
      activityData,
    });
    const userDataJsonBuffer = Buffer.from(JSON.stringify(userData, null, 2), 'utf-8');
    Logger.debug({userId, harvestId, elapsed: Date.now() - startTime}, 'Collected user metadata');
    await progressReporter.updateProgress(METADATA_PROGRESS + 5, 'Downloading media assets');
    await progressReporter.updateProgress(ZIP_PROGRESS, 'Creating ZIP archive');
    Logger.debug({elapsed: Date.now() - startTime}, 'Starting ZIP creation');
    const {zipBuffer, storageKey, expiresAt, downloadUrl} = await createAndUploadArchive({
      userId,
      harvestId,
      isAdminArchive,
      userDataJsonBuffer,
      user,
      channelMessagesMap,
      payments,
      oauthClients,
      authorizedIps,
      activityData,
      storageService,
    });
    Logger.debug(
      {userId, harvestId, zipSize: zipBuffer.length, elapsed: Date.now() - startTime},
      'Uploaded final ZIP to S3 with TTL',
    );
    await progressReporter.markAsCompleted(storageKey, BigInt(zipBuffer.length), expiresAt);
    Logger.debug({userId, harvestId}, 'Marked harvest as completed');
    if (progressReporter.shouldSendEmail && user.email && Config.email.enabled) {
      await emailService.sendHarvestCompletedEmail(
        user.email,
        user.username,
        downloadUrl,
        totalMessages,
        zipBuffer.length,
        expiresAt,
        user.locale,
      );
      Logger.debug({userId, harvestId, email: user.email, totalMessages}, 'Sent harvest completion email');
    }
    await progressReporter.updateProgress(COMPLETE_PROGRESS, 'Completed');
    Logger.info(
      {
        userId,
        harvestId,
        totalElapsed: Date.now() - startTime,
        totalElapsedSeconds: Math.round((Date.now() - startTime) / 1000),
      },
      'User data harvest completed successfully',
    );
  } catch (error) {
    Logger.error(
      {
        error,
        userId,
        harvestId,
        elapsed: Date.now() - startTime,
      },
      'Failed to harvest user data',
    );
    // Record the failure on the harvest row, then rethrow so the queue
    // observes the failed run.
    await progressReporter.markAsFailed(String(error));
    throw error;
  }
};
export default harvestUserData;

View File

@@ -0,0 +1,111 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {UserID} from '@fluxer/api/src/BrandedTypes';
import {createChannelID, createMessageID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {getMessageSearchService} from '@fluxer/api/src/SearchFactory';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Job payload: the channel to index plus an optional pagination cursor
// pointing at the oldest message handled by the previous batch.
const PayloadSchema = z.object({
  channelId: z.string(),
  lastMessageId: z.string().nullable().optional(),
});
// Messages fetched and indexed per job run before re-enqueueing.
const BATCH_SIZE = 100;
/**
 * Indexes one batch of a channel's messages into the message search service,
 * then re-enqueues itself with a cursor until the channel's history is
 * exhausted, at which point the channel row is stamped with `indexed_at`.
 */
const indexChannelMessages: WorkerTaskHandler = async (payload, helpers) => {
  const validated = PayloadSchema.parse(payload);
  const searchService = getMessageSearchService();
  if (!searchService) {
    // Search is disabled in this deployment; nothing to do.
    return;
  }
  const channelId = createChannelID(BigInt(validated.channelId));
  const {channelRepository, userRepository} = getWorkerDependencies();
  // Stamps the channel row so future scans know indexing has finished.
  // Extracted because the original duplicated this upsert in two branches.
  const markChannelIndexed = async (): Promise<void> => {
    const channel = await channelRepository.findUnique(channelId);
    if (channel) {
      await channelRepository.upsert({
        ...channel.toRow(),
        indexed_at: new Date(),
      });
    }
  };
  try {
    const lastMessageId = validated.lastMessageId ? createMessageID(BigInt(validated.lastMessageId)) : undefined;
    const messages = await channelRepository.listMessages(channelId, lastMessageId, BATCH_SIZE);
    if (messages.length === 0) {
      await markChannelIndexed();
      return;
    }
    // Resolve each distinct author once, in parallel, to tag bot authorship.
    const authorIds = [...new Set(messages.map((m) => m.authorId).filter((id): id is UserID => id !== null))];
    const authors = await Promise.all(authorIds.map((authorId) => userRepository.findUnique(authorId)));
    const authorBotMap = new Map<UserID, boolean>();
    for (const [index, user] of authors.entries()) {
      if (user) {
        authorBotMap.set(authorIds[index]!, user.isBot);
      }
    }
    await searchService.indexMessages(messages, authorBotMap);
    Logger.debug(
      {
        channelId: channelId.toString(),
        messagesIndexed: messages.length,
        hasMore: messages.length === BATCH_SIZE,
      },
      'Indexed message batch',
    );
    if (messages.length === BATCH_SIZE) {
      // A full batch means older messages may remain; continue from the oldest.
      const oldestMessageId = messages[messages.length - 1]!.id;
      await helpers.addJob(
        'indexChannelMessages',
        {
          channelId: validated.channelId,
          lastMessageId: oldestMessageId.toString(),
        },
        {
          jobKey: `index-channel-${validated.channelId}-${oldestMessageId}`,
          maxAttempts: 3,
        },
      );
    } else {
      Logger.debug({channelId: channelId.toString()}, 'Channel indexing complete');
      await markChannelIndexed();
    }
  } catch (error) {
    Logger.error({error, channelId: channelId.toString()}, 'Failed to index channel messages');
    throw error;
  }
};
export default indexChannelMessages;

View File

@@ -0,0 +1,121 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {UserID} from '@fluxer/api/src/BrandedTypes';
import {createGuildID, createUserID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import type {User} from '@fluxer/api/src/models/User';
import {getGuildMemberSearchService} from '@fluxer/api/src/SearchFactory';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Job payload: the guild to index plus an optional pagination cursor
// pointing at the last member handled by the previous batch.
const PayloadSchema = z.object({
  guildId: z.string(),
  lastUserId: z.string().nullable().optional(),
});
// Members fetched and indexed per job run before re-enqueueing.
const BATCH_SIZE = 100;
/**
 * Indexes one batch of a guild's members into the member search service, then
 * re-enqueues itself with a cursor until membership is exhausted, at which
 * point the guild row is stamped with `members_indexed_at`.
 */
const indexGuildMembers: WorkerTaskHandler = async (payload, helpers) => {
  const validated = PayloadSchema.parse(payload);
  const searchService = getGuildMemberSearchService();
  if (!searchService) {
    // Search is disabled in this deployment; nothing to do.
    return;
  }
  const guildId = createGuildID(BigInt(validated.guildId));
  const {guildRepository, userRepository} = getWorkerDependencies();
  // Stamps the guild row so future scans know member indexing has finished.
  // Extracted because the original duplicated this upsert in two branches.
  const markMembersIndexed = async (): Promise<void> => {
    const guild = await guildRepository.findUnique(guildId);
    if (guild) {
      await guildRepository.upsert({
        ...guild.toRow(),
        members_indexed_at: new Date(),
      });
    }
  };
  try {
    const lastUserId = validated.lastUserId ? createUserID(BigInt(validated.lastUserId)) : undefined;
    const members = await guildRepository.listMembersPaginated(guildId, BATCH_SIZE, lastUserId);
    if (members.length === 0) {
      await markMembersIndexed();
      return;
    }
    // Resolve each distinct member's user record once, in parallel.
    const userIds = [...new Set(members.map((m) => m.userId))];
    const users = await Promise.all(userIds.map((uid) => userRepository.findUnique(uid)));
    const userMap = new Map<UserID, User>();
    for (const [index, user] of users.entries()) {
      if (user) {
        userMap.set(userIds[index]!, user);
      }
    }
    // Members whose user record is missing are dropped from the index batch.
    const membersWithUsers = members
      .map((member) => {
        const user = userMap.get(member.userId);
        return user ? {member, user} : null;
      })
      .filter((item): item is NonNullable<typeof item> => item != null);
    if (membersWithUsers.length > 0) {
      await searchService.indexMembers(membersWithUsers);
    }
    Logger.debug(
      {
        guildId: guildId.toString(),
        membersIndexed: membersWithUsers.length,
        hasMore: members.length === BATCH_SIZE,
      },
      'Indexed guild member batch',
    );
    if (members.length === BATCH_SIZE) {
      // A full batch means more members may remain; continue from the last one.
      const lastMember = members[members.length - 1]!;
      await helpers.addJob(
        'indexGuildMembers',
        {
          guildId: validated.guildId,
          lastUserId: lastMember.userId.toString(),
        },
        {
          jobKey: `index-guild-members-${validated.guildId}-${lastMember.userId}`,
          maxAttempts: 3,
        },
      );
    } else {
      Logger.debug({guildId: guildId.toString()}, 'Guild member indexing complete');
      await markMembersIndexed();
    }
  } catch (error) {
    Logger.error({error, guildId: guildId.toString()}, 'Failed to index guild members');
    throw error;
  }
};
export default indexGuildMembers;

View File

@@ -0,0 +1,234 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {ChannelID, MessageID} from '@fluxer/api/src/BrandedTypes';
import {createChannelID, createMessageID, createUserID} from '@fluxer/api/src/BrandedTypes';
import {withSpan} from '@fluxer/api/src/telemetry/Tracing';
import {chunkArray, createBulkDeleteDispatcher} from '@fluxer/api/src/worker/tasks/utils/MessageDeletion';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {recordCounter, recordHistogram} from '@fluxer/telemetry/src/Metrics';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {seconds} from 'itty-time';
import {z} from 'zod';
// Job payload: which admin requested the shred, whose messages are targeted,
// and the explicit (channel, message) pairs to delete.
const PayloadSchema = z.object({
  job_id: z.string().min(1),
  admin_user_id: z.string().min(1),
  target_user_id: z.string().min(1),
  entries: z.array(
    z.object({
      channel_id: z.string(),
      message_id: z.string(),
    }),
  ),
});
// Entries handled per outer slice of the input list.
const INPUT_SLICE_SIZE = 500;
// Authorship checks issued concurrently per chunk.
const VALIDATION_CHUNK_SIZE = 25;
// Deletions issued concurrently per chunk (also the dispatcher batch size).
const DELETION_CHUNK_SIZE = 10;
// How long the job's progress record remains readable in KV.
const STATUS_TTL_SECONDS = seconds('1 hour');
/**
 * Deletes ("shreds") a specific set of messages authored by a target user on
 * behalf of an admin. Progress counters are persisted to KV under
 * `message_shred_status:<job_id>` so callers can poll the job.
 *
 * Entries are de-duplicated, validated for authorship in chunks, deleted in
 * smaller chunks, and each deletion is broadcast via the bulk-delete
 * dispatcher.
 */
const messageShredTask: WorkerTaskHandler = async (payload, helpers) => {
  const data = PayloadSchema.parse(payload);
  helpers.logger.debug({payload: data}, 'Processing messageShred task');
  const start = Date.now();
  const targetUserIdStr = data.target_user_id;
  return await withSpan(
    {
      name: 'fluxer.worker.message_shred',
      attributes: {job_id: data.job_id, target_user_id: targetUserIdStr},
    },
    async () => {
      try {
        const {kvClient, channelRepository, gatewayService} = getWorkerDependencies();
        const progressKey = `message_shred_status:${data.job_id}`;
        const requestedEntries = data.entries.length;
        const startedAt = new Date().toISOString();
        let skippedCount = 0;
        let processedCount = 0;
        let totalValidCount = 0;
        // Writes the current counters to KV with a TTL so stale jobs expire.
        const persistStatus = async (
          status: 'in_progress' | 'completed' | 'failed',
          extra?: {completed_at?: string; failed_at?: string; error?: string},
        ) => {
          await kvClient.set(
            progressKey,
            JSON.stringify({
              status,
              requested: requestedEntries,
              total: totalValidCount,
              processed: processedCount,
              skipped: skippedCount,
              started_at: startedAt,
              ...extra,
            }),
            'EX',
            STATUS_TTL_SECONDS,
          );
        };
        await persistStatus('in_progress');
        const authorId = createUserID(BigInt(data.target_user_id));
        // Tracks (channel, message) pairs already handled so duplicate
        // payload entries are skipped rather than double-deleted.
        const seen = new Set<string>();
        const bulkDeleteDispatcher = createBulkDeleteDispatcher({
          channelRepository,
          gatewayService,
          batchSize: DELETION_CHUNK_SIZE,
        });
        const processSlice = async (entriesSlice: Array<{channel_id: string; message_id: string}>) => {
          const typedSlice: Array<{channelId: ChannelID; messageId: MessageID}> = [];
          for (const entry of entriesSlice) {
            const key = `${entry.channel_id}:${entry.message_id}`;
            if (seen.has(key)) {
              skippedCount += 1;
              continue;
            }
            seen.add(key);
            try {
              typedSlice.push({
                channelId: createChannelID(BigInt(entry.channel_id)),
                messageId: createMessageID(BigInt(entry.message_id)),
              });
            } catch (error) {
              // Non-numeric IDs make BigInt() throw; skip the entry instead
              // of aborting the whole job.
              skippedCount += 1;
              helpers.logger.warn({error, entry}, 'Skipping malformed entry in message shred job');
            }
          }
          if (typedSlice.length === 0) {
            return;
          }
          for (const validationChunk of chunkArray(typedSlice, VALIDATION_CHUNK_SIZE)) {
            // Only delete messages the target user actually authored.
            const existenceChecks = validationChunk.map(
              ({channelId, messageId}: {channelId: ChannelID; messageId: MessageID}) =>
                channelRepository.messages.authorHasMessage(authorId, channelId, messageId),
            );
            const results = await Promise.all(existenceChecks);
            const deletableChunk: Array<{channelId: ChannelID; messageId: MessageID}> = [];
            for (let i = 0; i < validationChunk.length; i++) {
              if (results[i]) {
                deletableChunk.push(validationChunk[i]!);
              } else {
                skippedCount += 1;
              }
            }
            if (deletableChunk.length === 0) {
              await persistStatus('in_progress');
              continue;
            }
            totalValidCount += deletableChunk.length;
            await persistStatus('in_progress');
            for (const deletionChunk of chunkArray(deletableChunk, DELETION_CHUNK_SIZE)) {
              await Promise.all(
                deletionChunk.map(({channelId, messageId}: {channelId: ChannelID; messageId: MessageID}) =>
                  channelRepository.deleteMessage(channelId, messageId, authorId),
                ),
              );
              processedCount += deletionChunk.length;
              await persistStatus('in_progress');
              for (const {channelId, messageId} of deletionChunk) {
                bulkDeleteDispatcher.track(channelId, messageId);
              }
              await bulkDeleteDispatcher.flush(true);
            }
          }
        };
        for (const entriesSlice of chunkArray(data.entries, INPUT_SLICE_SIZE)) {
          await processSlice(entriesSlice);
        }
        // BUG FIX: flush remaining tracked deletions BEFORE reporting
        // completion. The original persisted 'completed' first, so a flush
        // failure left a job marked completed whose gateway events were
        // never dispatched.
        await bulkDeleteDispatcher.flush(true);
        await persistStatus('completed', {
          completed_at: new Date().toISOString(),
        });
        const duration = Date.now() - start;
        recordCounter({
          name: 'fluxer.worker.messages.shred_processed',
          dimensions: {
            status: 'success',
            job_id: data.job_id,
            target_user_id: targetUserIdStr,
          },
          value: processedCount,
        });
        recordHistogram({
          name: 'fluxer.worker.message_shred.duration',
          valueMs: duration,
          dimensions: {
            job_id: data.job_id,
            target_user_id: targetUserIdStr,
          },
        });
        // NOTE(review): the two histograms below put plain counts in the
        // `valueMs` field, not durations — confirm the metrics API is
        // unit-agnostic here.
        recordHistogram({
          name: 'fluxer.worker.message_shred.processed_count',
          valueMs: processedCount,
          dimensions: {
            target_user_id: targetUserIdStr,
          },
        });
        recordHistogram({
          name: 'fluxer.worker.message_shred.skipped_count',
          valueMs: skippedCount,
          dimensions: {
            target_user_id: targetUserIdStr,
          },
        });
      } catch (error) {
        recordCounter({
          name: 'fluxer.worker.messages.shred_processed',
          dimensions: {
            status: 'error',
            job_id: data.job_id,
            target_user_id: targetUserIdStr,
            error_type: error instanceof Error ? error.name : 'unknown',
          },
        });
        throw error;
      }
    },
  );
};
export default messageShredTask;

View File

@@ -0,0 +1,133 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {Config} from '@fluxer/api/src/Config';
import type {AssetDeletionQueue} from '@fluxer/api/src/infrastructure/AssetDeletionQueue';
import type {IPurgeQueue} from '@fluxer/api/src/infrastructure/CloudflarePurgeQueue';
import type {QueuedAssetDeletion} from '@fluxer/api/src/infrastructure/IAssetDeletionQueue';
import type {IStorageService} from '@fluxer/api/src/infrastructure/IStorageService';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
// Queue items pulled per round trip to the deletion queue.
const BATCH_SIZE = 50;
// Upper bound on items handled in a single task run; anything beyond this
// waits for the next scheduled run.
const MAX_ITEMS_PER_RUN = 500;
const processAssetDeletionQueue: WorkerTaskHandler = async (_payload, _helpers) => {
const {assetDeletionQueue, purgeQueue, storageService} = getWorkerDependencies();
const queueSize = await assetDeletionQueue.getQueueSize();
if (queueSize === 0) {
Logger.debug('Asset deletion queue is empty');
return;
}
Logger.info({queueSize}, 'Starting asset deletion queue processing');
let totalProcessed = 0;
let totalDeleted = 0;
let totalFailed = 0;
let totalCdnPurged = 0;
while (totalProcessed < MAX_ITEMS_PER_RUN) {
const batch = await assetDeletionQueue.getBatch(BATCH_SIZE);
if (batch.length === 0) {
break;
}
const results = await Promise.allSettled(
batch.map((item) => processItem(item, storageService, purgeQueue, assetDeletionQueue)),
);
for (let i = 0; i < results.length; i++) {
const result = results[i]!;
const item = batch[i]!;
if (result.status === 'fulfilled') {
totalDeleted++;
if (item.cdnUrl) {
totalCdnPurged++;
}
} else {
totalFailed++;
Logger.error(
{error: result.reason, s3Key: item.s3Key, cdnUrl: item.cdnUrl},
'Failed to process asset deletion',
);
}
}
totalProcessed += batch.length;
}
const remainingSize = await assetDeletionQueue.getQueueSize();
Logger.info(
{
totalProcessed,
totalDeleted,
totalFailed,
totalCdnPurged,
remainingSize,
},
'Finished asset deletion queue processing',
);
if (totalFailed > 0) {
throw new Error(
`Asset deletion queue processing completed with ${totalFailed} failures out of ${totalProcessed} items`,
);
}
};
/**
 * Handles a single queued asset deletion: removes the object from the CDN
 * bucket (tolerating already-deleted objects) and queues its CDN URL for
 * purge. On any other failure the item is requeued and the error rethrown.
 */
async function processItem(
  item: QueuedAssetDeletion,
  storageService: IStorageService,
  purgeQueue: IPurgeQueue,
  assetDeletionQueue: AssetDeletionQueue,
): Promise<void> {
  // S3-style "object does not exist" errors are treated as success.
  const isMissingObjectError = (error: unknown): boolean =>
    error instanceof Error &&
    (('name' in error && error.name === 'NotFound') ||
      ('code' in error && (error as {code?: string}).code === 'NoSuchKey'));
  try {
    if (item.s3Key) {
      try {
        await storageService.deleteObject(Config.s3.buckets.cdn, item.s3Key);
        Logger.debug({s3Key: item.s3Key, reason: item.reason}, 'Deleted asset from S3');
      } catch (error: unknown) {
        if (!isMissingObjectError(error)) {
          throw error;
        }
        Logger.debug({s3Key: item.s3Key}, 'Asset already deleted from S3 (NotFound)');
      }
    }
    if (item.cdnUrl) {
      await purgeQueue.addUrls([item.cdnUrl]);
      Logger.debug({cdnUrl: item.cdnUrl}, 'Queued asset CDN URL for Cloudflare purge');
    }
  } catch (error) {
    // Put the item back so a later run retries it, then propagate the failure.
    await assetDeletionQueue.requeueItem(item);
    throw error;
  }
}
export default processAssetDeletionQueue;

View File

@@ -0,0 +1,156 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {Config} from '@fluxer/api/src/Config';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {ms} from 'itty-time';
// Token bucket throttling Cloudflare API calls: up to 25 burst requests,
// refilled at 5 tokens per minute.
const TOKEN_BUCKET_CAPACITY = 25;
const TOKEN_REFILL_RATE = 5;
const TOKEN_REFILL_INTERVAL_MS = ms('1 minute');
// Maximum number of prefixes sent in a single purge_cache request.
const MAX_PREFIXES_PER_REQUEST = 100;
// Self-imposed pacing target used to space out successive purge requests.
const PREFIXES_PER_SECOND_LIMIT = 800;
// Minimal shape of Cloudflare's purge_cache JSON response that we inspect.
interface CloudflarePurgeApiResponse {
	success?: boolean;
}
/**
 * Worker task: drains queued Cloudflare cache-purge prefixes.
 *
 * Skips entirely when purging is disabled; logs (without failing the run)
 * when credentials are missing. API requests are paced by a shared token
 * bucket plus a per-batch delay; failed batches are pushed back onto the
 * queue for a later run, and a 429 aborts the run immediately.
 */
const processCloudflarePurgeQueue: WorkerTaskHandler = async (_payload, _helpers) => {
	if (!Config.cloudflare.purgeEnabled) {
		Logger.debug('Cloudflare cache purge is disabled, skipping queue processing');
		return;
	}
	if (!Config.cloudflare.zoneId || !Config.cloudflare.apiToken) {
		Logger.error('Cloudflare cache purge is enabled but credentials are missing');
		return;
	}
	const queue = getWorkerDependencies().purgeQueue;
	try {
		const queueSize = await queue.getQueueSize();
		if (queueSize === 0) {
			Logger.debug('Cloudflare purge queue is empty');
			return;
		}
		Logger.debug({queueSize}, 'Processing Cloudflare purge queue');
		let totalPrefixesPurged = 0;
		let totalRequestsMade = 0;
		// Stop once we've purged as many prefixes as were queued at the start;
		// anything added (or requeued) during the run waits for the next run.
		while (totalPrefixesPurged < queueSize) {
			// One token per API request; an empty bucket ends the run early and a
			// later run resumes after the bucket refills.
			const tokensConsumed = await queue.tryConsumeTokens(
				1,
				TOKEN_BUCKET_CAPACITY,
				TOKEN_REFILL_RATE,
				TOKEN_REFILL_INTERVAL_MS,
			);
			if (tokensConsumed < 1) {
				Logger.debug({totalRequestsMade, totalPrefixesPurged}, 'No tokens available, stopping for now');
				break;
			}
			const batch = await queue.getBatch(MAX_PREFIXES_PER_REQUEST);
			if (batch.length === 0) {
				Logger.debug('Cloudflare purge queue drained before reaching limits');
				break;
			}
			try {
				const response = await fetch(
					`https://api.cloudflare.com/client/v4/zones/${Config.cloudflare.zoneId}/purge_cache`,
					{
						method: 'POST',
						headers: {
							'Content-Type': 'application/json',
							Authorization: `Bearer ${Config.cloudflare.apiToken}`,
						},
						body: JSON.stringify({
							prefixes: batch,
						}),
						signal: AbortSignal.timeout(ms('30 seconds')),
					},
				);
				if (!response.ok) {
					const errorText = await response.text();
					Logger.error(
						{status: response.status, error: errorText, prefixCount: batch.length},
						'Failed to purge Cloudflare cache',
					);
					// Requeue the failed batch so it is retried on a later run.
					await queue.addUrls(batch);
					if (response.status === 429) {
						// Cloudflare says slow down; abandon the rest of this run.
						Logger.warn('Rate limited by Cloudflare, will retry later');
						break;
					}
					totalRequestsMade++;
					continue;
				}
				const result = (await response.json()) as CloudflarePurgeApiResponse;
				if (!result.success) {
					Logger.error({result, prefixCount: batch.length}, 'Cloudflare cache purge request failed');
					await queue.addUrls(batch);
					totalRequestsMade++;
					continue;
				}
				Logger.debug(
					{count: batch.length, totalPurged: totalPrefixesPurged + batch.length},
					'Successfully purged Cloudflare cache prefix batch',
				);
				totalPrefixesPurged += batch.length;
				totalRequestsMade++;
				// Space out requests to stay under PREFIXES_PER_SECOND_LIMIT.
				const delayMs = Math.max(0, (batch.length / PREFIXES_PER_SECOND_LIMIT) * 1000);
				if (delayMs > 0) {
					await new Promise((resolve) => setTimeout(resolve, delayMs));
				}
			} catch (error) {
				// Network/timeout errors: requeue the batch and keep processing.
				Logger.error({error, prefixCount: batch.length}, 'Error processing Cloudflare purge batch');
				await queue.addUrls(batch);
				totalRequestsMade++;
			}
		}
		const remainingQueueSize = await queue.getQueueSize();
		Logger.debug(
			{
				totalPrefixesPurged,
				totalRequestsMade,
				remainingQueueSize,
			},
			'Finished processing Cloudflare purge queue',
		);
	} catch (error) {
		Logger.error({error}, 'Error processing Cloudflare purge queue');
		throw error;
	}
};
export default processCloudflarePurgeQueue;

View File

@@ -0,0 +1,232 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {UserID} from '@fluxer/api/src/BrandedTypes';
import {Config} from '@fluxer/api/src/Config';
import type {KVActivityTracker} from '@fluxer/api/src/infrastructure/KVActivityTracker';
import {Logger} from '@fluxer/api/src/Logger';
import type {User} from '@fluxer/api/src/models/User';
import type {UserRepository} from '@fluxer/api/src/user/repositories/UserRepository';
import type {UserDeletionEligibilityService} from '@fluxer/api/src/user/services/UserDeletionEligibilityService';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {DeletionReasons} from '@fluxer/constants/src/Core';
import {UserFlags} from '@fluxer/constants/src/UserConstants';
import type {IEmailService} from '@fluxer/email/src/IEmailService';
import {TestEmailService} from '@fluxer/email/src/TestEmailService';
import type {IKVProvider} from '@fluxer/kv_client/src/IKVProvider';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {ms} from 'itty-time';
// Page size for paginated user scans during the inactivity sweep.
const BATCH_SIZE = 100;
/** Aggregate counters returned by one inactivity-deletion sweep. */
export interface InactivityCheckResult {
	warningsSent: number;
	deletionsScheduled: number;
	errors: number;
}
/**
 * Flags a user as self-deleted due to inactivity and records a pending
 * deletion that becomes effective after the configured grace period.
 */
async function scheduleDeletion(userRepository: UserRepository, user: User, userId: UserID): Promise<void> {
	const deleteAt = new Date(Date.now() + Config.deletionGracePeriodHours * ms('1 hour'));
	await userRepository.patchUpsert(
		userId,
		{
			flags: user.flags | UserFlags.SELF_DELETED,
			pending_deletion_at: deleteAt,
		},
		user.toRow(),
	);
	await userRepository.addPendingDeletion(userId, deleteAt, DeletionReasons.INACTIVITY);
	Logger.debug({userId, pendingDeletionAt: deleteAt, reason: 'INACTIVITY'}, 'Scheduled inactive user for deletion');
}
// Dependencies needed to evaluate a single user for inactivity handling.
interface ProcessUserDeps {
	userRepository: UserRepository;
	emailService: IEmailService;
	activityTracker: KVActivityTracker;
	deletionEligibilityService: UserDeletionEligibilityService;
}
/**
 * Evaluates a single user for the inactivity-deletion flow.
 *
 * Guard order: skip users already pending deletion, bots, and app-store
 * reviewer accounts; skip anyone active within the last 2 years; then defer
 * to the eligibility service. Eligible users first receive a warning email;
 * once the warning grace period has expired, deletion is scheduled.
 * Mutates `result` counters in place.
 */
async function processUser(user: User, deps: ProcessUserDeps, result: InactivityCheckResult): Promise<void> {
	const {userRepository, emailService, activityTracker, deletionEligibilityService} = deps;
	const userId = user.id;
	if (user.pendingDeletionAt) {
		Logger.debug({userId}, 'User already pending deletion, skipping');
		return;
	}
	if (user.isBot) {
		Logger.debug({userId}, 'User is a bot, skipping');
		return;
	}
	if (user.flags & UserFlags.APP_STORE_REVIEWER) {
		Logger.debug({userId}, 'User is an app store reviewer, skipping');
		return;
	}
	const lastActivity = await activityTracker.getActivity(userId);
	const now = new Date();
	// No recorded activity is treated as inactive forever (Infinity).
	const userInactiveMs = lastActivity ? now.getTime() - lastActivity.getTime() : Infinity;
	if (userInactiveMs < ms('2 years')) {
		return;
	}
	const isEligible = await deletionEligibilityService.isEligibleForInactivityDeletion(user);
	if (!isEligible) {
		Logger.debug({userId}, 'User not eligible for inactivity deletion');
		return;
	}
	const hasWarningSent = await deletionEligibilityService.hasWarningSent(userId);
	if (hasWarningSent) {
		const hasGracePeriodExpired = await deletionEligibilityService.hasWarningGracePeriodExpired(userId);
		if (hasGracePeriodExpired) {
			Logger.debug({userId}, 'Warning grace period expired, scheduling deletion');
			await scheduleDeletion(userRepository, user, userId);
			result.deletionsScheduled++;
		} else {
			Logger.debug({userId}, 'Warning grace period still active, skipping (idempotency check)');
		}
		return;
	}
	// Only warn when delivery is possible: real email is enabled, or we are in a
	// test run / using the in-memory test email service.
	const isTestRun = Config.dev.testModeEnabled;
	const usingTestEmailService = emailService instanceof TestEmailService;
	const canSendEmail = !!user.email && (Config.email.enabled || usingTestEmailService || isTestRun);
	if (!canSendEmail) {
		return;
	}
	try {
		// NOTE(review): the email quotes a deletion date 30 days out — presumably
		// this matches the warning grace period enforced by
		// hasWarningGracePeriodExpired; confirm the two stay in sync.
		const deletionDate = new Date(now.getTime() + ms('30 days'));
		const sent = await emailService.sendInactivityWarningEmail(
			user.email,
			user.username,
			deletionDate,
			lastActivity || new Date(0),
			user.locale,
		);
		if (sent) {
			// Mark only after a successful send so an earlier failure retries later.
			await deletionEligibilityService.markWarningSent(userId);
			result.warningsSent++;
			Logger.debug({userId, email: user.email}, 'Sent inactivity warning email');
		}
	} catch (emailError) {
		Logger.error({error: emailError, userId, email: user.email}, 'Failed to send inactivity warning email');
		result.errors++;
	}
}
// Dependencies for a full inactivity sweep. kvClient is part of the contract
// even though the core loop does not use it directly.
interface ProcessInactivityDeletionsDeps {
	kvClient: IKVProvider;
	userRepository: UserRepository;
	emailService: IEmailService;
	activityTracker: KVActivityTracker;
	deletionEligibilityService: UserDeletionEligibilityService;
}
/**
 * Scans every user in pages of BATCH_SIZE and applies the inactivity policy
 * via processUser. Rebuilds the KV activity tracker first when it reports
 * itself stale. Per-user failures are counted but never abort the sweep.
 *
 * @returns aggregate counters: warnings sent, deletions scheduled, errors.
 */
export async function processInactivityDeletionsCore(
	deps: ProcessInactivityDeletionsDeps,
): Promise<InactivityCheckResult> {
	const {userRepository, emailService, activityTracker, deletionEligibilityService} = deps;
	const result: InactivityCheckResult = {
		warningsSent: 0,
		deletionsScheduled: 0,
		errors: 0,
	};
	Logger.debug('Starting inactivity deletion check');
	const needsRebuild = await activityTracker.needsRebuild();
	if (needsRebuild) {
		Logger.info('Activity tracker needs rebuild, rebuilding from Cassandra');
		await activityTracker.rebuildActivities();
	}
	const userDeps: ProcessUserDeps = {userRepository, emailService, activityTracker, deletionEligibilityService};
	let lastUserId: UserID | undefined;
	let processedUsers = 0;
	while (true) {
		const users = await userRepository.listAllUsersPaginated(BATCH_SIZE, lastUserId);
		if (users.length === 0) {
			break;
		}
		for (const user of users) {
			try {
				await processUser(user, userDeps, result);
			} catch (userError) {
				Logger.error({error: userError, userId: user.id}, 'Failed to process inactive user');
				result.errors++;
			}
		}
		processedUsers += users.length;
		// Cursor pagination: resume after the last user id of this page.
		lastUserId = users[users.length - 1]!.id;
		// Progress log every 1000 users (pages are BATCH_SIZE = 100 wide).
		if (processedUsers % 1000 === 0) {
			Logger.debug(
				{processedUsers, warningsSent: result.warningsSent, deletionsScheduled: result.deletionsScheduled},
				'Inactivity deletion progress',
			);
		}
	}
	Logger.info(
		{
			processedUsers,
			warningsSent: result.warningsSent,
			deletionsScheduled: result.deletionsScheduled,
			errors: result.errors,
		},
		'Completed inactivity deletion processing',
	);
	return result;
}
/**
 * Worker task entry point: resolves dependencies from the worker context and
 * delegates the sweep to processInactivityDeletionsCore.
 */
const processInactivityDeletions: WorkerTaskHandler = async (_payload, helpers) => {
	helpers.logger.debug('Processing processInactivityDeletions task');
	const deps = getWorkerDependencies();
	await processInactivityDeletionsCore({
		kvClient: deps.kvClient,
		userRepository: deps.userRepository,
		emailService: deps.emailService,
		activityTracker: deps.activityTracker,
		deletionEligibilityService: deps.deletionEligibilityService,
	});
};
export default processInactivityDeletions;

View File

@@ -0,0 +1,84 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createUserID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
/**
 * Worker task: promotes due bulk-message-deletion requests from the schedule
 * queue into concrete `bulkDeleteUserMessages` worker jobs.
 *
 * For each ready entry: drop it when the user no longer exists or no longer
 * has a pending deletion; push it back when the user's recorded time is still
 * in the future; otherwise queue the deletion job and then remove the entry.
 * Per-entry failures are logged and do not abort the run.
 */
const processPendingBulkMessageDeletions: WorkerTaskHandler = async (_payload, helpers) => {
	helpers.logger.debug('Processing pending bulk message deletions');
	const {bulkMessageDeletionQueueService, userRepository, workerService} = getWorkerDependencies();
	const nowMs = Date.now();
	const pendingDeletions = await bulkMessageDeletionQueueService.getReadyDeletions(nowMs, 100);
	Logger.debug({count: pendingDeletions.length}, 'Pending bulk message deletions found');
	for (const deletion of pendingDeletions) {
		try {
			const userId = createUserID(deletion.userId);
			const user = await userRepository.findUnique(userId);
			if (!user) {
				// User was deleted in the meantime; the queue entry is stale.
				await bulkMessageDeletionQueueService.removeFromQueue(userId);
				continue;
			}
			if (!user.pendingBulkMessageDeletionAt) {
				// The request was cancelled; drop the queue entry.
				await bulkMessageDeletionQueueService.removeFromQueue(userId);
				continue;
			}
			if (user.pendingBulkMessageDeletionAt.getTime() > nowMs) {
				// The user row is authoritative: push the entry back to its real due time.
				Logger.debug(
					{
						userId: userId.toString(),
						scheduledAt: user.pendingBulkMessageDeletionAt.getTime(),
					},
					'Requeueing pending bulk message deletion that is not due yet',
				);
				await bulkMessageDeletionQueueService.scheduleDeletion(userId, user.pendingBulkMessageDeletionAt);
				continue;
			}
			await workerService.addJob(
				'bulkDeleteUserMessages',
				{
					userId: userId.toString(),
					scheduledAt: user.pendingBulkMessageDeletionAt.getTime(),
				},
				{maxAttempts: 5},
			);
			Logger.debug(
				{
					userId: userId.toString(),
					scheduledAt: user.pendingBulkMessageDeletionAt.getTime(),
				},
				'Queued worker job for pending bulk message deletion',
			);
			// Remove only after the job is queued so a crash cannot lose the request.
			await bulkMessageDeletionQueueService.removeFromQueue(userId);
		} catch (error) {
			Logger.error({error, userId: deletion.userId.toString()}, 'Failed to process pending bulk message deletion');
		}
	}
};
export default processPendingBulkMessageDeletions;

View File

@@ -0,0 +1,305 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {GuildID, ReportID, UserID} from '@fluxer/api/src/BrandedTypes';
import {createGuildID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import type {User} from '@fluxer/api/src/models/User';
import {
getAuditLogSearchService,
getGuildMemberSearchService,
getGuildSearchService,
getMessageSearchService,
getReportSearchService,
getUserSearchService,
} from '@fluxer/api/src/SearchFactory';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {IKVProvider} from '@fluxer/kv_client/src/IKVProvider';
import type {WorkerTaskHandler, WorkerTaskHelpers} from '@fluxer/worker/src/contracts/WorkerTask';
import {seconds} from 'itty-time';
import {z} from 'zod';
// Every search index that can be rebuilt through this task.
const INDEX_TYPES = ['guilds', 'users', 'reports', 'audit_logs', 'channel_messages', 'guild_members'] as const;
type IndexType = (typeof INDEX_TYPES)[number];
// Job payload; guild-scoped index types additionally require guild_id.
const PayloadSchema = z
	.object({
		index_type: z.enum(INDEX_TYPES),
		job_id: z.string(),
		admin_user_id: z.string(),
		guild_id: z.string().optional(),
		user_id: z.string().optional(),
	})
	.refine(
		(data) => {
			if (data.index_type === 'channel_messages' || data.index_type === 'guild_members') {
				return data.guild_id !== undefined;
			}
			return true;
		},
		{message: 'guild_id is required for the channel_messages and guild_members index type'},
	);
type RefreshSearchIndexPayload = z.infer<typeof PayloadSchema>;
// Page size for repository pagination during reindexing.
const BATCH_SIZE = 100;
// Progress entries in KV expire after an hour without updates.
const PROGRESS_TTL = seconds('1 hour');
/**
 * Narrows a possibly-null search service to a usable instance, throwing when
 * search is disabled (the factory returned null).
 */
function requireSearchService<T>(service: T | null): T {
	if (service) {
		return service;
	}
	throw new Error('Search is not enabled');
}
/** Writes a JSON progress snapshot to KV under progressKey with the PROGRESS_TTL expiry. */
async function setProgress(kvClient: IKVProvider, progressKey: string, data: Record<string, unknown>): Promise<void> {
	const serialized = JSON.stringify(data);
	await kvClient.set(progressKey, serialized, 'EX', PROGRESS_TTL);
}
/**
 * Publishes an in-progress snapshot for a running index refresh. The total is
 * reported as the count indexed so far because the final total is unknown
 * until pagination completes.
 */
async function reportInProgress(
	kvClient: IKVProvider,
	progressKey: string,
	indexType: IndexType,
	indexed: number,
): Promise<void> {
	const snapshot = {
		status: 'in_progress',
		index_type: indexType,
		total: indexed,
		indexed,
		started_at: new Date().toISOString(),
	};
	await setProgress(kvClient, progressKey, snapshot);
}
// Configuration for paginateAndIndex: how to fetch a page, index it, and
// derive the next cursor, plus where progress is published.
interface PaginateAndIndexOptions<TCursor, TItem> {
	fetchPage: (cursor: TCursor | undefined) => Promise<Array<TItem>>;
	indexBatch: (items: Array<TItem>) => Promise<void>;
	getCursor: (item: TItem) => TCursor;
	label: string;
	kvClient: IKVProvider;
	progressKey: string;
	indexType: IndexType;
}
/**
 * Generic pagination driver: repeatedly fetches a page, indexes it, advances
 * the cursor from the page's last item, and reports progress, until a short
 * page signals the end of the data set. Returns the total indexed count.
 */
async function paginateAndIndex<TCursor, TItem>(options: PaginateAndIndexOptions<TCursor, TItem>): Promise<number> {
	let cursor: TCursor | undefined;
	let indexedCount = 0;
	for (;;) {
		const page = await options.fetchPage(cursor);
		if (page.length > 0) {
			await options.indexBatch(page);
			indexedCount += page.length;
			cursor = options.getCursor(page[page.length - 1]!);
			await reportInProgress(options.kvClient, options.progressKey, options.indexType, indexedCount);
			Logger.debug({count: page.length, total: indexedCount}, `Indexed ${options.label} batch`);
		}
		// A page shorter than BATCH_SIZE means the source is exhausted.
		if (page.length !== BATCH_SIZE) {
			break;
		}
	}
	Logger.debug({count: indexedCount}, `Refreshed ${options.label} search index`);
	return indexedCount;
}
// Signature shared by all per-index refresh handlers; resolves to the number
// of documents indexed (or jobs queued, for channel_messages).
type IndexHandler = (
	payload: RefreshSearchIndexPayload,
	helpers: WorkerTaskHelpers,
	kvClient: IKVProvider,
	progressKey: string,
) => Promise<number>;
/** Rebuilds the guild search index: wipes all documents, then reindexes every guild page by page. */
const refreshGuilds: IndexHandler = async (_payload, _helpers, kvClient, progressKey) => {
	const {guildRepository} = getWorkerDependencies();
	const searchService = requireSearchService(getGuildSearchService());
	await searchService.deleteAllDocuments();
	const fetchPage = (cursor?: GuildID) => guildRepository.listAllGuildsPaginated(BATCH_SIZE, cursor);
	return paginateAndIndex({
		fetchPage,
		indexBatch: (batch) => searchService.indexGuilds(batch),
		getCursor: (guild) => guild.id,
		label: 'guild',
		kvClient,
		progressKey,
		indexType: 'guilds',
	});
};
/** Rebuilds the user search index: wipes all documents, then reindexes every user page by page. */
const refreshUsers: IndexHandler = async (_payload, _helpers, kvClient, progressKey) => {
	const {userRepository} = getWorkerDependencies();
	const searchService = requireSearchService(getUserSearchService());
	await searchService.deleteAllDocuments();
	const fetchPage = (cursor?: UserID) => userRepository.listAllUsersPaginated(BATCH_SIZE, cursor);
	return paginateAndIndex({
		fetchPage,
		indexBatch: (batch) => searchService.indexUsers(batch),
		getCursor: (user) => user.id,
		label: 'user',
		kvClient,
		progressKey,
		indexType: 'users',
	});
};
/** Rebuilds the report search index: wipes all documents, then reindexes every report page by page. */
const refreshReports: IndexHandler = async (_payload, _helpers, kvClient, progressKey) => {
	const {reportRepository} = getWorkerDependencies();
	const searchService = requireSearchService(getReportSearchService());
	await searchService.deleteAllDocuments();
	const fetchPage = (cursor?: ReportID) => reportRepository.listAllReportsPaginated(BATCH_SIZE, cursor);
	return paginateAndIndex({
		fetchPage,
		indexBatch: (batch) => searchService.indexReports(batch),
		getCursor: (report) => report.reportId,
		label: 'report',
		kvClient,
		progressKey,
		indexType: 'reports',
	});
};
/** Rebuilds the audit-log search index: wipes all documents, then reindexes every log page by page. */
const refreshAuditLogs: IndexHandler = async (_payload, _helpers, kvClient, progressKey) => {
	const {adminRepository} = getWorkerDependencies();
	const searchService = requireSearchService(getAuditLogSearchService());
	await searchService.deleteAllDocuments();
	const fetchPage = (cursor?: bigint) => adminRepository.listAllAuditLogsPaginated(BATCH_SIZE, cursor);
	return paginateAndIndex({
		fetchPage,
		indexBatch: (batch) => searchService.indexAuditLogs(batch),
		getCursor: (log) => log.logId,
		label: 'audit log',
		kvClient,
		progressKey,
		indexType: 'audit_logs',
	});
};
/**
 * Message indexing is delegated: wipes the guild's messages from the search
 * index, then queues one indexChannelMessages job per channel. Returns the
 * number of channels queued rather than a document count.
 */
const refreshChannelMessages: IndexHandler = async (payload, helpers, _kvClient, _progressKey) => {
	const {channelRepository} = getWorkerDependencies();
	const searchService = requireSearchService(getMessageSearchService());
	// guild_id presence is enforced by PayloadSchema's refinement for this index type.
	const guildId = createGuildID(BigInt(payload.guild_id!));
	await searchService.deleteGuildMessages(guildId);
	const channels = await channelRepository.listGuildChannels(guildId);
	for (const channel of channels) {
		Logger.debug({channelId: channel.id.toString()}, 'Indexing channel messages');
		const jobPayload = {channelId: channel.id.toString()};
		await helpers.addJob('indexChannelMessages', jobPayload, {
			jobKey: `index-channel-${channel.id}-initial`,
			maxAttempts: 3,
		});
	}
	Logger.debug({channels: channels.length, guildId: guildId.toString()}, 'Queued channel message indexing jobs');
	return channels.length;
};
/**
 * Rebuilds the member search index for a single guild.
 *
 * Pages through the guild's members, resolves each page's user records, and
 * indexes member+user pairs together; members whose user record no longer
 * exists are skipped. On completion the guild row is stamped with
 * members_indexed_at.
 */
const refreshGuildMembers: IndexHandler = async (payload, _helpers, kvClient, progressKey) => {
	const {guildRepository, userRepository} = getWorkerDependencies();
	const searchService = requireSearchService(getGuildMemberSearchService());
	// guild_id presence is enforced by PayloadSchema's refinement for this index type.
	const guildId = createGuildID(BigInt(payload.guild_id!));
	await searchService.deleteGuildMembers(guildId);
	const indexedCount = await paginateAndIndex({
		fetchPage: async (cursor?: UserID) => {
			const members = await guildRepository.listMembersPaginated(guildId, BATCH_SIZE, cursor);
			const userIds = [...new Set(members.map((m) => m.userId))];
			// Resolve the page's users in parallel instead of one await per user
			// (the previous sequential lookups were an accidental N+1 per page).
			const users = await Promise.all(userIds.map((uid) => userRepository.findUnique(uid)));
			const userMap = new Map<UserID, User>();
			for (let i = 0; i < userIds.length; i++) {
				const user = users[i];
				if (user) {
					userMap.set(userIds[i]!, user);
				}
			}
			return members
				.map((member) => {
					const user = userMap.get(member.userId);
					return user ? {member, user} : null;
				})
				.filter((item): item is NonNullable<typeof item> => item != null);
		},
		indexBatch: (membersWithUsers) => searchService.indexMembers(membersWithUsers),
		getCursor: (item) => item.member.userId,
		label: 'guild member',
		kvClient,
		progressKey,
		indexType: 'guild_members',
	});
	// Record when this guild's members were last fully indexed.
	const guild = await guildRepository.findUnique(guildId);
	if (guild) {
		await guildRepository.upsert({
			...guild.toRow(),
			members_indexed_at: new Date(),
		});
	}
	return indexedCount;
};
// Dispatch table mapping each payload index_type to its refresh handler.
const INDEX_HANDLERS: Record<IndexType, IndexHandler> = {
	guilds: refreshGuilds,
	users: refreshUsers,
	reports: refreshReports,
	audit_logs: refreshAuditLogs,
	channel_messages: refreshChannelMessages,
	guild_members: refreshGuildMembers,
};
/**
 * Worker task entry point for admin-triggered search index rebuilds.
 *
 * Validates the payload, publishes an initial in-progress status under
 * index_refresh_status:<job_id>, dispatches to the handler for the requested
 * index type, then writes a completed (or failed) status. Errors are rethrown
 * after recording the failure so the worker framework sees the job fail.
 */
const refreshSearchIndex: WorkerTaskHandler = async (payload, helpers) => {
	const validated = PayloadSchema.parse(payload);
	helpers.logger.debug({payload: validated}, 'Processing refreshSearchIndex task');
	const {kvClient} = getWorkerDependencies();
	const progressKey = `index_refresh_status:${validated.job_id}`;
	await reportInProgress(kvClient, progressKey, validated.index_type, 0);
	try {
		const handler = INDEX_HANDLERS[validated.index_type];
		const indexedCount = await handler(validated, helpers, kvClient, progressKey);
		await setProgress(kvClient, progressKey, {
			status: 'completed',
			index_type: validated.index_type,
			total: indexedCount,
			indexed: indexedCount,
			completed_at: new Date().toISOString(),
		});
	} catch (error) {
		Logger.error({error, payload: validated}, 'Failed to refresh search index');
		await setProgress(kvClient, progressKey, {
			status: 'failed',
			index_type: validated.index_type,
			error: error instanceof Error ? error.message : 'Unknown error',
			failed_at: new Date().toISOString(),
		});
		throw error;
	}
};
export default refreshSearchIndex;

View File

@@ -0,0 +1,96 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createUserID} from '@fluxer/api/src/BrandedTypes';
import {mapConnectionToResponse} from '@fluxer/api/src/connection/ConnectionMappers';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Job payload: the id of the user whose connections should be revalidated.
const PayloadSchema = z.object({
	userId: z.string(),
});
/**
 * Worker task: re-checks every verified third-party connection of one user.
 *
 * For each verified connection the connection service decides whether it is
 * still valid and may return update parameters to persist (e.g. marking it
 * unverified). When anything changed, the refreshed connection list is pushed
 * to the user's sessions via a USER_CONNECTIONS_UPDATE presence dispatch.
 * Per-connection failures are logged and skipped, never fatal to the run.
 */
const revalidateUserConnections: WorkerTaskHandler = async (payload, helpers) => {
	const validated = PayloadSchema.parse(payload);
	helpers.logger.debug({payload: validated}, 'Processing revalidateUserConnections task');
	const userId = createUserID(BigInt(validated.userId));
	const {connectionRepository, connectionService, gatewayService} = getWorkerDependencies();
	const connections = await connectionRepository.findByUserId(userId);
	const verifiedConnections = connections.filter((conn) => conn.verified);
	if (verifiedConnections.length === 0) {
		helpers.logger.debug({userId: userId.toString()}, 'No verified connections to revalidate');
		return;
	}
	let hasChanges = false;
	for (const connection of verifiedConnections) {
		try {
			const {isValid, updateParams} = await connectionService.revalidateConnection(connection);
			// Only persist (and later dispatch) when the service returned changes.
			if (updateParams) {
				await connectionRepository.update(userId, connection.connection_type, connection.connection_id, updateParams);
				hasChanges = true;
				if (!isValid) {
					Logger.info(
						{
							userId: userId.toString(),
							connectionId: connection.connection_id,
							connectionType: connection.connection_type,
						},
						'Connection verification failed, marked as unverified',
					);
				}
			}
		} catch (error) {
			Logger.error(
				{
					error,
					userId: userId.toString(),
					connectionId: connection.connection_id,
					connectionType: connection.connection_type,
				},
				'Failed to revalidate connection',
			);
		}
	}
	if (hasChanges) {
		// Re-read so the dispatched list reflects the updates just persisted.
		const updatedConnections = await connectionRepository.findByUserId(userId);
		await gatewayService.dispatchPresence({
			userId,
			event: 'USER_CONNECTIONS_UPDATE',
			data: {connections: updatedConnections.map(mapConnectionToResponse)},
		});
		Logger.info({userId: userId.toString()}, 'Dispatched USER_CONNECTIONS_UPDATE event');
	}
	helpers.logger.debug(
		{userId: userId.toString(), checked: verifiedConnections.length, hasChanges},
		'Completed connection revalidation',
	);
};
export default revalidateUserConnections;

View File

@@ -0,0 +1,85 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {withSpan} from '@fluxer/api/src/telemetry/Tracing';
import {
ScheduledMessageExecutor,
type SendScheduledMessageParams,
} from '@fluxer/api/src/worker/executors/ScheduledMessageExecutor';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {recordCounter, recordHistogram} from '@fluxer/telemetry/src/Metrics';
import type {WorkerTaskHelpers} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Job payload for one scheduled message send; all fields are stringified for
// transport. NOTE(review): expectedScheduledAt presumably lets the executor
// detect reschedules — confirm against ScheduledMessageExecutor.
const PayloadSchema = z.object({
	userId: z.string(),
	scheduledMessageId: z.string(),
	expectedScheduledAt: z.string(),
});
/**
 * Worker task: executes one scheduled message send inside a tracing span,
 * recording a success/error counter and an execution-latency histogram on
 * both paths. Errors are rethrown after metrics are recorded.
 */
export async function sendScheduledMessage(payload: unknown, helpers: WorkerTaskHelpers): Promise<void> {
	// NOTE(review): the `as` cast assumes the zod schema's output is structurally
	// identical to SendScheduledMessageParams — confirm, and prefer deriving the
	// schema from that type so the cast can be dropped.
	const validated = PayloadSchema.parse(payload) as SendScheduledMessageParams;
	const start = Date.now();
	try {
		const result = await withSpan(
			{
				name: 'fluxer.message.scheduled.execute',
				attributes: {
					user_id: validated.userId,
					scheduled_message_id: validated.scheduledMessageId,
				},
			},
			async () => {
				const deps = getWorkerDependencies();
				const executor = new ScheduledMessageExecutor(deps, helpers.logger);
				return await executor.execute(validated);
			},
		);
		// Success metrics: one counter increment plus wall-clock latency.
		recordCounter({
			name: 'fluxer.messages.scheduled_executed',
			dimensions: {status: 'success'},
			value: 1,
		});
		recordHistogram({
			name: 'fluxer.message.scheduled_execution.latency',
			valueMs: Date.now() - start,
		});
		return result;
	} catch (error) {
		// Failure metrics mirror the success path, tagged with the error type.
		recordCounter({
			name: 'fluxer.messages.scheduled_executed',
			dimensions: {
				status: 'error',
				error_type: error instanceof Error ? error.name : 'unknown',
			},
			value: 1,
		});
		recordHistogram({
			name: 'fluxer.message.scheduled_execution.latency',
			valueMs: Date.now() - start,
		});
		throw error;
	}
}

View File

@@ -0,0 +1,34 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {SystemDmExecutor} from '@fluxer/api/src/worker/executors/SystemDmExecutor';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHelpers} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
// Job payload: references a pre-created system-DM job by its id.
const PayloadSchema = z.object({
	job_id: z.string(),
});
/**
 * Worker task: validates the payload and hands execution to SystemDmExecutor,
 * which owns the actual DM delivery logic.
 */
export async function sendSystemDm(payload: unknown, helpers: WorkerTaskHelpers): Promise<void> {
	const params = PayloadSchema.parse(payload);
	const executor = new SystemDmExecutor(getWorkerDependencies(), helpers.logger);
	await executor.execute(params);
}

View File

@@ -0,0 +1,81 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import type {GuildID} from '@fluxer/api/src/BrandedTypes';
import {GuildDiscoveryRepository} from '@fluxer/api/src/guild/repositories/GuildDiscoveryRepository';
import {Logger} from '@fluxer/api/src/Logger';
import {getGuildSearchService} from '@fluxer/api/src/SearchFactory';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {DiscoveryApplicationStatus} from '@fluxer/constants/src/DiscoveryConstants';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
const BATCH_SIZE = 50;

/**
 * Periodic task that re-indexes every APPROVED (discoverable) guild into the
 * guild search service, enriched with its discovery metadata and current
 * online member count.
 *
 * Online counts are best-effort: a gateway failure downgrades to zero counts
 * rather than aborting the sync. Guilds are processed in batches of
 * BATCH_SIZE, and each batch is indexed concurrently so the batching actually
 * bounds in-flight work instead of degenerating to one-at-a-time processing.
 */
const syncDiscoveryIndex: WorkerTaskHandler = async (_payload, helpers) => {
  helpers.logger.info('Starting discovery index sync');
  const guildSearchService = getGuildSearchService();
  if (!guildSearchService) {
    helpers.logger.warn('Search service not available, skipping discovery index sync');
    return;
  }
  const {guildRepository, gatewayService} = getWorkerDependencies();
  const discoveryRepository = new GuildDiscoveryRepository();
  const approvedRows = await discoveryRepository.listByStatus(DiscoveryApplicationStatus.APPROVED, 1000);
  if (approvedRows.length === 0) {
    helpers.logger.info('No discoverable guilds to sync');
    return;
  }
  const guildIds = approvedRows.map((row) => row.guild_id);
  let onlineCounts = new Map<GuildID, number>();
  try {
    onlineCounts = await gatewayService.getDiscoveryOnlineCounts(guildIds);
  } catch (error) {
    // Job-scoped logger (not the global Logger) so the warning carries task context.
    helpers.logger.warn({err: error}, 'Failed to fetch online counts from gateway, proceeding with zero counts');
  }
  let synced = 0;
  for (let i = 0; i < guildIds.length; i += BATCH_SIZE) {
    const batch = guildIds.slice(i, i + BATCH_SIZE);
    // Guilds are independent of each other; index the whole batch concurrently.
    await Promise.all(
      batch.map(async (guildId) => {
        const guild = await guildRepository.findUnique(guildId);
        if (!guild) return;
        const discoveryRow = await discoveryRepository.findByGuildId(guildId);
        if (!discoveryRow) return;
        await guildSearchService.updateGuild(guild, {
          description: discoveryRow.description,
          categoryId: discoveryRow.category_id,
          onlineCount: onlineCounts.get(guildId) ?? 0,
        });
        synced++;
      }),
    );
  }
  helpers.logger.info({synced, total: guildIds.length}, 'Discovery index sync completed');
};
export default syncDiscoveryIndex;

View File

@@ -0,0 +1,47 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createUserID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {processUserDeletion} from '@fluxer/api/src/user/services/UserDeletionService';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
import {z} from 'zod';
const PayloadSchema = z.object({
  userId: z.string(),
  deletionReasonCode: z.number(),
});

/**
 * Worker task that performs the deletion of a single user account.
 *
 * Payloads are produced by the `userProcessPendingDeletions` scheduler task.
 * Failures are logged and rethrown so the queue can retry the job.
 */
const userProcessPendingDeletion: WorkerTaskHandler = async (payload, helpers) => {
  const validated = PayloadSchema.parse(payload);
  helpers.logger.debug({payload: validated}, 'Processing userProcessPendingDeletion task');
  const userId = createUserID(BigInt(validated.userId));
  try {
    const deps = getWorkerDependencies();
    await processUserDeletion(userId, validated.deletionReasonCode, deps);
  } catch (error) {
    // Use the job-scoped logger (consistent with the debug line above) so the
    // failure carries task context instead of going to the bare global Logger.
    helpers.logger.error({error, userId}, 'Failed to delete user account');
    throw error;
  }
};
export default userProcessPendingDeletion;

View File

@@ -0,0 +1,101 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {createUserID} from '@fluxer/api/src/BrandedTypes';
import {Logger} from '@fluxer/api/src/Logger';
import {getWorkerDependencies} from '@fluxer/api/src/worker/WorkerContext';
import {UserFlags} from '@fluxer/constants/src/UserConstants';
import type {WorkerTaskHandler} from '@fluxer/worker/src/contracts/WorkerTask';
/**
 * Scheduler task that drains the KV-backed pending-deletion queue and fans
 * each ready user out to an individual `userProcessPendingDeletion` job.
 *
 * If the queue state is stale it is rebuilt under a distributed lock; when
 * another worker already holds the rebuild lock, this run is skipped. Users
 * that are bots or app-store reviewers are never scheduled for deletion, and
 * entries with no matching Cassandra state are removed from the KV queue.
 */
const userProcessPendingDeletions: WorkerTaskHandler = async (_payload, helpers) => {
  helpers.logger.debug('Processing userProcessPendingDeletions task');
  const {userRepository, workerService, deletionQueueService} = getWorkerDependencies();
  try {
    helpers.logger.debug('Processing pending user deletions from KV queue');
    const needsRebuild = await deletionQueueService.needsRebuild();
    if (needsRebuild) {
      helpers.logger.info('Deletion queue needs rebuild, acquiring lock');
      const lockToken = await deletionQueueService.acquireRebuildLock();
      if (lockToken) {
        try {
          await deletionQueueService.rebuildState();
        } finally {
          // Release the rebuild lock on both success and failure paths.
          await deletionQueueService.releaseRebuildLock(lockToken);
        }
      } else {
        helpers.logger.info('Another worker is rebuilding the queue, skipping this run');
        return;
      }
    }
    const nowMs = Date.now();
    const pendingDeletions = await deletionQueueService.getReadyDeletions(nowMs, 1000);
    helpers.logger.debug({count: pendingDeletions.length}, 'Found users pending deletion from KV');
    let scheduled = 0;
    for (const deletion of pendingDeletions) {
      try {
        const userId = createUserID(deletion.userId);
        const user = await userRepository.findUnique(userId);
        if (!user || !user.pendingDeletionAt) {
          helpers.logger.warn({userId}, 'User not found or not pending deletion in Cassandra, removing from KV');
          await deletionQueueService.removeFromQueue(userId);
          continue;
        }
        // NOTE(review): bot/reviewer entries are skipped but NOT removed from
        // the queue, so they are re-examined on every run — confirm intended.
        if (user.isBot) {
          helpers.logger.info({userId}, 'User is a bot, skipping deletion');
          continue;
        }
        if (user.flags & UserFlags.APP_STORE_REVIEWER) {
          helpers.logger.info({userId}, 'User is an app store reviewer, skipping deletion');
          continue;
        }
        await workerService.addJob('userProcessPendingDeletion', {
          userId: deletion.userId.toString(),
          deletionReasonCode: deletion.deletionReasonCode,
        });
        await deletionQueueService.removeFromQueue(userId);
        await userRepository.removePendingDeletion(userId, user.pendingDeletionAt);
        scheduled++;
      } catch (error) {
        // One failing user must not block the rest of the batch.
        helpers.logger.error({error, userId: deletion.userId.toString()}, 'Failed to schedule user deletion');
      }
    }
    helpers.logger.debug({scheduled, total: pendingDeletions.length}, 'Scheduled user deletion tasks');
  } catch (error) {
    helpers.logger.error({error}, 'Failed to process pending deletions');
    throw error;
  }
};
export default userProcessPendingDeletions;

View File

@@ -0,0 +1,404 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {randomUUID} from 'node:crypto';
import {Config} from '@fluxer/api/src/Config';
import type {CsamScanQueueEntry, CsamScanResultMessage} from '@fluxer/api/src/csam/CsamTypes';
import {type ApiTestHarness, createApiTestHarness} from '@fluxer/api/src/test/ApiTestHarness';
import {
createPhotoDnaErrorHandler,
createPhotoDnaMatchHandler,
} from '@fluxer/api/src/test/msw/handlers/PhotoDnaHandlers';
import {server} from '@fluxer/api/src/test/msw/server';
import {clearWorkerDependencies, setWorkerDependenciesForTest} from '@fluxer/api/src/worker/WorkerContext';
import {KVCacheProvider} from '@fluxer/cache/src/providers/KVCacheProvider';
import type {WorkerTaskHandler, WorkerTaskHelpers} from '@fluxer/worker/src/contracts/WorkerTask';
import {afterAll, afterEach, beforeAll, beforeEach, describe, expect, it, type MockInstance, vi} from 'vitest';
// Lock/queue keys and lock TTL must match the values used by the
// CsamScanConsumerWorker implementation under test.
const LOCK_KEY = 'csam:scan:consumer:lock';
const LOCK_TTL_SECONDS = 5;
const QUEUE_KEY = 'csam:scan:queue';
/** Builds a minimal scan-queue entry with random IDs; fields overridable per test. */
function createQueueEntry(overrides: Partial<CsamScanQueueEntry> = {}): CsamScanQueueEntry {
  const base: CsamScanQueueEntry = {
    requestId: randomUUID(),
    hashes: [`hash-${randomUUID()}`],
  };
  return {...base, ...overrides};
}
// Integration tests for the CSAM scan consumer worker task: it drains the
// KV-backed scan queue under a distributed lock, runs PhotoDNA matching (via
// MSW-mocked handlers), and publishes one result message per request on the
// `csam:result:{requestId}` channel.
describe('CsamScanConsumerWorker', () => {
  let harness: ApiTestHarness;
  let cacheService: KVCacheProvider;
  let csamScanConsumer: WorkerTaskHandler;
  let mockHelpers: WorkerTaskHelpers;
  let publishSpy: MockInstance;
  beforeAll(async () => {
    harness = await createApiTestHarness();
  });
  afterAll(async () => {
    await harness?.shutdown();
  });
  beforeEach(async () => {
    await harness.reset();
    vi.clearAllMocks();
    cacheService = new KVCacheProvider({client: harness.kvProvider});
    setWorkerDependenciesForTest({
      kvClient: harness.kvProvider,
      cacheService,
    });
    publishSpy = vi.spyOn(harness.kvProvider, 'publish');
    mockHelpers = {
      logger: {
        trace: vi.fn(),
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
        child: () => mockHelpers.logger,
      },
      addJob: vi.fn(async () => {}),
    };
    // Import the worker lazily so it picks up the test dependencies
    // installed above via setWorkerDependenciesForTest.
    const module = await import('@fluxer/api/src/worker/tasks/CsamScanConsumerWorker');
    csamScanConsumer = module.default;
  });
  afterEach(() => {
    clearWorkerDependencies();
    vi.clearAllMocks();
  });
  // The consumer must take the distributed lock before touching the queue.
  describe('lock acquisition', () => {
    it('acquires lock before processing with correct key and TTL', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const acquireLockSpy = vi.spyOn(cacheService, 'acquireLock');
      await csamScanConsumer({}, mockHelpers);
      expect(acquireLockSpy).toHaveBeenCalledTimes(1);
      expect(acquireLockSpy).toHaveBeenCalledWith(LOCK_KEY, LOCK_TTL_SECONDS);
    });
    it('processes queue entries only when lock is acquired', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = [createQueueEntry(), createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(2);
    });
  });
  // Holding the lock externally simulates a concurrent consumer instance.
  describe('lock not acquired', () => {
    it('skips processing when lock cannot be acquired', async () => {
      const entries = [createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      const lockToken = await cacheService.acquireLock(LOCK_KEY, 60);
      expect(lockToken).not.toBeNull();
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).not.toHaveBeenCalled();
      // Entry must remain queued for the next run.
      const queueLength = await harness.kvProvider.llen(QUEUE_KEY);
      expect(queueLength).toBe(1);
      await cacheService.releaseLock(LOCK_KEY, lockToken!);
    });
  });
  describe('batch processing', () => {
    it('processes multiple queue entries in a single batch', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = [
        createQueueEntry({requestId: 'request-1'}),
        createQueueEntry({requestId: 'request-2'}),
        createQueueEntry({requestId: 'request-3'}),
      ];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(3);
      const publishedChannels = publishSpy.mock.calls.map((call) => call[0]);
      expect(publishedChannels).toContain('csam:result:request-1');
      expect(publishedChannels).toContain('csam:result:request-2');
      expect(publishedChannels).toContain('csam:result:request-3');
    });
    it('respects batch size limit of 5 entries', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = Array.from({length: 10}, (_, i) => createQueueEntry({requestId: `request-${i}`}));
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      // Only 5 consumed per run; the rest stay queued.
      expect(publishSpy).toHaveBeenCalledTimes(5);
      const remainingQueueLength = await harness.kvProvider.llen(QUEUE_KEY);
      expect(remainingQueueLength).toBe(5);
    });
  });
  describe('result publishing', () => {
    it('publishes results to correct channels with format csam:result:{requestId}', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = [createQueueEntry({requestId: 'test-request-123'})];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(1);
      expect(publishSpy).toHaveBeenCalledWith('csam:result:test-request-123', expect.any(String));
    });
    it('publishes result message with correct format', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = [createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(1);
      const publishedMessage = publishSpy.mock.calls[0]![1];
      const result = JSON.parse(publishedMessage) as CsamScanResultMessage;
      expect(result).toHaveProperty('isMatch');
      expect(typeof result.isMatch).toBe('boolean');
    });
  });
  describe('empty queue handling', () => {
    it('handles empty queue gracefully without errors', async () => {
      const releaseLockSpy = vi.spyOn(cacheService, 'releaseLock');
      await expect(csamScanConsumer({}, mockHelpers)).resolves.not.toThrow();
      expect(publishSpy).not.toHaveBeenCalled();
      expect(releaseLockSpy).toHaveBeenCalledTimes(1);
    });
  });
  // With PhotoDNA disabled, waiters must still get a (negative) answer so
  // uploads are not blocked indefinitely.
  describe('PhotoDNA disabled', () => {
    it('publishes isMatch: false for all requests when PhotoDNA is disabled', async () => {
      const configSpy = vi.spyOn(Config.photoDna, 'enabled', 'get').mockReturnValue(false);
      const entries = [
        createQueueEntry({requestId: 'disabled-request-1'}),
        createQueueEntry({requestId: 'disabled-request-2'}),
      ];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(2);
      for (const call of publishSpy.mock.calls) {
        const result = JSON.parse(call[1]) as CsamScanResultMessage;
        expect(result.isMatch).toBe(false);
      }
      configSpy.mockRestore();
    });
  });
  // API errors degrade to "no match" responses rather than surfacing errors.
  describe('PhotoDNA API errors', () => {
    it('publishes isMatch: false when PhotoDNA API returns HTTP error', async () => {
      server.use(createPhotoDnaErrorHandler(500, {error: 'Internal server error'}));
      const entries = [
        createQueueEntry({requestId: 'error-request-1'}),
        createQueueEntry({requestId: 'error-request-2'}),
      ];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      const releaseLockSpy = vi.spyOn(cacheService, 'releaseLock');
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(2);
      for (const call of publishSpy.mock.calls) {
        const result = JSON.parse(call[1]) as CsamScanResultMessage;
        expect(result.isMatch).toBe(false);
        expect(result.error).toBeUndefined();
      }
      expect(releaseLockSpy).toHaveBeenCalledTimes(1);
    });
  });
  // The lock must be released exactly once on every exit path.
  describe('lock release', () => {
    it('releases lock after successful processing', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = [createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      const releaseLockSpy = vi.spyOn(cacheService, 'releaseLock');
      await csamScanConsumer({}, mockHelpers);
      expect(releaseLockSpy).toHaveBeenCalledTimes(1);
      expect(releaseLockSpy).toHaveBeenCalledWith(LOCK_KEY, expect.any(String));
    });
    it('releases lock after empty queue processing', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const releaseLockSpy = vi.spyOn(cacheService, 'releaseLock');
      await csamScanConsumer({}, mockHelpers);
      expect(releaseLockSpy).toHaveBeenCalledTimes(1);
    });
    it('releases lock even when PhotoDNA API returns HTTP error', async () => {
      server.use(createPhotoDnaErrorHandler(500, {error: 'Internal server error'}));
      const entries = [createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      const releaseLockSpy = vi.spyOn(cacheService, 'releaseLock');
      await csamScanConsumer({}, mockHelpers);
      expect(releaseLockSpy).toHaveBeenCalledTimes(1);
    });
    it('releases lock when PhotoDNA is disabled', async () => {
      const configSpy = vi.spyOn(Config.photoDna, 'enabled', 'get').mockReturnValue(false);
      const entries = [createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      const releaseLockSpy = vi.spyOn(cacheService, 'releaseLock');
      await csamScanConsumer({}, mockHelpers);
      expect(releaseLockSpy).toHaveBeenCalledTimes(1);
      configSpy.mockRestore();
    });
  });
  describe('match detection', () => {
    it('publishes isMatch: true with matchResult when PhotoDNA detects match', async () => {
      server.use(
        createPhotoDnaMatchHandler({
          isMatch: true,
          matchId: 'test-match-id',
          source: 'test-database',
          violations: ['CSAM'],
          matchDistance: 0.01,
        }),
      );
      const entries = [createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(1);
      const publishedMessage = publishSpy.mock.calls[0]![1];
      const result = JSON.parse(publishedMessage) as CsamScanResultMessage;
      expect(result.isMatch).toBe(true);
      expect(result.matchResult).toBeDefined();
      expect(result.matchResult!.isMatch).toBe(true);
    });
    it('publishes isMatch: false without matchResult when PhotoDNA finds no match', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = [createQueueEntry()];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(1);
      const publishedMessage = publishSpy.mock.calls[0]![1];
      const result = JSON.parse(publishedMessage) as CsamScanResultMessage;
      expect(result.isMatch).toBe(false);
      expect(result.matchResult).toBeUndefined();
    });
  });
  describe('entries with empty hashes', () => {
    it('publishes isMatch: false when entries have no hashes', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      const entries = [createQueueEntry({hashes: []}), createQueueEntry({hashes: []})];
      for (const entry of entries) {
        await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(entry));
      }
      await csamScanConsumer({}, mockHelpers);
      expect(publishSpy).toHaveBeenCalledTimes(2);
      for (const call of publishSpy.mock.calls) {
        const result = JSON.parse(call[1]) as CsamScanResultMessage;
        expect(result.isMatch).toBe(false);
      }
    });
  });
  // Malformed queue payloads must be skipped, not crash the batch.
  describe('invalid queue entry handling', () => {
    it('continues processing after encountering invalid JSON in queue', async () => {
      server.use(createPhotoDnaMatchHandler({isMatch: false}));
      await harness.kvProvider.rpush(QUEUE_KEY, 'invalid-json-{{{');
      const validEntry = createQueueEntry({requestId: 'valid-request'});
      await harness.kvProvider.rpush(QUEUE_KEY, JSON.stringify(validEntry));
      await expect(csamScanConsumer({}, mockHelpers)).resolves.not.toThrow();
      expect(publishSpy).toHaveBeenCalledTimes(1);
      expect(publishSpy).toHaveBeenCalledWith('csam:result:valid-request', expect.any(String));
    });
  });
});

View File

@@ -0,0 +1,92 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {type ChannelID, createChannelID, type MessageID} from '@fluxer/api/src/BrandedTypes';
import type {IChannelRepository} from '@fluxer/api/src/channel/IChannelRepository';
import type {IGatewayService} from '@fluxer/api/src/infrastructure/IGatewayService';
/** Splits `items` into consecutive sub-arrays of at most `chunkSize` elements each. */
export function chunkArray<T>(items: Array<T>, chunkSize: number): Array<Array<T>> {
  const result: Array<Array<T>> = [];
  let offset = 0;
  while (offset < items.length) {
    result.push(items.slice(offset, offset + chunkSize));
    offset += chunkSize;
  }
  return result;
}
interface BulkDeleteDispatcherDeps {
  channelRepository: IChannelRepository;
  gatewayService: IGatewayService;
  batchSize: number;
}

/**
 * Creates a per-channel accumulator for MESSAGE_DELETE_BULK gateway events.
 *
 * - `track(channelId, messageId)` records a deleted message for its channel.
 * - `flush(force)` dispatches every channel whose pending batch has reached
 *   `batchSize`; with `force === true`, all non-empty batches are dispatched
 *   regardless of size. Guild channels receive one guild dispatch; DM/group
 *   channels receive one presence dispatch per recipient. Batches whose
 *   channel no longer exists are discarded.
 */
export function createBulkDeleteDispatcher({channelRepository, gatewayService, batchSize}: BulkDeleteDispatcherDeps) {
  const messagesByChannel = new Map<string, Array<MessageID>>();

  const track = (channelId: ChannelID, messageId: MessageID) => {
    const channelIdStr = channelId.toString();
    const existing = messagesByChannel.get(channelIdStr);
    if (existing) {
      existing.push(messageId);
    } else {
      messagesByChannel.set(channelIdStr, [messageId]);
    }
  };

  const flush = async (force: boolean) => {
    for (const [channelIdStr, messageIdsBatch] of messagesByChannel.entries()) {
      if (messageIdsBatch.length === 0 || (!force && messageIdsBatch.length < batchSize)) {
        continue;
      }
      const channelId = createChannelID(BigInt(channelIdStr));
      const channel = await channelRepository.findUnique(channelId);
      if (channel) {
        const payloadIds = messageIdsBatch.map((id) => id.toString());
        const data = {
          channel_id: channelIdStr,
          ids: payloadIds,
        };
        if (channel.guildId) {
          await gatewayService.dispatchGuild({
            guildId: channel.guildId,
            event: 'MESSAGE_DELETE_BULK',
            data,
          });
        } else {
          // DM/group channel: fan the event out to each recipient's presence stream.
          for (const recipientId of channel.recipientIds) {
            await gatewayService.dispatchPresence({
              userId: recipientId,
              event: 'MESSAGE_DELETE_BULK',
              data,
            });
          }
        }
      }
      // Remove the entry instead of resetting it to [] so the map does not
      // grow without bound (and stale channels are not re-scanned on every
      // flush). Deleting the current key during Map iteration is safe.
      messagesByChannel.delete(channelIdStr);
    }
  };

  return {track, flush};
}

View File

@@ -0,0 +1,90 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
import {S3ServiceException} from '@aws-sdk/client-s3';
import {Config} from '@fluxer/api/src/Config';
import type {IStorageService} from '@fluxer/api/src/infrastructure/IStorageService';
import {Logger} from '@fluxer/api/src/Logger';
import type archiver from 'archiver';
// Memoized CDN bucket name, resolved from Config on first use.
let _cdnBucket: string | null = null;

/** Returns the CDN bucket name, caching the Config lookup after the first call. */
function getCdnBucket(): string {
  _cdnBucket ||= Config.s3.buckets.cdn;
  return _cdnBucket;
}
/** Drops the `a_` animated-asset marker from a hash, if present. */
function stripAnimationPrefix(hash: string): string {
  const ANIMATED_MARKER = 'a_';
  if (!hash.startsWith(ANIMATED_MARKER)) {
    return hash;
  }
  return hash.slice(ANIMATED_MARKER.length);
}
/** Builds the `{prefix}/{entityId}/{hash}` object key, normalizing away any `a_` animation marker. */
export function buildHashedAssetKey(prefix: string, entityId: string, hash: string): string {
  const normalizedHash = stripAnimationPrefix(hash);
  return [prefix, entityId, normalizedHash].join('/');
}
/** Builds a plain `{prefix}/{key}` object key with no hash normalization. */
export function buildSimpleAssetKey(prefix: string, key: string): string {
  return [prefix, key].join('/');
}
/** Hashes carrying the `a_` marker are animated and served as GIF; others as PNG. */
export function getAnimatedAssetExtension(hash: string): 'gif' | 'png' {
  if (hash.startsWith('a_')) {
    return 'gif';
  }
  return 'png';
}
/** Animated emojis are served as GIF; static emojis as WebP. */
export function getEmojiExtension(animated: boolean): 'gif' | 'webp' {
  if (animated) {
    return 'gif';
  }
  return 'webp';
}
/**
 * Reads an object from the CDN bucket as a Buffer.
 * Returns null when the key does not exist (S3 `NoSuchKey`); any other
 * storage error is rethrown to the caller.
 */
async function readCdnAssetIfExists(storageService: IStorageService, key: string): Promise<Buffer | null> {
  try {
    return Buffer.from(await storageService.readObject(getCdnBucket(), key));
  } catch (error) {
    const isMissingKey = error instanceof S3ServiceException && error.name === 'NoSuchKey';
    if (isMissingKey) {
      return null;
    }
    throw error;
  }
}
export interface AppendAssetToArchiveParams {
  archive: archiver.Archiver;
  storageService: IStorageService;
  storageKey: string;
  archiveName: string;
  label: string;
  subjectId: string;
}

/**
 * Fetches a CDN asset and appends it to the archive under `archiveName`.
 * A missing asset is logged (with `label` and `subjectId` for context) and
 * skipped instead of failing the whole export.
 */
export async function appendAssetToArchive(params: AppendAssetToArchiveParams): Promise<void> {
  const {archive, storageService, storageKey, archiveName, label, subjectId} = params;
  const buffer = await readCdnAssetIfExists(storageService, storageKey);
  if (buffer === null) {
    Logger.warn({subjectId, storageKey}, `Skipping missing ${label}`);
    return;
  }
  archive.append(buffer, {name: archiveName});
}