refactor progress

This commit is contained in:
Hampus Kraft
2026-02-17 12:22:36 +00:00
parent cb31608523
commit d5abd1a7e4
8257 changed files with 1190207 additions and 761040 deletions

View File

@@ -10,17 +10,17 @@ echo "Waiting for Cassandra to start..."
sleep 30
# 2. Extract backup and apply schema
docker exec cass bash -c 'apt-get update -qq && apt-get install -y -qq age'
docker exec cass sh -c 'apt-get update -qq && apt-get install -y -qq age'
docker cp ~/Downloads/backup.tar.age cass:/tmp/
docker cp ~/Downloads/key.txt cass:/tmp/
docker exec cass bash -c 'age -d -i /tmp/key.txt /tmp/backup.tar.age | tar -C /tmp -xf -'
docker exec cass bash -c 'sed "/^WARNING:/d" /tmp/cassandra-backup-*/schema.cql | cqlsh'
docker exec cass sh -c 'age -d -i /tmp/key.txt /tmp/backup.tar.age | tar -C /tmp -xf -'
docker exec cass sh -c 'sed "/^WARNING:/d" /tmp/cassandra-backup-*/schema.cql | cqlsh'
# 3. Copy backup to volume and stop Cassandra
docker exec cass bash -c 'cp -r /tmp/cassandra-backup-* /var/lib/cassandra/'
docker exec cass sh -c 'cp -r /tmp/cassandra-backup-* /var/lib/cassandra/'
docker stop cass
docker run -d --name cass-util -v cassandra_data:/var/lib/cassandra --entrypoint sleep cassandra:5.0 infinity
docker exec cass-util bash -c '
docker exec cass-util sh -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
DATA_DIR=/var/lib/cassandra/data
for keyspace_dir in "$BACKUP_DIR"/*/; do
@@ -46,7 +46,7 @@ docker start cass
sleep 30
# 5. Run nodetool refresh on all tables
docker exec cass bash -c '
docker exec cass sh -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
for keyspace_dir in "$BACKUP_DIR"/*/; do
keyspace=$(basename "$keyspace_dir")
@@ -89,16 +89,16 @@ docker cp "/tmp/${BACKUP_NAME}" ${CASSANDRA_CONTAINER}:/tmp/
docker cp /etc/cassandra/age_private_key.txt ${CASSANDRA_CONTAINER}:/tmp/key.txt
# 3. Stop Cassandra and prepare
docker exec ${CASSANDRA_CONTAINER} bash -c 'apt-get update -qq && apt-get install -y -qq age'
docker exec ${CASSANDRA_CONTAINER} sh -c 'apt-get update -qq && apt-get install -y -qq age'
docker stop ${CASSANDRA_CONTAINER}
# 4. Extract backup in utility container
docker run -d --name cass-restore-util --volumes-from ${CASSANDRA_CONTAINER} --entrypoint sleep cassandra:5.0 infinity
docker exec cass-restore-util bash -c 'age -d -i /tmp/key.txt /tmp/${BACKUP_NAME} | tar -C /tmp -xf -'
docker exec cass-restore-util bash -c 'cp -r /tmp/cassandra-backup-* /var/lib/cassandra/'
docker exec cass-restore-util sh -c 'age -d -i /tmp/key.txt /tmp/${BACKUP_NAME} | tar -C /tmp -xf -'
docker exec cass-restore-util sh -c 'cp -r /tmp/cassandra-backup-* /var/lib/cassandra/'
# 5. Copy SSTable files to existing schema directories
docker exec cass-restore-util bash -c '
docker exec cass-restore-util sh -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
DATA_DIR=/var/lib/cassandra/data
for keyspace_dir in "$BACKUP_DIR"/*/; do
@@ -124,7 +124,7 @@ docker start ${CASSANDRA_CONTAINER}
sleep 30
# 7. Run nodetool refresh
docker exec ${CASSANDRA_CONTAINER} bash -c '
docker exec ${CASSANDRA_CONTAINER} sh -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
for keyspace_dir in "$BACKUP_DIR"/*/; do
keyspace=$(basename "$keyspace_dir")

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env sh
# Copyright (C) 2026 Fluxer Contributors
#
@@ -17,7 +17,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
set -euo pipefail
set -eu
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
BACKUP_NAME="cassandra-backup-${TIMESTAMP}"
@@ -50,9 +50,9 @@ echo "[$(date)] Collecting snapshot files"
mkdir -p "${TEMP_DIR}"
# Find all snapshot directories and copy to temp location
find "${DATA_DIR}" -type d -name "${SNAPSHOT_TAG}" | while read snapshot_dir; do
find "${DATA_DIR}" -type d -name "${SNAPSHOT_TAG}" | while IFS= read -r snapshot_dir; do
# Get relative path from data dir
rel_path=$(dirname $(echo "${snapshot_dir}" | sed "s|${DATA_DIR}/||"))
rel_path=$(dirname "${snapshot_dir#$DATA_DIR/}")
target_dir="${TEMP_DIR}/${rel_path}"
mkdir -p "${target_dir}"
cp -r "${snapshot_dir}" "${target_dir}/"
@@ -89,7 +89,7 @@ fi
# Step 4: Create tar archive and encrypt with age (streaming)
echo "[$(date)] Encrypting backup with age..."
if ! tar -C /tmp -cf - "${BACKUP_NAME}" | \
age -r "$(cat ${AGE_PUBLIC_KEY_FILE})" -o "/tmp/${ENCRYPTED_BACKUP}"; then
age -r "$(cat "${AGE_PUBLIC_KEY_FILE}")" -o "/tmp/${ENCRYPTED_BACKUP}"; then
echo "[$(date)] Error: Encryption failed"
rm -rf "${TEMP_DIR}"
nodetool -h "${CASSANDRA_HOST}" clearsnapshot -t "${SNAPSHOT_TAG}"
@@ -130,11 +130,11 @@ aws s3 ls "s3://${B2_BUCKET_NAME}/" --endpoint-url="${B2_ENDPOINT_URL}" | \
awk '{print $4}' | \
sort -r | \
tail -n +$((MAX_BACKUP_COUNT + 1)) | \
while read -r old_backup; do
while IFS= read -r old_backup; do
echo "[$(date)] Deleting old backup: ${old_backup}"
aws s3 rm "s3://${B2_BUCKET_NAME}/${old_backup}" --endpoint-url="${B2_ENDPOINT_URL}" || true
done
echo "[$(date)] Backup process completed successfully"
echo "[$(date)] Backup name: ${ENCRYPTED_BACKUP}"
echo "[$(date)] Backup size: ${BACKUP_SIZE}"
echo "[$(date)] Backup size: ${BACKUP_SIZE}"

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.users ADD traits set<text>;

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.user_settings ADD bot_default_guilds_restricted boolean;
ALTER TABLE fluxer.user_settings ADD bot_restricted_guilds set<bigint>;

View File

@@ -0,0 +1,20 @@
-- Admin-initiated "system DM" broadcast jobs.
-- Partitioned by job_type; job_id clusters newest-first (DESC) so the most
-- recent job in a type is read first.
CREATE TABLE IF NOT EXISTS fluxer.system_dm_jobs (
job_type text,
job_id bigint,
admin_user_id bigint,
status text,
content text,
registration_start timestamp,
registration_end timestamp,
-- NOTE(review): guild IDs elsewhere in this schema are bigint; set<text>
-- here looks inconsistent — confirm it is intentional.
excluded_guild_ids set<text>,
target_count int,
sent_count int,
failed_count int,
last_error text,
worker_job_key text,
created_at timestamp,
updated_at timestamp,
approved_by bigint,
approved_at timestamp,
PRIMARY KEY ((job_type), job_id)
) WITH CLUSTERING ORDER BY (job_id DESC);

View File

@@ -0,0 +1,22 @@
-- Admin API keys, looked up by key_id. Only key_hash is stored, not the key
-- itself.
CREATE TABLE IF NOT EXISTS fluxer.admin_api_keys (
key_id bigint,
key_hash text,
name text,
created_by_user_id bigint,
created_at timestamp,
last_used_at timestamp,
expires_at timestamp,
version int,
PRIMARY KEY (key_id)
);
-- Denormalised index of the same keys by creator, newest key first.
-- Presumably kept in sync with admin_api_keys by application code — there is
-- no materialized view; verify both tables are written together.
CREATE TABLE IF NOT EXISTS fluxer.admin_api_keys_by_creator (
created_by_user_id bigint,
key_id bigint,
created_at timestamp,
name text,
expires_at timestamp,
last_used_at timestamp,
version int,
PRIMARY KEY (created_by_user_id, key_id)
) WITH CLUSTERING ORDER BY (key_id DESC);

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.admin_api_keys ADD acls set<text>;
ALTER TABLE fluxer.admin_api_keys_by_creator ADD acls set<text>;

View File

@@ -0,0 +1,9 @@
-- Reverse index of user relationships: all (source, type) rows targeting a
-- given user, clustered ascending by source then type.
CREATE TABLE IF NOT EXISTS fluxer.relationships_by_target (
target_user_id bigint,
source_user_id bigint,
type int,
nickname text,
since timestamp,
version int,
PRIMARY KEY (target_user_id, source_user_id, type)
) WITH CLUSTERING ORDER BY (source_user_id ASC, type ASC);

View File

@@ -0,0 +1,6 @@
-- Per-author message index (v2), newest message first within an author's
-- partition; channel_id is carried so the message row can be located.
CREATE TABLE IF NOT EXISTS fluxer.messages_by_author_id_v2 (
author_id bigint,
message_id bigint,
channel_id bigint,
PRIMARY KEY ((author_id), message_id)
) WITH CLUSTERING ORDER BY (message_id DESC);

View File

@@ -0,0 +1,28 @@
-- Evidence package per CSAM report (one row per report_id) describing where
-- the preserved material lives (bucket/key, zip key) plus match metadata and
-- an integrity hash.
CREATE TABLE IF NOT EXISTS fluxer.csam_evidence_packages (
report_id bigint PRIMARY KEY,
resource_type text,
bucket text,
key text,
cdn_url text,
filename text,
content_type text,
channel_id bigint,
message_id bigint,
guild_id bigint,
user_id bigint,
match_tracking_id text,
match_details text,
frames text,
hashes text,
context_snapshot text,
created_at timestamp,
expires_at timestamp,
integrity_sha256 text,
evidence_zip_key text
);
-- Legal holds keyed by the same report_id; presumably a row here blocks
-- expiration of the package until held_until — confirm against the consumer.
CREATE TABLE IF NOT EXISTS fluxer.csam_evidence_legal_holds (
report_id bigint PRIMARY KEY,
held_until timestamp,
created_at timestamp
);

View File

@@ -0,0 +1,21 @@
-- Queue/status table for CSAM scan jobs, one row per job_id, tracking the
-- scanned resource, job status, match results, and errors.
CREATE TABLE IF NOT EXISTS fluxer.csam_scan_jobs (
job_id text PRIMARY KEY,
resource_type text,
bucket text,
key text,
cdn_url text,
filename text,
content_type text,
channel_id bigint,
message_id bigint,
guild_id bigint,
user_id bigint,
status text,
enqueue_time timestamp,
last_updated timestamp,
match_tracking_id text,
match_details text,
hashes text,
error_message text,
expires_at timestamp
);

View File

@@ -0,0 +1,6 @@
-- Expiration schedule for evidence packages, partitioned by bucket and
-- clustered by expiry time so due reports can be range-scanned in order.
CREATE TABLE IF NOT EXISTS fluxer.csam_evidence_expirations (
bucket text,
expires_at timestamp,
report_id bigint,
PRIMARY KEY ((bucket), expires_at, report_id)
) WITH CLUSTERING ORDER BY (expires_at ASC);

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.guild_stickers ADD animated boolean;
ALTER TABLE fluxer.guild_stickers_by_sticker_id ADD animated boolean;

View File

@@ -0,0 +1 @@
ALTER TYPE fluxer.message_sticker_item ADD animated boolean;

View File

@@ -0,0 +1,10 @@
-- NCMEC submission status per internal report_id, storing the external
-- NCMEC report id once submitted and any failure reason.
CREATE TABLE IF NOT EXISTS fluxer.ncmec_submissions (
report_id bigint PRIMARY KEY,
status text,
ncmec_report_id text,
submitted_at timestamp,
submitted_by_admin_id bigint,
failure_reason text,
created_at timestamp,
updated_at timestamp
);

View File

@@ -0,0 +1,2 @@
ALTER TYPE fluxer.message_attachment ADD duration_secs int;
ALTER TYPE fluxer.message_attachment ADD waveform text;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.favorite_memes ADD klipy_slug text;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.user_settings ADD trusted_domains set<text>;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.user_settings ADD default_hide_muted_channels boolean;

View File

@@ -0,0 +1,2 @@
ALTER TYPE fluxer.guild_folder ADD flags int;
ALTER TYPE fluxer.guild_folder ADD icon text;

View File

@@ -0,0 +1,17 @@
-- Nested embed UDT mirroring the fields of message_embed (referenced UDTs
-- message_embed_author/provider/media/footer/field must already exist),
-- then attached to message_embed as a frozen list of children.
CREATE TYPE IF NOT EXISTS fluxer.message_embed_child (
type text,
title text,
description text,
url text,
timestamp timestamp,
color int,
author frozen<message_embed_author>,
provider frozen<message_embed_provider>,
thumbnail frozen<message_embed_media>,
image frozen<message_embed_media>,
video frozen<message_embed_media>,
footer frozen<message_embed_footer>,
fields frozen<list<message_embed_field>>,
nsfw boolean
);
ALTER TYPE fluxer.message_embed ADD children frozen<list<message_embed_child>>;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.guilds ADD message_history_cutoff timestamp;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.guilds ADD members_indexed_at timestamp;

View File

@@ -0,0 +1,16 @@
-- Third-party account connections per user, clustered by connection type
-- then connection id.
-- NOTE(review): mixed clustering order (connection_type ASC,
-- connection_id DESC) is unusual — confirm DESC on connection_id is wanted.
CREATE TABLE IF NOT EXISTS fluxer.user_connections (
user_id bigint,
connection_type text,
connection_id text,
identifier text,
name text,
verified boolean,
visibility_flags int,
sort_order int,
verification_token text,
verified_at timestamp,
last_verified_at timestamp,
created_at timestamp,
version int,
PRIMARY KEY ((user_id), connection_type, connection_id)
) WITH CLUSTERING ORDER BY (connection_type ASC, connection_id DESC);

View File

@@ -0,0 +1,46 @@
-- Donor records keyed by email, with Stripe customer/subscription details.
CREATE TABLE IF NOT EXISTS fluxer.donors (
email text,
stripe_customer_id text,
business_name text,
tax_id text,
tax_id_type text,
stripe_subscription_id text,
subscription_amount_cents int,
subscription_currency text,
subscription_interval text,
subscription_current_period_end timestamp,
subscription_cancel_at timestamp,
created_at timestamp,
updated_at timestamp,
version int,
PRIMARY KEY ((email))
);
-- Lookup tables mapping Stripe customer/subscription ids back to the donor
-- email; presumably written alongside fluxer.donors by application code.
CREATE TABLE IF NOT EXISTS fluxer.donors_by_stripe_customer_id (
stripe_customer_id text,
email text,
PRIMARY KEY ((stripe_customer_id), email)
);
CREATE TABLE IF NOT EXISTS fluxer.donors_by_stripe_subscription_id (
stripe_subscription_id text,
email text,
PRIMARY KEY ((stripe_subscription_id), email)
);
-- Magic-link login tokens, auto-expired by a 900 s (15 min) table TTL.
-- "token_" (trailing underscore) presumably avoids the reserved CQL TOKEN
-- keyword.
CREATE TABLE IF NOT EXISTS fluxer.donor_magic_link_tokens (
token_ text,
donor_email text,
expires_at timestamp,
used_at timestamp,
PRIMARY KEY ((token_))
) WITH default_time_to_live = 900;
-- Reverse index of outstanding magic-link tokens per email, same TTL.
CREATE TABLE IF NOT EXISTS fluxer.donor_magic_link_tokens_by_email (
donor_email text,
token_ text,
PRIMARY KEY ((donor_email), token_)
) WITH default_time_to_live = 900;

View File

@@ -0,0 +1,12 @@
-- Password-change flow tickets: a short-lived code is sent, then verified,
-- producing a verification_proof consumed by the actual change.
-- NOTE(review): no table TTL here (unlike the magic-link tables) — confirm
-- stale tickets are cleaned up elsewhere.
CREATE TABLE IF NOT EXISTS fluxer.password_change_tickets (
ticket text PRIMARY KEY,
user_id bigint,
code text,
code_sent_at timestamp,
code_expires_at timestamp,
verified boolean,
verification_proof text,
status text,
created_at timestamp,
updated_at timestamp
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.applications ADD bot_require_code_grant boolean;

View File

@@ -0,0 +1,21 @@
-- Guild discovery application state per guild (applied/reviewed/removed
-- audit fields).
CREATE TABLE IF NOT EXISTS fluxer.guild_discovery (
guild_id bigint,
status text,
category_id int,
description text,
applied_at timestamp,
reviewed_at timestamp,
reviewed_by bigint,
review_reason text,
removed_at timestamp,
removed_by bigint,
removal_reason text,
PRIMARY KEY ((guild_id))
);
-- Index of applications by status, newest application first, for admin
-- review queues; presumably written alongside guild_discovery.
CREATE TABLE IF NOT EXISTS fluxer.guild_discovery_by_status (
status text,
applied_at timestamp,
guild_id bigint,
PRIMARY KEY ((status), applied_at, guild_id)
) WITH CLUSTERING ORDER BY (applied_at DESC, guild_id DESC);

View File

@@ -1,52 +0,0 @@
services:
clickhouse:
image: clickhouse/clickhouse-server:24.8
hostname: clickhouse
env_file:
- /etc/fluxer/clickhouse.env
volumes:
- clickhouse_data:/var/lib/clickhouse
- clickhouse_logs:/var/log/clickhouse-server
- ./conf/config.xml:/etc/clickhouse-server/config.d/custom.xml:ro
- ./conf/users.xml:/etc/clickhouse-server/users.d/custom.xml:ro
networks:
- fluxer-shared
ports:
- target: 8123
published: 8123
protocol: tcp
mode: host
- target: 9000
published: 9000
protocol: tcp
mode: host
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
resources:
limits:
cpus: '4'
memory: 16G
healthcheck:
test: ['CMD-SHELL', 'wget -qO- http://127.0.0.1:8123/ping || exit 1']
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
ulimits:
nofile:
soft: 262144
hard: 262144
networks:
fluxer-shared:
external: true
volumes:
clickhouse_data:
driver: local
clickhouse_logs:
driver: local

View File

@@ -1,20 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
<logger>
<level>information</level>
<console>true</console>
</logger>
<max_concurrent_queries>100</max_concurrent_queries>
<max_connections>4096</max_connections>
<mark_cache_size>5368709120</mark_cache_size>
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
<background_pool_size>16</background_pool_size>
<background_schedule_pool_size>16</background_schedule_pool_size>
</clickhouse>

View File

@@ -1,14 +0,0 @@
<?xml version="1.0"?>
<clickhouse>
<users>
<fluxer>
<password from_env="CLICKHOUSE_PASSWORD"/>
<networks>
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
<access_management>1</access_management>
</fluxer>
</users>
</clickhouse>

View File

@@ -0,0 +1,86 @@
# Docker (Swarm) stack for the Ghost blog at blog.fluxer.app and its MySQL
# backend. Indentation reconstructed: the original paste had all leading
# whitespace stripped, which destroys YAML structure.
services:
  ghost-blog-mysql:
    image: mysql:8.0
    hostname: ghost-blog-mysql
    env_file:
      - /etc/fluxer/ghost-blog.env
    environment:
      - MYSQL_DATABASE=ghost
    volumes:
      - ghost_blog_mysql:/var/lib/mysql
    networks:
      - fluxer-shared
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '2'
          memory: 2G
    healthcheck:
      test: ['CMD', 'mysqladmin', 'ping', '-h', 'localhost']
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s

  ghost-blog:
    image: ghost:5-alpine
    hostname: ghost-blog
    env_file:
      - /etc/fluxer/ghost-blog.env
    environment:
      - url=https://blog.fluxer.app
      - database__client=mysql
      - database__connection__host=ghost-blog-mysql
      - database__connection__database=ghost
      - database__pool__min=0
      - database__pool__acquireTimeoutMillis=60000
      - database__connection__connectTimeout=60000
    volumes:
      - ghost_blog_content:/var/lib/ghost/content
    networks:
      - fluxer-shared
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '2'
          memory: 1G
      # Service labels consumed by caddy-docker-proxy for routing and
      # security headers. Placed under deploy (Swarm service labels) —
      # confirm against the proxy's configuration if not running in Swarm.
      labels:
        - 'caddy=blog.fluxer.app'
        - 'caddy.reverse_proxy={{upstreams 2368}}'
        - 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
        - 'caddy.header.X-Xss-Protection="1; mode=block"'
        - 'caddy.header.X-Content-Type-Options=nosniff'
        - 'caddy.header.Referrer-Policy=strict-origin-when-cross-origin'
        - 'caddy.header.X-Frame-Options=SAMEORIGIN'
    # Probe Ghost over HTTP using node (present in the ghost image); long
    # start_period covers first-boot migrations.
    healthcheck:
      test:
        [
          'CMD',
          'node',
          '-e',
          "require('http').get('http://127.0.0.1:2368/', r => process.exit(r.statusCode < 400 ? 0 : 1)).on('error', () => process.exit(1))",
        ]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 120s

networks:
  fluxer-shared:
    external: true

volumes:
  ghost_blog_mysql:
    driver: local
  ghost_blog_content:
    driver: local

View File

@@ -1,9 +0,0 @@
FROM caddy:2-builder AS builder
RUN xcaddy build \
--with github.com/abiosoft/caddy-yaml \
--with github.com/mholt/caddy-l4
FROM caddy:2
COPY --from=builder /usr/bin/caddy /usr/bin/caddy

View File

@@ -1,40 +0,0 @@
.PHONY: help build-caddy build up down restart logs clean
help:
@echo "Available commands:"
@echo " make build-caddy - Build the Caddy Docker image"
@echo " make build - Build all images (including Caddy)"
@echo " make up - Start all services"
@echo " make down - Stop all services"
@echo " make restart - Restart all services"
@echo " make logs - View logs from all services"
@echo " make logs-caddy - View Caddy logs"
@echo " make logs-livekit - View LiveKit logs"
@echo " make clean - Stop services and remove volumes"
build-caddy:
docker build -f Dockerfile.caddy -t fluxer-livekit-caddy:latest .
build: build-caddy
docker compose build
up:
docker compose up -d
down:
docker compose down
restart:
docker compose restart
logs:
docker compose logs -f
logs-caddy:
docker compose logs -f caddy
logs-livekit:
docker compose logs -f livekit
clean:
docker compose down -v

View File

@@ -1,51 +0,0 @@
logging:
logs:
default:
level: INFO
storage:
module: 'file_system'
root: '/data'
apps:
tls:
automation:
policies:
- subjects:
- '${LIVEKIT_DOMAIN}'
- '${LIVEKIT_DOMAIN_TURN}'
issuers:
- module: acme
on_demand: false
certificates:
automate:
- '${LIVEKIT_DOMAIN}'
- '${LIVEKIT_DOMAIN_TURN}'
http:
servers:
main:
listen: [':80', ':443']
routes:
- match:
- host:
- '${LIVEKIT_DOMAIN}'
handle:
- handler: reverse_proxy
upstreams:
- dial: 'livekit:7880'
layer4:
servers:
turn:
listen: [':5349']
routes:
- match:
- tls:
sni:
- '${LIVEKIT_DOMAIN_TURN}'
handle:
- handler: tls
- handler: proxy
upstreams:
- dial: ['livekit:5349']

View File

@@ -1,65 +0,0 @@
services:
valkey:
image: valkey/valkey:7.2-alpine
hostname: valkey
command: >
valkey-server
--requirepass ${REDIS_PASSWORD}
--maxmemory 3gb
--maxmemory-policy allkeys-lru
--save 900 1
--save 300 10
--save 60 10000
--appendonly yes
volumes:
- valkey_data:/data
restart: unless-stopped
livekit:
image: livekit/livekit-server:latest
hostname: livekit
entrypoint: /entrypoint.sh
env_file:
- .env
volumes:
- ./conf/livekit.yaml.template:/etc/livekit.yaml.template:ro
- ./entrypoint.sh:/entrypoint.sh:ro
ports:
- '7881:7881'
- '7882:7882/udp'
- '3478:3478/udp'
depends_on:
- valkey
restart: unless-stopped
deploy:
resources:
limits:
cpus: '4'
memory: 6G
reservations:
cpus: '1'
memory: 2G
caddy:
build:
context: .
dockerfile: Dockerfile.caddy
image: fluxer-livekit-caddy:latest
hostname: livekit-caddy
entrypoint: /entrypoint-caddy.sh
env_file:
- .env
volumes:
- ./caddy.yaml.template:/etc/caddy.yaml.template:ro
- ./entrypoint-caddy.sh:/entrypoint-caddy.sh:ro
- caddy_data:/data
ports:
- '443:443'
- '80:80'
depends_on:
- livekit
restart: unless-stopped
volumes:
valkey_data:
caddy_data:

View File

@@ -1,37 +0,0 @@
port: 7880
bind_addresses:
- ''
rtc:
tcp_port: 7881
udp_port: 7882
use_external_ip: false
node_ip: ${NODE_IP}
enable_loopback_candidate: false
redis:
address: valkey:6379
username: ''
password: ${REDIS_PASSWORD}
db: 0
use_tls: false
turn:
enabled: true
domain: ${LIVEKIT_DOMAIN_TURN}
tls_port: 5349
udp_port: 3478
external_tls: true
keys:
${LIVEKIT_API_KEY}: ${LIVEKIT_API_SECRET}
room:
auto_create: true
empty_timeout: 300
departure_timeout: 20
webhook:
api_key: ${LIVEKIT_API_KEY}
urls:
- ${LIVEKIT_WEBHOOK_URL}

View File

@@ -1,30 +0,0 @@
#!/bin/sh
# Copyright (C) 2026 Fluxer Contributors
#
# This file is part of Fluxer.
#
# Fluxer is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fluxer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
set -e
sed -e "s|\${NODE_IP}|${NODE_IP}|g" \
-e "s|\${REDIS_PASSWORD}|${REDIS_PASSWORD}|g" \
-e "s|\${LIVEKIT_API_KEY}|${LIVEKIT_API_KEY}|g" \
-e "s|\${LIVEKIT_API_SECRET}|${LIVEKIT_API_SECRET}|g" \
-e "s|\${LIVEKIT_WEBHOOK_URL}|${LIVEKIT_WEBHOOK_URL}|g" \
-e "s|\${LIVEKIT_DOMAIN_TURN}|${LIVEKIT_DOMAIN_TURN}|g" \
/etc/livekit.yaml.template > /tmp/livekit.yaml
exec /livekit-server --config /tmp/livekit.yaml "$@"

View File

@@ -0,0 +1,172 @@
# livekitctl
A CLI tool for bootstrapping self-hosted LiveKit SFU infrastructure for Fluxer voice and video.
## Installation
```bash
curl -fsSL https://fluxer.app/get/livekitctl | sudo bash
```
## Overview
livekitctl automates the installation and configuration of a complete LiveKit media server stack including:
- **LiveKit** - WebRTC SFU for voice and video
- **Caddy** - Reverse proxy with automatic TLS (built with L4 module for TCP/UDP)
- **coturn** - TURN/STUN server for NAT traversal
- **KV store** - Redis-compatible key-value store for LiveKit state
## Prerequisites
- Linux server (Debian/Ubuntu, RHEL/CentOS, or Arch-based)
- Root access
- DNS records configured for your LiveKit and TURN domains pointing to your server's public IP
## Commands
### bootstrap
Install and configure the complete LiveKit stack.
```bash
livekitctl bootstrap \
--livekit-domain livekit.example.com \
--turn-domain turn.example.com \
--email admin@example.com
```
Required flags:
- `--livekit-domain <domain>` - Domain for LiveKit WebSocket/HTTP connections
- `--turn-domain <domain>` - Domain for TURN relay server
- `--email <email>` - ACME email for TLS certificate issuance
Optional flags:
- `--livekit-version <version>` - LiveKit version (default: v1.9.11)
- `--caddy-version <version>` - Caddy version (default: v2.10.2)
- `--caddy-l4-version <version>` - Caddy L4 module version (default: master)
- `--xcaddy-version <version>` - xcaddy build tool version (default: v0.4.5)
- `--install-dir <path>` - Override LiveKit install directory (default: /opt/livekit)
- `--firewall` - Configure detected firewall tool (ufw, firewalld, iptables)
- `--kv-port <port>` - KV store port (default: 6379)
- `--kv-port-auto` - Pick a free KV port from 6379-6382
- `--webhook-url <url>` - Webhook URL (repeatable)
- `--webhook-urls-file <file>` - File with webhook URLs (one per line)
- `--allow-http-webhooks` - Allow http:// webhook URLs
- `--dns-timeout <seconds>` - DNS wait timeout (default: 900)
- `--dns-interval <seconds>` - DNS check interval (default: 10)
- `--print-secrets` - Print generated secrets JSON to stdout
### status
Show systemd service status for all managed services.
```bash
livekitctl status
```
### logs
Show systemd logs for a specific service.
```bash
livekitctl logs --service livekit.service [--lines 200]
```
Flags:
- `--service <unit>` - systemd unit name (required), e.g., `livekit.service`, `caddy.service`
- `--lines <n>` - Number of log lines to show (default: 200)
### restart
Restart one or more services. If no services specified, restarts all managed services.
```bash
livekitctl restart [services...]
```
Examples:
```bash
livekitctl restart # Restart all services
livekitctl restart livekit.service # Restart only LiveKit
livekitctl restart caddy.service livekit-coturn.service
```
Managed services:
- `livekit-kv.service` - KV store
- `livekit-coturn.service` - TURN server
- `livekit.service` - LiveKit SFU
- `caddy.service` - Reverse proxy
### webhook
Manage LiveKit webhook URLs. Changes are written to config and LiveKit is restarted.
```bash
livekitctl webhook list
livekitctl webhook add <url> [--allow-http-webhooks]
livekitctl webhook remove <url>
livekitctl webhook set --url <url> [--url <url>...] [--file <path>] [--allow-http-webhooks]
```
Subcommands:
- `list` - List configured webhook URLs
- `add <url>` - Add a webhook URL
- `remove <url>` - Remove a webhook URL
- `set` - Replace all webhook URLs
## Port configuration
Default port allocations:
| Port | Protocol | Service |
| ----------- | -------- | ------------------------- |
| 7880 | TCP | LiveKit HTTP (internal) |
| 7881 | TCP | LiveKit RTC |
| 50000-60000 | UDP | LiveKit RTC media |
| 3478 | UDP | TURN listen |
| 40000-49999 | UDP | TURN relay |
| 6379 | TCP | KV store (localhost only) |
## State and configuration files
```
/etc/livekit/
livekitctl-state.json # Bootstrap state
secrets.json # Generated API keys and secrets
livekit.yaml # LiveKit server config
caddy.json # Caddy config
coturn.conf # TURN server config
/opt/livekit/
bin/
livekit-server # LiveKit binary
```
## DNS setup
Before running bootstrap, create DNS records pointing to your server's public IP:
```
A livekit.example.com → <your-ipv4>
A turn.example.com → <your-ipv4>
```
If your server has IPv6:
```
AAAA livekit.example.com → <your-ipv6>
AAAA turn.example.com → <your-ipv6>
```
The bootstrap command waits for DNS propagation before requesting TLS certificates.
## Global flags
- `--state <path>` - Path to state file (default: /etc/livekit/livekitctl-state.json)

View File

@@ -0,0 +1,272 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package cmd
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/constants"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/dnswait"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/errors"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/firewall"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/install"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/netutil"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/ops"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/platform"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/secrets"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/state"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/validate"
)
// bootstrapCmd installs and configures the full LiveKit stack:
// LiveKit server, Caddy (with the layer4 module), coturn, and a KV store.
var bootstrapCmd = &cobra.Command{
Use: "bootstrap",
Short: "Install and configure LiveKit, Caddy (l4), coturn, and KV store",
Run: runBootstrap,
}
// Flag storage for bootstrapCmd; bound to CLI flags in init() below.
var (
livekitDomain string
turnDomain string
email string
livekitVersion string
caddyVersion string
caddyL4Version string
xcaddyVersion string
installDir string
enableFirewall bool
kvPort int
kvPortAuto bool
webhookURLs []string
webhookURLsFile string
allowHTTPWebhooks bool
dnsTimeout int
dnsInterval int
printSecrets bool
)
// init registers bootstrapCmd on the root command and declares all of its
// flags. Domain and email flags are mandatory; version flags default to the
// pinned constants in the constants package.
func init() {
rootCmd.AddCommand(bootstrapCmd)
bootstrapCmd.Flags().StringVar(&livekitDomain, "livekit-domain", "", "LiveKit domain (required)")
bootstrapCmd.Flags().StringVar(&turnDomain, "turn-domain", "", "TURN domain (required)")
bootstrapCmd.Flags().StringVar(&email, "email", "", "ACME email (required)")
bootstrapCmd.Flags().StringVar(&livekitVersion, "livekit-version", constants.DefaultLiveKitVersion, "LiveKit version")
bootstrapCmd.Flags().StringVar(&caddyVersion, "caddy-version", constants.DefaultCaddyVersion, "Caddy version")
bootstrapCmd.Flags().StringVar(&caddyL4Version, "caddy-l4-version", constants.DefaultCaddyL4Version, "Caddy L4 version")
bootstrapCmd.Flags().StringVar(&xcaddyVersion, "xcaddy-version", constants.DefaultXcaddyVersion, "xcaddy version")
bootstrapCmd.Flags().StringVar(&installDir, "install-dir", "", "Override LiveKit install dir (default: /opt/livekit)")
bootstrapCmd.Flags().BoolVar(&enableFirewall, "firewall", false, "Configure detected firewall tool")
// 0 means "not set"; runBootstrap falls back to constants.DefaultPorts().
bootstrapCmd.Flags().IntVar(&kvPort, "kv-port", 0, "KV port (default: 6379)")
bootstrapCmd.Flags().BoolVar(&kvPortAuto, "kv-port-auto", false, "Pick a free KV port from 6379-6382")
bootstrapCmd.Flags().StringArrayVar(&webhookURLs, "webhook-url", nil, "Webhook URL (repeatable)")
bootstrapCmd.Flags().StringVar(&webhookURLsFile, "webhook-urls-file", "", "File with webhook URLs (one per line)")
bootstrapCmd.Flags().BoolVar(&allowHTTPWebhooks, "allow-http-webhooks", false, "Allow http:// webhook URLs")
bootstrapCmd.Flags().IntVar(&dnsTimeout, "dns-timeout", 900, "DNS wait timeout in seconds")
bootstrapCmd.Flags().IntVar(&dnsInterval, "dns-interval", 10, "DNS check interval in seconds")
bootstrapCmd.Flags().BoolVar(&printSecrets, "print-secrets", false, "Print secrets JSON to stdout")
// NOTE(review): MarkFlagRequired errors are ignored; they only occur on a
// misspelled flag name, but such a typo would fail silently here.
bootstrapCmd.MarkFlagRequired("livekit-domain")
bootstrapCmd.MarkFlagRequired("turn-domain")
bootstrapCmd.MarkFlagRequired("email")
}
// runBootstrap implements `livekitctl bootstrap`: validate inputs, install
// the stack (base packages, KV binary, LiveKit, Caddy+L4), persist state and
// secrets, wait for DNS, apply config and restart services, and optionally
// configure the firewall. Every fallible step is funneled through
// exitOnError, so any failure aborts the process.
func runBootstrap(cmd *cobra.Command, args []string) {
exitOnError(ops.EnsureLinuxRoot())
// Validate and normalise all user-supplied inputs before touching the system.
livekitDomainValidated, err := validate.RequireDomain(livekitDomain, "livekit domain")
exitOnError(err)
turnDomainValidated, err := validate.RequireDomain(turnDomain, "turn domain")
exitOnError(err)
acmeEmail, err := validate.RequireEmail(email)
exitOnError(err)
ports := constants.DefaultPorts()
if kvPort > 0 {
ports.KVPort = kvPort
}
livekitVersionValidated, err := validate.NormaliseVersionTag(livekitVersion)
exitOnError(err)
caddyVersionValidated, err := validate.NormaliseVersionTag(caddyVersion)
exitOnError(err)
caddyL4VersionValidated, err := validate.NormaliseVersionTag(caddyL4Version)
exitOnError(err)
xcaddyVersionValidated, err := validate.NormaliseVersionTag(xcaddyVersion)
exitOnError(err)
// Collect webhook URLs from repeatable --webhook-url flags and, optionally,
// a file with one URL per line. http:// is rejected unless explicitly allowed.
var webhooks []string
for _, u := range webhookURLs {
validated, err := validate.RequireWebhookURL(u, allowHTTPWebhooks)
exitOnError(err)
webhooks = append(webhooks, validated)
}
if webhookURLsFile != "" {
lines, err := ops.ReadLinesFile(webhookURLsFile)
exitOnError(err)
for _, u := range lines {
validated, err := validate.RequireWebhookURL(u, allowHTTPWebhooks)
exitOnError(err)
webhooks = append(webhooks, validated)
}
}
pm := platform.DetectPackageManager()
if pm == nil {
exitOnError(errors.NewPlatformError("No supported package manager detected."))
}
exitOnError(install.InstallBasePackages(pm))
exitOnError(install.EnsureUsers())
kvBin, err := install.InstallKVBinary(pm)
exitOnError(err)
paths := state.DefaultPaths()
if installDir != "" {
paths.LiveKitInstallDir = installDir
paths.LiveKitBinDir = filepath.Join(installDir, "bin")
}
// --kv-port-auto: probe 6379-6382 with `ss`; grep exits non-zero when no
// listener matches the port, i.e. the port is free. First free one wins.
// NOTE(review): this shells out via `bash -lc`, so bash must be present.
if kvPortAuto {
for _, cand := range []int{6379, 6380, 6381, 6382} {
output, exitCode := util.RunCaptureNoCheck([]string{"bash", "-lc", fmt.Sprintf("ss -lnt | awk '{print $4}' | grep -q ':%d$'", cand)})
_ = output
if exitCode != 0 {
ports.KVPort = cand
break
}
}
}
fwTool := firewall.DetectFirewallTool()
firewallCfg := state.FirewallConfig{Enabled: enableFirewall, Tool: fwTool.Name}
// Assemble the persistent bootstrap state from the validated inputs.
st := state.NewState(state.NewStateParams{
ACMEEmail: acmeEmail,
Domains: state.Domains{
LiveKit: livekitDomainValidated,
TURN: turnDomainValidated,
},
Ports: ports,
Versions: state.Versions{
LiveKit: livekitVersionValidated,
Caddy: caddyVersionValidated,
CaddyL4: caddyL4VersionValidated,
Xcaddy: xcaddyVersionValidated,
},
KV: state.KVConfig{
BindHost: ports.KVBindHost,
Port: ports.KVPort,
},
Webhooks: webhooks,
Firewall: firewallCfg,
Paths: &paths,
})
exitOnError(os.MkdirAll(st.Paths.ConfigDir, 0755))
// Generate and persist fresh API keys/secrets before any service config.
sec := secrets.GenerateNewSecrets()
exitOnError(ops.SaveSecrets(st, sec))
// Public IPv4 is mandatory; IPv6 is advertised only if a global address exists.
pub4 := netutil.DetectPublicIP("4")
if pub4 == "" {
exitOnError(errors.NewPlatformError("Could not detect public IPv4."))
}
var pub6 string
if netutil.HasGlobalIPv6() {
pub6 = netutil.DetectPublicIP("6")
}
priv4 := netutil.PrimaryPrivateIPv4()
util.Log("")
util.Log("DNS records needed before TLS issuance:")
util.Logf("A %s -> %s", livekitDomainValidated, pub4)
util.Logf("A %s -> %s", turnDomainValidated, pub4)
if pub6 != "" {
util.Logf("AAAA %s -> %s", livekitDomainValidated, pub6)
util.Logf("AAAA %s -> %s", turnDomainValidated, pub6)
}
util.Log("")
// Wait (up to --dns-timeout) for both domains to resolve to this host.
// A timeout is non-fatal: bootstrap proceeds, but ACME may fail until DNS
// propagates.
okDNS := dnswait.WaitForDNS(livekitDomainValidated, turnDomainValidated, pub4, pub6, dnsTimeout, dnsInterval)
if !okDNS {
util.Log("DNS not verified yet. Continuing. ACME may fail until DNS is correct.")
}
_, err = install.InstallLiveKitBinary(livekitVersionValidated, st.Paths.LiveKitInstallDir, "")
exitOnError(err)
// Build Caddy with the L4 module via xcaddy in a temp dir.
exitOnError(install.EnsureCaddyWithL4(
"/tmp/livekitctl-caddy-build",
caddyVersionValidated,
caddyL4VersionValidated,
xcaddyVersionValidated,
st.Paths.CaddyBin,
))
exitOnError(state.SaveState(st))
ops.StopConflictingServices()
exitOnError(ops.ApplyConfigAndRestart(st, kvBin, pub4, priv4))
if st.Firewall.Enabled {
msg, err := ops.ConfigureFirewallFromState(st)
exitOnError(err)
util.Log(msg)
}
// Summary: where state and secrets were written, then basic health checks.
util.Log("")
util.Log("Bootstrap completed.")
util.Log("")
util.Log("State:")
util.Logf(" %s", st.Paths.StatePath)
util.Log("Secrets:")
util.Logf(" %s", st.Paths.SecretsPath)
util.Log("")
if printSecrets {
// Best-effort: a read error silently suppresses the secrets dump.
data, err := os.ReadFile(st.Paths.SecretsPath)
if err == nil {
util.Log("Secrets JSON:")
util.Log(strings.TrimSpace(string(data)))
util.Log("")
}
}
util.Log(ops.RunBasicHealthChecks(st))
}

View File

@@ -0,0 +1,56 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package cmd
import (
"github.com/spf13/cobra"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/ops"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// logsCmd shows journal output for a single managed systemd unit.
var logsCmd = &cobra.Command{
	Use:   "logs",
	Short: "Show systemd logs",
	Run:   runLogs,
}

var (
	// logsService is the systemd unit name supplied via --service (required).
	logsService string
	// logsLines is the number of journal lines to show (--lines, default 200).
	logsLines int
)

func init() {
	rootCmd.AddCommand(logsCmd)
	logsCmd.Flags().StringVar(&logsService, "service", "", "systemd unit, eg livekit.service (required)")
	logsCmd.Flags().IntVar(&logsLines, "lines", 200, "Number of log lines")
	logsCmd.MarkFlagRequired("service")
}
// runLogs prints log output for the unit selected by --service.
// Requires root on Linux and a loadable state file; any failure exits.
func runLogs(cmd *cobra.Command, args []string) {
	exitOnError(ops.EnsureLinuxRoot())
	loaded, loadErr := ops.EnsureStateLoadedOrFail(statePath)
	exitOnError(loadErr)
	output := ops.OpLogs(loaded, logsService, logsLines)
	util.Log(output)
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package cmd
import (
"github.com/spf13/cobra"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/ops"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// restartCmd restarts managed services; with no arguments it restarts the
// full managed set (KV, CoTURN, LiveKit, Caddy).
var restartCmd = &cobra.Command{
	Use:   "restart [services...]",
	Short: "Restart one or more services",
	Long:  "Restart one or more services. If no services specified, restarts all managed services.",
	Run:   runRestart,
}

func init() {
	rootCmd.AddCommand(restartCmd)
}
// runRestart restarts the units given as arguments, defaulting to every
// managed unit when none are named. Requires root on Linux.
func runRestart(cmd *cobra.Command, args []string) {
	exitOnError(ops.EnsureLinuxRoot())
	targets := args
	if len(targets) == 0 {
		// Default set: KV and TURN first, then LiveKit, then the edge proxy.
		targets = []string{"livekit-kv.service", "livekit-coturn.service", "livekit.service", "caddy.service"}
	}
	exitOnError(ops.OpRestart(targets))
	util.Log("Restart requested.")
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// statePath holds the --state override; empty selects the built-in default
// (/etc/livekit/livekitctl-state.json).
var statePath string

// rootCmd is the top-level livekitctl command; subcommands attach themselves
// in their files' init() functions.
var rootCmd = &cobra.Command{
	Use:   "livekitctl",
	Short: "LiveKit bootstrap and operations CLI",
	Long:  "Self-hosted LiveKit bootstrap and operations CLI for installing and managing LiveKit servers.",
}
// Execute dispatches the CLI; it returns cobra's parse/run error, if any.
func Execute() error {
	return rootCmd.Execute()
}
func init() {
	// --state is persistent so every subcommand can point at an alternate
	// state file.
	rootCmd.PersistentFlags().StringVar(&statePath, "state", "", "Path to state file (default: /etc/livekit/livekitctl-state.json)")
}
// exitOnError prints err to stderr and terminates with exit status 1.
// A nil err is a no-op.
func exitOnError(err error) {
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
	os.Exit(1)
}

View File

@@ -0,0 +1,46 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package cmd
import (
"github.com/spf13/cobra"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/ops"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// statusCmd prints the systemd status of every managed service.
var statusCmd = &cobra.Command{
	Use:   "status",
	Short: "Show systemd status for managed services",
	Run:   runStatus,
}

func init() {
	rootCmd.AddCommand(statusCmd)
}
// runStatus loads state and prints the status report for managed services.
// Requires root on Linux; any failure exits.
func runStatus(cmd *cobra.Command, args []string) {
	exitOnError(ops.EnsureLinuxRoot())
	loaded, loadErr := ops.EnsureStateLoadedOrFail(statePath)
	exitOnError(loadErr)
	report := ops.OpStatus(loaded)
	util.Log(report)
}

View File

@@ -0,0 +1,172 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/install"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/netutil"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/ops"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/platform"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/state"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// webhookCmd groups the webhook subcommands. Every mutation rewrites
// livekit.yaml and restarts LiveKit via applyAndRestart.
var webhookCmd = &cobra.Command{
	Use:   "webhook",
	Short: "Manage LiveKit webhooks (writes config and restarts LiveKit)",
}

// webhookListCmd prints the configured webhook URLs, one per line.
var webhookListCmd = &cobra.Command{
	Use:   "list",
	Short: "List webhook URLs",
	Run:   runWebhookList,
}

// webhookAddCmd appends a single webhook URL.
var webhookAddCmd = &cobra.Command{
	Use:   "add <url>",
	Short: "Add a webhook URL",
	Args:  cobra.ExactArgs(1),
	Run:   runWebhookAdd,
}

// webhookRemoveCmd deletes a single webhook URL.
var webhookRemoveCmd = &cobra.Command{
	Use:   "remove <url>",
	Short: "Remove a webhook URL",
	Args:  cobra.ExactArgs(1),
	Run:   runWebhookRemove,
}

// webhookSetCmd replaces the entire webhook list.
var webhookSetCmd = &cobra.Command{
	Use:   "set",
	Short: "Replace webhook URLs",
	Run:   runWebhookSet,
}

var (
	// webhookAllowHTTP permits plain http:// URLs; it backs the
	// --allow-http-webhooks flag on both `add` and `set` (shared variable).
	webhookAllowHTTP bool
	// webhookSetURLs collects repeatable --url values for `webhook set`.
	webhookSetURLs []string
	// webhookSetFile optionally names a file of URLs (one per line) for `webhook set`.
	webhookSetFile string
)
func init() {
	rootCmd.AddCommand(webhookCmd)
	webhookCmd.AddCommand(webhookListCmd)
	// webhookAllowHTTP is registered on both add and set; only one
	// subcommand runs per invocation, so sharing the variable is safe.
	webhookAddCmd.Flags().BoolVar(&webhookAllowHTTP, "allow-http-webhooks", false, "Allow http:// webhook URLs")
	webhookCmd.AddCommand(webhookAddCmd)
	webhookCmd.AddCommand(webhookRemoveCmd)
	webhookSetCmd.Flags().StringArrayVar(&webhookSetURLs, "url", nil, "Webhook URL (repeatable)")
	webhookSetCmd.Flags().StringVar(&webhookSetFile, "file", "", "File with webhook URLs (one per line)")
	webhookSetCmd.Flags().BoolVar(&webhookAllowHTTP, "allow-http-webhooks", false, "Allow http:// webhook URLs")
	webhookCmd.AddCommand(webhookSetCmd)
}
// runWebhookList prints each configured webhook URL on its own line.
func runWebhookList(cmd *cobra.Command, args []string) {
	exitOnError(ops.EnsureLinuxRoot())
	loaded, loadErr := ops.EnsureStateLoadedOrFail(statePath)
	exitOnError(loadErr)
	urls := ops.WebhookList(loaded)
	for _, url := range urls {
		fmt.Println(url)
	}
}
// runWebhookAdd adds args[0] as a webhook URL; when the list actually
// changed, config is rewritten and LiveKit restarted.
func runWebhookAdd(cmd *cobra.Command, args []string) {
	exitOnError(ops.EnsureLinuxRoot())
	loaded, loadErr := ops.EnsureStateLoadedOrFail(statePath)
	exitOnError(loadErr)
	added, addErr := ops.WebhookAdd(loaded, args[0], webhookAllowHTTP)
	exitOnError(addErr)
	if !added {
		util.Log("Webhook already present.")
		return
	}
	exitOnError(applyAndRestart(loaded))
	util.Log("Webhook added and LiveKit restarted.")
}
// runWebhookRemove removes args[0] from the webhook list; when the list
// actually changed, config is rewritten and LiveKit restarted.
func runWebhookRemove(cmd *cobra.Command, args []string) {
	exitOnError(ops.EnsureLinuxRoot())
	loaded, loadErr := ops.EnsureStateLoadedOrFail(statePath)
	exitOnError(loadErr)
	removed, removeErr := ops.WebhookRemove(loaded, args[0])
	exitOnError(removeErr)
	if !removed {
		util.Log("Webhook not found.")
		return
	}
	exitOnError(applyAndRestart(loaded))
	util.Log("Webhook removed and LiveKit restarted.")
}
// runWebhookSet replaces the webhook list with --url values plus any URLs
// read from --file, then rewrites config and restarts LiveKit.
func runWebhookSet(cmd *cobra.Command, args []string) {
	exitOnError(ops.EnsureLinuxRoot())
	loaded, loadErr := ops.EnsureStateLoadedOrFail(statePath)
	exitOnError(loadErr)
	urls := append([]string(nil), webhookSetURLs...)
	if webhookSetFile != "" {
		fileLines, readErr := ops.ReadLinesFile(webhookSetFile)
		exitOnError(readErr)
		urls = append(urls, fileLines...)
	}
	exitOnError(ops.WebhookSet(loaded, urls, webhookAllowHTTP))
	exitOnError(applyAndRestart(loaded))
	util.Log("Webhooks updated and LiveKit restarted.")
}
// applyAndRestart regenerates all configs from st and restarts the managed
// services. It re-resolves the KV binary and the host's public/private IPv4;
// when no public IPv4 is detected it logs a warning and falls back to
// 0.0.0.0 rather than failing.
func applyAndRestart(st *state.BootstrapState) error {
	pm := platform.DetectPackageManager()
	if pm == nil {
		return fmt.Errorf("no supported package manager detected")
	}
	kvBin, installErr := install.InstallKVBinary(pm)
	if installErr != nil {
		return installErr
	}
	publicIPv4 := netutil.DetectPublicIP("4")
	if publicIPv4 == "" {
		util.Log("Warning: Could not detect public IPv4, using 0.0.0.0")
		publicIPv4 = "0.0.0.0"
	}
	privateIPv4 := netutil.PrimaryPrivateIPv4()
	return ops.ApplyConfigAndRestart(st, kvBin, publicIPv4, privateIPv4)
}

View File

@@ -0,0 +1,10 @@
module github.com/fluxerapp/fluxer/fluxer_devops/livekitctl
go 1.24
require github.com/spf13/cobra v1.8.1
require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
)

View File

@@ -0,0 +1,10 @@
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -0,0 +1,433 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package configgen
import (
"encoding/json"
"fmt"
"path/filepath"
"strings"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/secrets"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/state"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// GenerateLiveKitYAML renders livekit.yaml from state and secrets. The server
// binds 127.0.0.1 only (Caddy terminates TLS in front of it), advertises the
// CoTURN server twice (TLS on 443 via Caddy, and direct UDP), and points at
// the managed Redis-compatible KV store at redisAddr. When st.Webhooks is
// non-empty a webhook block signed with the LiveKit API key is appended.
//
// NOTE(review): leading whitespace inside the template literals below appears
// to have been stripped in this view — verify the emitted YAML nesting
// (children of rtc:, turn_servers:, redis:, keys:, webhook:) against a
// known-good livekit.yaml before trusting this rendering.
func GenerateLiveKitYAML(st *state.BootstrapState, sec *secrets.Secrets, redisAddr string) string {
	var webhookBlock string
	if len(st.Webhooks) > 0 {
		var urls []string
		for _, u := range st.Webhooks {
			// One "- '<url>'" sequence entry per configured webhook.
			urls = append(urls, fmt.Sprintf(" - '%s'", u))
		}
		webhookBlock = fmt.Sprintf(`webhook:
api_key: '%s'
urls:
%s
`, sec.LiveKitAPIKey, strings.Join(urls, "\n"))
	}
	return fmt.Sprintf(`port: %d
bind_addresses:
- "127.0.0.1"
log_level: info
rtc:
tcp_port: %d
port_range_start: %d
port_range_end: %d
use_external_ip: true
turn_servers:
- host: "%s"
port: 443
protocol: tls
username: "%s"
credential: "%s"
- host: "%s"
port: %d
protocol: udp
username: "%s"
credential: "%s"
redis:
address: "%s"
username: ""
password: "%s"
db: 0
use_tls: false
keys:
"%s": "%s"
%s`,
		st.Ports.LiveKitHTTPLocal,
		st.Ports.LiveKitRTCTCP,
		st.Ports.LiveKitRTCUDPStart,
		st.Ports.LiveKitRTCUDPEnd,
		st.Domains.TURN,
		sec.TURNUsername,
		sec.TURNPassword,
		st.Domains.TURN,
		st.Ports.TURNListenPort,
		sec.TURNUsername,
		sec.TURNPassword,
		redisAddr,
		sec.KVPassword,
		sec.LiveKitAPIKey,
		sec.LiveKitAPISecret,
		strings.TrimSpace(webhookBlock),
	)
}
// GenerateKVConf renders a redis-compatible kv.conf: bound to bindHost with
// protected-mode and requirepass enabled, foreground (daemonize no) for
// systemd supervision, and AOF+RDB persistence rooted at dataDir.
func GenerateKVConf(sec *secrets.Secrets, bindHost string, port int, dataDir string) string {
	return fmt.Sprintf(`bind %s
protected-mode yes
port %d
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
supervised no
dir %s
dbfilename dump.rdb
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
requirepass %s
`, bindHost, port, dataDir, sec.KVPassword)
}
// GenerateCoTURNConf renders coturn.conf. TLS/DTLS are disabled here because
// Caddy terminates TLS on :443 and proxies plaintext TURN to listening-port.
// When the host is NATed (private address differs from the public one),
// external-ip takes the "public/private" form so coturn can rewrite relay
// addresses.
func GenerateCoTURNConf(st *state.BootstrapState, sec *secrets.Secrets, publicIPv4, privateIPv4 string) string {
	external := publicIPv4
	if privateIPv4 != "" && privateIPv4 != publicIPv4 {
		// NAT case: "public/private" pair for address translation.
		external = fmt.Sprintf("%s/%s", publicIPv4, privateIPv4)
	}
	return fmt.Sprintf(`listening-port=%d
fingerprint
lt-cred-mech
user=%s:%s
realm=%s
server-name=%s
no-multicast-peers
no-loopback-peers
stale-nonce
no-tls
no-dtls
min-port=%d
max-port=%d
external-ip=%s
`, st.Ports.TURNListenPort,
		sec.TURNUsername, sec.TURNPassword,
		st.Domains.TURN,
		st.Domains.TURN,
		st.Ports.TURNRelayUDPStart,
		st.Ports.TURNRelayUDPEnd,
		external)
}
// GenerateLiveKitUnit renders the livekit.service systemd unit: runs as the
// livekit user with sandboxing (ProtectSystem=strict etc.); ReadWritePaths
// grants the log dir, install dir, config dir and /var/lib/livekit.
func GenerateLiveKitUnit(st *state.BootstrapState) string {
	return fmt.Sprintf(`[Unit]
Description=LiveKit Server
After=network-online.target
Wants=network-online.target
[Service]
User=livekit
Group=livekit
ExecStart=%s/livekit-server --config %s/livekit.yaml
Restart=on-failure
RestartSec=2
LimitNOFILE=1048576
WorkingDirectory=%s
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=%s %s %s /var/lib/livekit
LockPersonality=true
MemoryDenyWriteExecute=true
RestrictSUIDSGID=true
RestrictRealtime=true
[Install]
WantedBy=multi-user.target
`, st.Paths.LiveKitBinDir, st.Paths.ConfigDir, st.Paths.LiveKitInstallDir,
		st.Paths.LiveKitLogDir, st.Paths.LiveKitInstallDir, st.Paths.ConfigDir)
}
// GenerateCaddyJSON builds the Caddy config as JSON. Caddy fronts both
// domains on :443 through the layer4 app and routes by TLS SNI: the TURN
// domain is TLS-terminated then proxied to CoTURN's plaintext listener, and
// the LiveKit domain is proxied to LiveKit's local HTTP port. Certificates
// for both domains are automated via ACME using st.ACMEEmail.
func GenerateCaddyJSON(st *state.BootstrapState) string {
	caddyConfig := map[string]interface{}{
		// Persist certificates / ACME account data under CaddyStorageDir.
		"storage": map[string]interface{}{
			"module": "file_system",
			"root":   st.Paths.CaddyStorageDir,
		},
		"logging": map[string]interface{}{
			"logs": map[string]interface{}{
				"default": map[string]interface{}{
					"level": "INFO",
				},
			},
		},
		"apps": map[string]interface{}{
			// ACME automation for both public domains.
			"tls": map[string]interface{}{
				"automation": map[string]interface{}{
					"policies": []interface{}{
						map[string]interface{}{
							"subjects": []string{st.Domains.LiveKit, st.Domains.TURN},
							"issuers": []interface{}{
								map[string]interface{}{
									"module": "acme",
									"email":  st.ACMEEmail,
								},
							},
						},
					},
				},
				"certificates": map[string]interface{}{
					"automate": []string{st.Domains.LiveKit, st.Domains.TURN},
				},
			},
			// SNI-based routing on :443 (caddy-l4 plugin).
			"layer4": map[string]interface{}{
				"servers": map[string]interface{}{
					"main443": map[string]interface{}{
						"listen": []string{":443"},
						"routes": []interface{}{
							// TURN domain: terminate TLS, proxy to CoTURN.
							map[string]interface{}{
								"match": []interface{}{
									map[string]interface{}{
										"tls": map[string]interface{}{
											"sni": []string{st.Domains.TURN},
										},
									},
								},
								"handle": []interface{}{
									map[string]interface{}{
										"handler": "tls",
										"connection_policies": []interface{}{
											map[string]interface{}{
												"alpn": []string{"acme-tls/1", "h2", "http/1.1"},
											},
										},
									},
									map[string]interface{}{
										"handler": "proxy",
										"upstreams": []interface{}{
											map[string]interface{}{
												"dial": []string{fmt.Sprintf("127.0.0.1:%d", st.Ports.TURNListenPort)},
											},
										},
									},
								},
							},
							// LiveKit domain: terminate TLS, proxy to the local HTTP port.
							map[string]interface{}{
								"match": []interface{}{
									map[string]interface{}{
										"tls": map[string]interface{}{
											"sni": []string{st.Domains.LiveKit},
										},
									},
								},
								"handle": []interface{}{
									map[string]interface{}{
										"handler": "tls",
										"connection_policies": []interface{}{
											map[string]interface{}{
												"alpn": []string{"acme-tls/1", "http/1.1"},
											},
										},
									},
									map[string]interface{}{
										"handler": "proxy",
										"upstreams": []interface{}{
											map[string]interface{}{
												"dial": []string{fmt.Sprintf("127.0.0.1:%d", st.Ports.LiveKitHTTPLocal)},
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	data, err := json.MarshalIndent(caddyConfig, "", " ")
	if err != nil {
		// A literal map of JSON-safe types cannot fail to marshal in
		// practice; panic to surface a programmer error loudly.
		panic("failed to marshal caddy config: " + err.Error())
	}
	return string(data) + "\n"
}
// GenerateCaddyUnit renders the caddy.service unit for the custom
// xcaddy-built binary with caddy-l4. CAP_NET_BIND_SERVICE lets the non-root
// caddy user bind :443.
func GenerateCaddyUnit(st *state.BootstrapState) string {
	return fmt.Sprintf(`[Unit]
Description=Caddy (custom build with caddy-l4) for LiveKit + TURN/TLS
After=network-online.target
Wants=network-online.target
[Service]
User=caddy
Group=caddy
ExecStart=%s run --config %s/caddy.json
ExecReload=%s reload --config %s/caddy.json
Restart=on-failure
LimitNOFILE=1048576
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
NoNewPrivileges=true
WorkingDirectory=%s
[Install]
WantedBy=multi-user.target
`, st.Paths.CaddyBin, st.Paths.ConfigDir, st.Paths.CaddyBin, st.Paths.ConfigDir, st.Paths.CaddyStorageDir)
}
// GenerateCoTURNUnit renders the livekit-coturn.service unit, running
// turnserver in the foreground (-n) against the generated coturn.conf.
// Note: no User= directive is set here, unlike the livekit/caddy units.
func GenerateCoTURNUnit(st *state.BootstrapState) string {
	return fmt.Sprintf(`[Unit]
Description=CoTURN for LiveKit
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/usr/bin/turnserver -c %s/coturn.conf -n
Restart=on-failure
RestartSec=2
LimitNOFILE=1048576
NoNewPrivileges=true
PrivateTmp=true
[Install]
WantedBy=multi-user.target
`, st.Paths.ConfigDir)
}
// GenerateKVUnit renders the livekit-kv.service unit, launching the detected
// Redis-compatible binary (kvBin) with the generated kv.conf.
func GenerateKVUnit(st *state.BootstrapState, kvBin string) string {
	return fmt.Sprintf(`[Unit]
Description=Redis-compatible KV store for LiveKit (managed by livekitctl)
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=%s %s/kv.conf
Restart=on-failure
RestartSec=2
LimitNOFILE=1048576
NoNewPrivileges=true
PrivateTmp=true
[Install]
WantedBy=multi-user.target
`, kvBin, st.Paths.ConfigDir)
}
// WriteAllConfigsParams bundles everything WriteAllConfigs needs to render
// and place the full set of runtime configuration files.
type WriteAllConfigsParams struct {
	State       *state.BootstrapState // installation state (paths, ports, domains)
	Secrets     *secrets.Secrets      // generated credentials embedded in configs
	PublicIPv4  string                // advertised public address (coturn external-ip)
	PrivateIPv4 string                // host-local address; "" when unknown
	KVBin       string                // path to the Redis-compatible server binary
}
// WriteAllConfigs materialises every runtime config derived from state and
// secrets: livekit.yaml, kv.conf, coturn.conf, caddy.json and — when the
// systemd unit directory exists — the four unit files. Secret-bearing files
// are written 0600/0640 and owned by the consuming service user when that
// user exists. Returns the first write/mkdir error encountered.
func WriteAllConfigs(params WriteAllConfigsParams) error {
	st := params.State
	sec := params.Secrets
	cfgDir := st.Paths.ConfigDir
	if err := util.EnsureDir(cfgDir, 0755, -1, -1); err != nil {
		return err
	}
	// Resolve service accounts; -1/-1 means "leave ownership unchanged"
	// when an account is missing.
	ugLiveKit := util.LookupUserGroup("livekit")
	ugCaddy := util.LookupUserGroup("caddy")
	lkUID, lkGID := -1, -1
	if ugLiveKit != nil {
		lkUID, lkGID = ugLiveKit.UID, ugLiveKit.GID
	}
	caddyUID, caddyGID := -1, -1
	if ugCaddy != nil {
		caddyUID, caddyGID = ugCaddy.UID, ugCaddy.GID
	}
	if err := util.EnsureDir(st.Paths.LiveKitLogDir, 0755, lkUID, lkGID); err != nil {
		return err
	}
	// Caddy's storage holds ACME private keys: owner-only access.
	if err := util.EnsureDir(st.Paths.CaddyStorageDir, 0700, caddyUID, caddyGID); err != nil {
		return err
	}
	if err := util.EnsureDir(st.Paths.CaddyLogDir, 0755, caddyUID, caddyGID); err != nil {
		return err
	}
	if err := util.EnsureDir(st.Paths.KVDataDir, 0700, -1, -1); err != nil {
		return err
	}
	redisAddr := fmt.Sprintf("%s:%d", st.KV.BindHost, st.KV.Port)
	// livekit.yaml embeds API/TURN/KV secrets: 0640, readable by livekit.
	livekitYAML := GenerateLiveKitYAML(st, sec, redisAddr)
	if err := util.AtomicWriteText(filepath.Join(cfgDir, "livekit.yaml"), livekitYAML, 0640, lkUID, lkGID); err != nil {
		return err
	}
	// kv.conf and coturn.conf embed passwords: 0600, default ownership.
	kvConf := GenerateKVConf(sec, st.KV.BindHost, st.KV.Port, st.Paths.KVDataDir)
	if err := util.AtomicWriteText(filepath.Join(cfgDir, "kv.conf"), kvConf, 0600, -1, -1); err != nil {
		return err
	}
	coturnConf := GenerateCoTURNConf(st, sec, params.PublicIPv4, params.PrivateIPv4)
	if err := util.AtomicWriteText(filepath.Join(cfgDir, "coturn.conf"), coturnConf, 0600, -1, -1); err != nil {
		return err
	}
	// caddy.json holds no secrets: world-readable.
	caddyJSON := GenerateCaddyJSON(st)
	if err := util.AtomicWriteText(filepath.Join(cfgDir, "caddy.json"), caddyJSON, 0644, -1, -1); err != nil {
		return err
	}
	// Only write unit files when the systemd unit directory is present.
	// NOTE(review): util.FileExists is applied to a directory here — confirm
	// it returns true for directories, not only regular files.
	if util.FileExists(st.Paths.UnitDir) {
		if err := util.AtomicWriteText(filepath.Join(st.Paths.UnitDir, "livekit.service"), GenerateLiveKitUnit(st), 0644, -1, -1); err != nil {
			return err
		}
		if err := util.AtomicWriteText(filepath.Join(st.Paths.UnitDir, "caddy.service"), GenerateCaddyUnit(st), 0644, -1, -1); err != nil {
			return err
		}
		if err := util.AtomicWriteText(filepath.Join(st.Paths.UnitDir, "livekit-coturn.service"), GenerateCoTURNUnit(st), 0644, -1, -1); err != nil {
			return err
		}
		if err := util.AtomicWriteText(filepath.Join(st.Paths.UnitDir, "livekit-kv.service"), GenerateKVUnit(st, params.KVBin), 0644, -1, -1); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -0,0 +1,53 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package constants
// Default upstream versions installed when the user does not override them.
const (
	DefaultLiveKitVersion = "v1.9.11" // livekit-server release tag
	DefaultXcaddyVersion  = "v0.4.5"  // xcaddy build tool release tag
	DefaultCaddyVersion   = "v2.10.2" // Caddy release tag
	DefaultCaddyL4Version = "master"  // caddy-l4 plugin ref (tracks master)
)
// Ports describes the complete listening-port layout of an installation;
// it is persisted in the state file via the json tags.
type Ports struct {
	LiveKitHTTPLocal   int    `json:"livekit_http_local"`   // LiveKit HTTP/WS port, loopback only
	LiveKitRTCTCP      int    `json:"livekit_rtc_tcp"`      // RTC over TCP
	LiveKitRTCUDPStart int    `json:"livekit_rtc_udp_start"` // RTC UDP range start
	LiveKitRTCUDPEnd   int    `json:"livekit_rtc_udp_end"`   // RTC UDP range end
	TURNListenPort     int    `json:"turn_listen_port"`     // CoTURN listener
	TURNRelayUDPStart  int    `json:"turn_relay_udp_start"` // TURN relay range start
	TURNRelayUDPEnd    int    `json:"turn_relay_udp_end"`   // TURN relay range end
	KVBindHost         string `json:"kv_bind_host"`         // KV store bind address
	KVPort             int    `json:"kv_port"`              // KV store port
}
// DefaultPorts returns the standard port layout for a fresh install:
// LiveKit on 7880/7881 with RTC UDP 50000-60000, TURN on 3478 relaying
// over 40000-49999, and the KV store on loopback 6379.
func DefaultPorts() Ports {
	var p Ports
	p.LiveKitHTTPLocal = 7880
	p.LiveKitRTCTCP = 7881
	p.LiveKitRTCUDPStart = 50000
	p.LiveKitRTCUDPEnd = 60000
	p.TURNListenPort = 3478
	p.TURNRelayUDPStart = 40000
	p.TURNRelayUDPEnd = 49999
	p.KVBindHost = "127.0.0.1"
	p.KVPort = 6379
	return p
}

View File

@@ -0,0 +1,96 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package dnswait
import (
"net"
"time"
)
// ResolveA looks up host and returns its IPv4 addresses as dotted-quad
// strings; lookup failure yields an empty (nil) slice.
func ResolveA(host string) []string {
	addrs, err := net.LookupIP(host)
	if err != nil {
		return nil
	}
	var v4 []string
	for _, addr := range addrs {
		ip4 := addr.To4()
		if ip4 == nil {
			continue
		}
		v4 = append(v4, ip4.String())
	}
	return v4
}
// ResolveAAAA looks up host and returns its non-IPv4 (i.e. IPv6) addresses
// as strings; lookup failure yields an empty (nil) slice.
func ResolveAAAA(host string) []string {
	addrs, err := net.LookupIP(host)
	if err != nil {
		return nil
	}
	var v6 []string
	for _, addr := range addrs {
		if addr.To4() != nil {
			continue
		}
		v6 = append(v6, addr.String())
	}
	return v6
}
// contains reports whether item occurs in slice.
func contains(slice []string, item string) bool {
	for i := range slice {
		if slice[i] == item {
			return true
		}
	}
	return false
}
// WaitForDNS polls until both domains resolve to publicIPv4 (and, when
// publicIPv6 is non-empty, to publicIPv6 as well), or until timeoutS seconds
// elapse. It probes at least once and sleeps intervalS seconds between
// probes; both durations are clamped to a minimum of one second.
func WaitForDNS(livekitDomain, turnDomain, publicIPv4, publicIPv6 string, timeoutS, intervalS int) bool {
	if timeoutS < 1 {
		timeoutS = 1
	}
	if intervalS < 1 {
		intervalS = 1
	}
	deadline := time.Now().Add(time.Duration(timeoutS) * time.Second)
	interval := time.Duration(intervalS) * time.Second
	for {
		if dnsMatches(livekitDomain, turnDomain, publicIPv4, publicIPv6) {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(interval)
	}
}

// dnsMatches reports whether both domains currently resolve to the expected
// IPv4 address, and to the expected IPv6 address when one is given.
func dnsMatches(livekitDomain, turnDomain, publicIPv4, publicIPv6 string) bool {
	lkA, tnA := ResolveA(livekitDomain), ResolveA(turnDomain)
	okV4 := contains(lkA, publicIPv4) && contains(tnA, publicIPv4)
	okV6 := true
	if publicIPv6 != "" {
		lkAAAA, tnAAAA := ResolveAAAA(livekitDomain), ResolveAAAA(turnDomain)
		okV6 = contains(lkAAAA, publicIPv6) && contains(tnAAAA, publicIPv6)
	}
	return okV4 && okV6
}

View File

@@ -0,0 +1,150 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package download
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"time"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/errors"
)
// DownloadResult describes a completed download.
type DownloadResult struct {
	Path           string // destination path the payload was written to
	SHA256Verified bool   // true only when a sibling .sha256 file existed and matched
}
// httpGet fetches url with a timeoutS-second overall timeout and returns the
// response body. Status codes outside [200, 400) are treated as errors.
func httpGet(url string, timeoutS int) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "livekitctl/0.1")
	client := &http.Client{Timeout: time.Duration(timeoutS) * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}
// httpHeadOK issues a HEAD request and reports whether the response status
// is in [200, 400); any transport error counts as false.
func httpHeadOK(url string, timeoutS int) bool {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return false
	}
	req.Header.Set("User-Agent", "livekitctl/0.1")
	client := &http.Client{Timeout: time.Duration(timeoutS) * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return false
	}
	resp.Body.Close()
	return resp.StatusCode >= 200 && resp.StatusCode < 400
}
func parseSHA256File(text string) string {
t := strings.TrimSpace(text)
if t == "" {
return ""
}
parts := strings.Fields(t)
if len(parts) == 0 {
return ""
}
h := strings.ToLower(strings.TrimSpace(parts[0]))
if len(h) != 64 {
return ""
}
for _, c := range h {
if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) {
return ""
}
}
return h
}
// DownloadWithOptionalSHA256 downloads url to dest, attempting up to
// retries+1 times. If a sibling "<url>.sha256" resource exists, the payload
// is verified against it; a mismatch deletes dest and counts as a failed
// attempt. Verification is best-effort: when the .sha256 file is absent or
// unreadable the download still succeeds with SHA256Verified=false.
//
// NOTE(review): dest is written before verification (non-atomic), and there
// is no backoff between retries — acceptable for a CLI, but worth knowing.
func DownloadWithOptionalSHA256(url, dest string, timeoutS, retries int) (*DownloadResult, error) {
	if timeoutS <= 0 {
		timeoutS = 30 // default per-request timeout in seconds
	}
	if retries < 0 {
		retries = 0
	}
	var lastErr error
	for i := 0; i <= retries; i++ {
		data, err := httpGet(url, timeoutS)
		if err != nil {
			lastErr = err
			continue
		}
		dir := filepath.Dir(dest)
		if err := os.MkdirAll(dir, 0755); err != nil {
			lastErr = err
			continue
		}
		if err := os.WriteFile(dest, data, 0644); err != nil {
			lastErr = err
			continue
		}
		// Optional integrity check via a sibling .sha256 resource.
		shaURL := url + ".sha256"
		verified := false
		if httpHeadOK(shaURL, timeoutS) {
			shaText, err := httpGet(shaURL, timeoutS)
			if err == nil {
				expected := parseSHA256File(string(shaText))
				if expected != "" {
					h := sha256.Sum256(data)
					got := hex.EncodeToString(h[:])
					if got != expected {
						// Corrupt payload: discard and retry.
						lastErr = errors.NewCmdError(fmt.Sprintf("SHA256 mismatch for %s", url), nil)
						os.Remove(dest)
						continue
					}
					verified = true
				}
			}
		}
		return &DownloadResult{Path: dest, SHA256Verified: verified}, nil
	}
	return nil, errors.NewCmdError(fmt.Sprintf("Download failed: %s (%v)", url, lastErr), nil)
}

View File

@@ -0,0 +1,66 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package errors
import "fmt"
// LiveKitCtlError is the shared base for livekitctl error types: a
// human-readable message plus an optional wrapped cause.
type LiveKitCtlError struct {
	Message string // human-readable description
	Err     error  // optional underlying cause; nil when none
}
// Error renders "Message: cause" when a cause is wrapped, else Message alone.
func (e *LiveKitCtlError) Error() string {
	if e.Err == nil {
		return e.Message
	}
	return fmt.Sprintf("%s: %v", e.Message, e.Err)
}
// Unwrap exposes the wrapped cause so errors.Is / errors.As can traverse it.
func (e *LiveKitCtlError) Unwrap() error {
	return e.Err
}
// CmdError reports a failed command-level operation (e.g. a download or
// external invocation).
type CmdError struct {
	LiveKitCtlError
}

// NewCmdError builds a CmdError from msg and an optional (nilable) cause.
func NewCmdError(msg string, err error) *CmdError {
	return &CmdError{LiveKitCtlError{Message: msg, Err: err}}
}
// ValidationError reports invalid user-supplied input (flags, URLs, versions).
type ValidationError struct {
	LiveKitCtlError
}

// NewValidationError builds a ValidationError carrying only a message.
func NewValidationError(msg string) *ValidationError {
	return &ValidationError{LiveKitCtlError{Message: msg}}
}
// PlatformError reports an unsupported or misdetected host environment.
type PlatformError struct {
	LiveKitCtlError
}

// NewPlatformError builds a PlatformError carrying only a message.
func NewPlatformError(msg string) *PlatformError {
	return &PlatformError{LiveKitCtlError{Message: msg}}
}

// NewPlatformErrorf is the printf-style variant of NewPlatformError.
func NewPlatformErrorf(format string, args ...interface{}) *PlatformError {
	return &PlatformError{LiveKitCtlError{Message: fmt.Sprintf(format, args...)}}
}

View File

@@ -0,0 +1,95 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package firewall
import (
"fmt"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/constants"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// FirewallTool identifies the host firewall frontend: "ufw", "firewalld" or "none".
type FirewallTool struct {
	Name string
}

// DetectFirewallTool probes PATH for known firewall frontends, preferring ufw.
func DetectFirewallTool() FirewallTool {
	probes := []struct{ binary, name string }{
		{"ufw", "ufw"},
		{"firewall-cmd", "firewalld"},
	}
	for _, p := range probes {
		if util.Which(p.binary) != "" {
			return FirewallTool{Name: p.name}
		}
	}
	return FirewallTool{Name: "none"}
}
// ConfigureFirewall opens SSH/HTTP(S), the LiveKit RTC ports and the TURN
// ports using the detected firewall tool, optionally enabling (ufw) or
// reloading (firewalld) it, and returns the tool's status output — or an
// explanatory message when no supported tool is available.
//
// Every command is best-effort (Check: false): a failing rule does not abort
// the remaining ones, matching the previous behavior.
func ConfigureFirewall(tool FirewallTool, ports constants.Ports, enable bool) string {
	// run executes one firewall command, capturing rather than failing.
	run := func(args ...string) {
		util.Run(args, util.RunOptions{Check: false, Capture: true})
	}
	// capture executes a command and returns its output ("" when util.Run yields nil).
	capture := func(args ...string) string {
		result, _ := util.Run(args, util.RunOptions{Check: false, Capture: true})
		if result != nil {
			return result.Output
		}
		return ""
	}
	switch tool.Name {
	case "none":
		return "Firewall tool not detected. Skipping."
	case "ufw":
		for _, rule := range []string{
			"22/tcp",
			"80/tcp",
			"443/tcp",
			fmt.Sprintf("%d/tcp", ports.LiveKitRTCTCP),
			fmt.Sprintf("%d:%d/udp", ports.LiveKitRTCUDPStart, ports.LiveKitRTCUDPEnd),
			fmt.Sprintf("%d/udp", ports.TURNListenPort),
			fmt.Sprintf("%d/tcp", ports.TURNListenPort),
			fmt.Sprintf("%d:%d/udp", ports.TURNRelayUDPStart, ports.TURNRelayUDPEnd),
		} {
			run("ufw", "allow", rule)
		}
		if enable {
			run("ufw", "--force", "enable")
		}
		return capture("ufw", "status", "verbose")
	case "firewalld":
		for _, svc := range []string{"ssh", "http", "https"} {
			run("firewall-cmd", "--permanent", "--add-service="+svc)
		}
		for _, port := range []string{
			fmt.Sprintf("%d/tcp", ports.LiveKitRTCTCP),
			fmt.Sprintf("%d-%d/udp", ports.LiveKitRTCUDPStart, ports.LiveKitRTCUDPEnd),
			fmt.Sprintf("%d/udp", ports.TURNListenPort),
			fmt.Sprintf("%d/tcp", ports.TURNListenPort),
			fmt.Sprintf("%d-%d/udp", ports.TURNRelayUDPStart, ports.TURNRelayUDPEnd),
		} {
			run("firewall-cmd", "--permanent", "--add-port", port)
		}
		if enable {
			run("firewall-cmd", "--reload")
		}
		return capture("firewall-cmd", "--list-all")
	}
	return "Unsupported firewall tool."
}

View File

@@ -0,0 +1,404 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package install
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/constants"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/download"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/errors"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/platform"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// DetectArchLinuxRelease maps the running GOARCH to the architecture token
// used in LiveKit's Linux release tarball names (amd64, arm64, armv7).
func DetectArchLinuxRelease() (string, error) {
	archNames := map[string]string{
		"amd64": "amd64",
		"arm64": "arm64",
		"arm":   "armv7",
	}
	if name, ok := archNames[strings.ToLower(runtime.GOARCH)]; ok {
		return name, nil
	}
	return "", errors.NewPlatformErrorf("Unsupported architecture: %s", runtime.GOARCH)
}
// LiveKitReleaseURL builds the GitHub download URL for a LiveKit release
// tarball; tag may be given with or without a leading "v".
func LiveKitReleaseURL(tag, arch string) string {
	version := strings.TrimPrefix(tag, "v")
	name := fmt.Sprintf("livekit_%s_linux_%s.tar.gz", version, arch)
	return "https://github.com/livekit/livekit/releases/download/v" + version + "/" + name
}
// EnsureUsers creates the "livekit" and "caddy" system accounts when the
// host has useradd available; a host without useradd is treated as a no-op.
func EnsureUsers() error {
	if util.Which("useradd") == "" {
		return nil
	}
	accounts := []struct{ name, home string }{
		{"livekit", "/var/lib/livekit"},
		{"caddy", "/var/lib/caddy"},
	}
	for _, a := range accounts {
		if err := ensureSystemUser(a.name, a.home); err != nil {
			return err
		}
	}
	return nil
}

// ensureSystemUser creates a no-login system account unless it already exists
// (existence is probed with `id -u`).
func ensureSystemUser(name, home string) error {
	if output, exitCode := util.RunCaptureNoCheck([]string{"id", "-u", name}); exitCode == 0 && strings.TrimSpace(output) != "" {
		return nil
	}
	args := []string{"useradd", "--system", "--home", home, "--shell", "/usr/sbin/nologin", name}
	return util.RunSimple(args)
}
// InstallBasePackages installs the toolchain and runtime dependencies
// (download tools, DNS/iproute utilities, libcap, coturn, git, a C toolchain
// and Go) using the host's package manager.
//
// Fix: previously an unrecognized pm.Kind fell through the switch with an
// empty package list and pm.Install silently succeeded; it now fails loudly.
func InstallBasePackages(pm *platform.PackageManager) error {
	var pkgs []string
	switch pm.Kind {
	case "apt":
		pkgs = []string{
			"ca-certificates",
			"curl",
			"tar",
			"xz-utils",
			"dnsutils",
			"iproute2",
			"libcap2-bin",
			"coturn",
			"git",
			"build-essential",
			"golang-go",
		}
	case "dnf", "yum":
		pkgs = []string{
			"ca-certificates",
			"curl",
			"tar",
			"xz",
			"bind-utils",
			"iproute",
			"libcap",
			"coturn",
			"git",
			"gcc",
			"gcc-c++",
			"make",
			"golang",
		}
	case "pacman":
		pkgs = []string{
			"ca-certificates",
			"curl",
			"tar",
			"xz",
			"bind",
			"iproute2",
			"libcap",
			"coturn",
			"git",
			"base-devel",
			"go",
		}
	case "zypper":
		pkgs = []string{
			"ca-certificates",
			"curl",
			"tar",
			"xz",
			"bind-utils",
			"iproute2",
			"libcap-progs",
			"coturn",
			"git",
			"gcc",
			"gcc-c++",
			"make",
			"go",
		}
	case "apk":
		pkgs = []string{
			"ca-certificates",
			"curl",
			"tar",
			"xz",
			"bind-tools",
			"iproute2",
			"libcap",
			"coturn",
			"git",
			"build-base",
			"go",
		}
	default:
		// An unknown manager previously produced an empty list and a silent
		// no-op success; surface it as an explicit error instead.
		return errors.NewPlatformErrorf("Unsupported package manager: %s", pm.Kind)
	}
	return pm.Install(pkgs)
}
// InstallKVBinary makes a redis-compatible KV server available: an existing
// valkey-server/redis-server on PATH is reused; otherwise valkey is installed
// via the package manager with a redis fallback. Returns the path of the
// usable server binary.
//
// Refactor: the five per-package-manager switch arms were identical except
// for package names; they are now table-driven. Log messages are unchanged.
func InstallKVBinary(pm *platform.PackageManager) (string, error) {
	if bin := util.Which("valkey-server"); bin != "" {
		util.Logf("Using existing valkey-server: %s", bin)
		return bin, nil
	}
	if bin := util.Which("redis-server"); bin != "" {
		util.Logf("Using existing redis-server: %s", bin)
		return bin, nil
	}
	util.Log("Installing KV store...")
	// Per-manager primary/fallback package names; Debian/Ubuntu split the
	// daemon into "*-server" packages, the others ship a single package.
	type kvPackages struct{ primary, fallback string }
	candidates := map[string]kvPackages{
		"apt":    {"valkey-server", "redis-server"},
		"dnf":    {"valkey", "redis"},
		"yum":    {"valkey", "redis"},
		"pacman": {"valkey", "redis"},
		"zypper": {"valkey", "redis"},
		"apk":    {"valkey", "redis"},
	}
	pkgs, ok := candidates[pm.Kind]
	if !ok {
		return "", errors.NewPlatformError("No supported package manager for installing KV store.")
	}
	if err := pm.Install([]string{pkgs.primary}); err != nil {
		util.Logf("%s not available, trying %s...", pkgs.primary, pkgs.fallback)
		if err2 := pm.Install([]string{pkgs.fallback}); err2 != nil {
			// Report the primary install failure, as before.
			return "", err
		}
	}
	if bin := util.Which("valkey-server"); bin != "" {
		util.Logf("Installed valkey-server: %s", bin)
		return bin, nil
	}
	if bin := util.Which("redis-server"); bin != "" {
		util.Logf("Installed redis-server: %s", bin)
		return bin, nil
	}
	return "", errors.NewPlatformError("Could not install redis-compatible server.")
}
// InstallLiveKitBinary downloads the LiveKit release tarball for tag/arch,
// extracts it under installDir/bin, and ensures the server binary ends up at
// installDir/bin/livekit-server with mode 0755. An empty arch autodetects
// from runtime.GOARCH. Returns the final binary path.
func InstallLiveKitBinary(tag, installDir, arch string) (string, error) {
	if arch == "" {
		var err error
		arch, err = DetectArchLinuxRelease()
		if err != nil {
			return "", err
		}
	}
	url := LiveKitReleaseURL(tag, arch)
	binDir := filepath.Join(installDir, "bin")
	if err := util.EnsureDir(binDir, 0755, -1, -1); err != nil {
		return "", err
	}
	tmpFile := filepath.Join(binDir, "livekit.tar.gz")
	util.Logf("Downloading LiveKit from %s", url)
	if _, err := download.DownloadWithOptionalSHA256(url, tmpFile, 30, 2); err != nil {
		return "", err
	}
	if err := extractTarGz(tmpFile, binDir); err != nil {
		return "", err
	}
	// Locate the server binary in the extracted tree: an exact
	// "livekit-server" name wins and stops the walk; otherwise the last
	// file whose name contains both "livekit" and "server" is used.
	// Walk's own error is intentionally ignored (best-effort scan).
	var serverPath string
	filepath.Walk(binDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			util.Logf("Warning: error walking %s: %v", path, err)
			return nil
		}
		if info.IsDir() {
			return nil
		}
		if info.Name() == "livekit-server" {
			serverPath = path
			// Exact match found — abort the remaining walk.
			return filepath.SkipAll
		}
		if strings.Contains(info.Name(), "livekit") && strings.Contains(info.Name(), "server") {
			serverPath = path
		}
		return nil
	})
	if serverPath == "" {
		return "", errors.NewCmdError("Could not find livekit-server after extracting tarball.", nil)
	}
	target := filepath.Join(binDir, "livekit-server")
	if serverPath != target {
		// Prefer an in-process copy; fall back to external cp. If both
		// fail, the original (in-process) copy error is returned.
		if err := util.CopyFile(serverPath, target); err != nil {
			if err2 := util.RunSimple([]string{"cp", "-f", serverPath, target}); err2 != nil {
				return "", err
			}
		}
	}
	os.Chmod(target, 0755)
	return target, nil
}
func extractTarGz(tarGzPath, destDir string) error {
f, err := os.Open(tarGzPath)
if err != nil {
return err
}
defer f.Close()
gzr, err := gzip.NewReader(f)
if err != nil {
return err
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
target := filepath.Join(destDir, header.Name)
switch header.Typeflag {
case tar.TypeDir:
if err := os.MkdirAll(target, 0755); err != nil {
return err
}
case tar.TypeReg:
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
return err
}
outFile, err := os.Create(target)
if err != nil {
return err
}
if _, err := io.Copy(outFile, tr); err != nil {
outFile.Close()
return err
}
outFile.Close()
os.Chmod(target, os.FileMode(header.Mode))
}
}
return nil
}
// EnsureCaddyWithL4 makes sure outBin is a Caddy binary that includes the
// layer4 (caddy-l4) module, building one with xcaddy when needed. The build
// path requires go and git on PATH. Best-effort setcap grants the binary
// low-port bind capability at the end.
func EnsureCaddyWithL4(stagingDir, caddyVersion, caddyL4Version, xcaddyVersion, outBin string) error {
	// Reuse an existing binary only if it already reports the layer4 module.
	if util.FileExists(outBin) {
		output, exitCode := util.RunCaptureNoCheck([]string{outBin, "list-modules"})
		if exitCode == 0 && strings.Contains(output, "layer4") {
			return nil
		}
	}
	if util.Which("go") == "" {
		return errors.NewPlatformError("Go is required to build Caddy with caddy-l4.")
	}
	if util.Which("git") == "" {
		return errors.NewPlatformError("git is required to build Caddy with caddy-l4.")
	}
	// Install the pinned xcaddy into /usr/local/bin. The login shell
	// (bash -lc) presumably picks up the host's Go PATH setup — NOTE(review):
	// confirm this is required on target hosts.
	env := []string{"GOBIN=/usr/local/bin"}
	_, err := util.Run([]string{"bash", "-lc", fmt.Sprintf("go install github.com/caddyserver/xcaddy/cmd/xcaddy@%s", xcaddyVersion)},
		util.RunOptions{Check: true, Capture: false, Env: env})
	if err != nil {
		return err
	}
	xcaddy := "/usr/local/bin/xcaddy"
	if !util.FileExists(xcaddy) {
		return errors.NewCmdError("xcaddy install failed.", nil)
	}
	if err := os.MkdirAll(stagingDir, 0755); err != nil {
		return err
	}
	// Build Caddy in stagingDir with the caddy-l4 plugin pinned to caddyL4Version.
	cmd := []string{
		xcaddy,
		"build",
		caddyVersion,
		"--with",
		fmt.Sprintf("github.com/mholt/caddy-l4@%s", caddyL4Version),
	}
	_, err = util.Run(cmd, util.RunOptions{Check: true, Capture: false, Cwd: stagingDir})
	if err != nil {
		return err
	}
	built := filepath.Join(stagingDir, "caddy")
	if !util.FileExists(built) {
		return errors.NewCmdError("xcaddy did not produce a caddy binary.", nil)
	}
	if err := os.MkdirAll(filepath.Dir(outBin), 0755); err != nil {
		return err
	}
	if err := util.CopyFile(built, outBin); err != nil {
		return err
	}
	os.Chmod(outBin, 0755)
	// Allow binding ports <1024 without running Caddy as root (best-effort).
	if util.Which("setcap") != "" {
		util.Run([]string{"setcap", "cap_net_bind_service=+ep", outBin}, util.RunOptions{Check: false, Capture: true})
	}
	return nil
}
// DefaultVersions returns the pinned default versions for
// (LiveKit, Caddy, caddy-l4, xcaddy), in that order.
func DefaultVersions() (string, string, string, string) {
	return constants.DefaultLiveKitVersion, constants.DefaultCaddyVersion, constants.DefaultCaddyL4Version, constants.DefaultXcaddyVersion
}

View File

@@ -0,0 +1,122 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package netutil
import (
"io"
"net"
"net/http"
"strings"
"time"
)
// DetectPublicIP queries public echo services for this host's public IP.
// family is "4" or "6"; any other value yields "". Returns "" when no
// service produced a syntactically valid address of the requested family.
//
// Fixes: the HTTP status code is now checked (error pages were previously
// parsed as IP addresses) and the response body read is bounded.
func DetectPublicIP(family string) string {
	var urls []string
	switch family {
	case "4":
		urls = []string{"https://api.ipify.org", "https://ipv4.icanhazip.com"}
	case "6":
		urls = []string{"https://api64.ipify.org", "https://ipv6.icanhazip.com"}
	}
	client := &http.Client{Timeout: 10 * time.Second}
	for _, u := range urls {
		req, err := http.NewRequest("GET", u, nil)
		if err != nil {
			continue
		}
		req.Header.Set("User-Agent", "livekitctl/0.1")
		resp, err := client.Do(req)
		if err != nil {
			continue
		}
		// These services return a short address; never buffer unbounded data.
		body, err := io.ReadAll(io.LimitReader(resp.Body, 512))
		resp.Body.Close()
		if err != nil || resp.StatusCode != http.StatusOK {
			continue
		}
		ip := strings.TrimSpace(strings.Split(string(body), "\n")[0])
		parsed := net.ParseIP(ip)
		if parsed == nil {
			continue
		}
		isV4 := parsed.To4() != nil
		if (family == "4" && isV4) || (family == "6" && !isV4) {
			return ip
		}
	}
	return ""
}
// HasGlobalIPv6 reports whether any local interface carries a global-scope,
// non-private IPv6 address. Errors enumerating interfaces count as "no".
func HasGlobalIPv6() bool {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false
	}
	for _, addr := range addrs {
		network, ok := addr.(*net.IPNet)
		if !ok {
			continue
		}
		ip := network.IP
		isV6 := ip.To4() == nil
		if isV6 && ip.IsGlobalUnicast() && !ip.IsPrivate() {
			return true
		}
	}
	return false
}
// PrimaryPrivateIPv4 returns the local IPv4 address the kernel selects for
// reaching 8.8.8.8 (via a UDP "dial", which performs a route lookup), or ""
// when it cannot be determined or is not IPv4.
func PrimaryPrivateIPv4() string {
	conn, err := net.Dial("udp", "8.8.8.8:80")
	if err != nil {
		return ""
	}
	defer conn.Close()
	local := conn.LocalAddr().(*net.UDPAddr)
	v4 := local.IP.To4()
	if v4 == nil {
		return ""
	}
	return v4.String()
}
// IsPrivateIPv4 reports whether ipStr parses as an IPv4 address inside a
// private range. Unparsable and non-IPv4 inputs (including private IPv6)
// return false.
func IsPrivateIPv4(ipStr string) bool {
	parsed := net.ParseIP(ipStr)
	if parsed == nil || parsed.To4() == nil {
		return false
	}
	return parsed.IsPrivate()
}

View File

@@ -0,0 +1,307 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package ops
import (
"fmt"
"os"
"sort"
"strings"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/configgen"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/errors"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/firewall"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/netutil"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/platform"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/secrets"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/state"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/validate"
)
// secretsPath returns the secrets-file location recorded in bootstrap state.
func secretsPath(st *state.BootstrapState) string {
	return st.Paths.SecretsPath
}

// LoadSecrets reads the JSON secrets file referenced by st. A failed read
// surfaces the underlying error; a readable file without a LiveKit API key
// is treated as "bootstrap never completed".
func LoadSecrets(st *state.BootstrapState) (*secrets.Secrets, error) {
	var sec secrets.Secrets
	if err := util.ReadJSON(secretsPath(st), &sec); err != nil {
		return nil, err
	}
	if sec.LiveKitAPIKey == "" {
		return nil, errors.NewPlatformError("Secrets file not found. Was bootstrap completed?")
	}
	return &sec, nil
}

// SaveSecrets writes sec as owner-only (0600) JSON at the state's secrets path.
func SaveSecrets(st *state.BootstrapState, sec *secrets.Secrets) error {
	return util.WriteJSON(secretsPath(st), sec, 0600, -1, -1)
}
// StatePathDefault returns the default on-disk location of the bootstrap state file.
func StatePathDefault() string {
	return state.DefaultPaths().StatePath
}

// EnsureLinuxRoot fails unless the process is running as root on a Linux host.
func EnsureLinuxRoot() error {
	if !platform.IsLinux() {
		return errors.NewPlatformError("This operation is only supported on Linux hosts.")
	}
	return platform.RequireRoot()
}
// ApplyConfigAndRestart regenerates every managed config file from state and
// secrets, then enables and restarts the managed systemd units in dependency
// order (KV store, coturn, LiveKit, Caddy). Requires systemd.
func ApplyConfigAndRestart(st *state.BootstrapState, kvBin, publicIPv4, privateIPv4 string) error {
	sec, err := LoadSecrets(st)
	if err != nil {
		return err
	}
	if err := configgen.WriteAllConfigs(configgen.WriteAllConfigsParams{
		State:       st,
		Secrets:     sec,
		PublicIPv4:  publicIPv4,
		PrivateIPv4: privateIPv4,
		KVBin:       kvBin,
	}); err != nil {
		return err
	}
	sm := platform.DetectServiceManager()
	if !sm.IsSystemd() {
		return errors.NewPlatformError("systemd is required for managed services on this host.")
	}
	sm.DaemonReload()
	// Order matters: dependencies (KV, TURN) restart before LiveKit and Caddy.
	services := []string{
		"livekit-kv.service",
		"livekit-coturn.service",
		"livekit.service",
		"caddy.service",
	}
	for _, svc := range services {
		sm.Enable(svc)
	}
	for _, svc := range services {
		sm.Restart(svc)
	}
	return nil
}
// OpStatus returns the combined `systemctl status` output of all managed
// services, separated by blank lines, or a note when systemd is absent.
func OpStatus(st *state.BootstrapState) string {
	sm := platform.DetectServiceManager()
	if !sm.IsSystemd() {
		return "systemd not detected."
	}
	services := []string{"livekit-kv.service", "livekit-coturn.service", "livekit.service", "caddy.service"}
	sections := make([]string, 0, len(services))
	for _, svc := range services {
		sections = append(sections, sm.Status(svc))
	}
	return strings.TrimSpace(strings.Join(sections, "\n\n"))
}

// OpLogs returns the last `lines` journal entries for one service, or a note
// when systemd is absent.
func OpLogs(st *state.BootstrapState, service string, lines int) string {
	sm := platform.DetectServiceManager()
	if sm.IsSystemd() {
		return sm.Logs(service, lines)
	}
	return "systemd not detected."
}
// OpRestart restarts each named systemd unit, erroring when systemd is absent.
func OpRestart(services []string) error {
	sm := platform.DetectServiceManager()
	if !sm.IsSystemd() {
		return errors.NewPlatformError("systemd not detected.")
	}
	for _, svc := range services {
		sm.Restart(svc)
	}
	return nil
}
// WebhookList returns the webhook URLs currently recorded in state.
// Note: the returned slice aliases the state's internal slice.
func WebhookList(st *state.BootstrapState) []string {
	return st.Webhooks
}

// WebhookAdd validates url, inserts it into the state's sorted webhook list
// unless already present, and persists state. Returns whether the list changed.
func WebhookAdd(st *state.BootstrapState, url string, allowHTTP bool) (bool, error) {
	validated, err := validate.RequireWebhookURL(url, allowHTTP)
	if err != nil {
		return false, err
	}
	for _, known := range st.Webhooks {
		if known == validated {
			// Already recorded; nothing to persist.
			return false, nil
		}
	}
	st.Webhooks = append(st.Webhooks, validated)
	sort.Strings(st.Webhooks)
	st.Touch()
	return true, state.SaveState(st)
}
// WebhookRemove deletes url from the state's webhook list and persists the
// change. Returns whether the URL was present; an absent URL leaves state untouched.
func WebhookRemove(st *state.BootstrapState, url string) (bool, error) {
	removed := false
	var kept []string
	for _, known := range st.Webhooks {
		if known == url {
			removed = true
			continue
		}
		kept = append(kept, known)
	}
	if !removed {
		return false, nil
	}
	st.Webhooks = kept
	st.Touch()
	return true, state.SaveState(st)
}
// WebhookSet validates urls, deduplicates them, and replaces the state's
// webhook list with the sorted result, persisting state.
func WebhookSet(st *state.BootstrapState, urls []string, allowHTTP bool) error {
	seen := make(map[string]bool, len(urls))
	var cleaned []string
	for _, raw := range urls {
		validated, err := validate.RequireWebhookURL(raw, allowHTTP)
		if err != nil {
			return err
		}
		if seen[validated] {
			continue
		}
		seen[validated] = true
		cleaned = append(cleaned, validated)
	}
	sort.Strings(cleaned)
	st.Webhooks = cleaned
	st.Touch()
	return state.SaveState(st)
}
// RunBasicHealthChecks runs cheap local diagnostics — dumps listening
// sockets (ss -lntup) and probes LiveKit's local HTTP port with curl — and
// returns a combined human-readable report.
func RunBasicHealthChecks(st *state.BootstrapState) string {
	var out []string
	out = append(out, "Listening sockets:")
	result, _ := util.Run([]string{"ss", "-lntup"}, util.RunOptions{Check: false, Capture: true})
	if result != nil {
		out = append(out, strings.TrimSpace(result.Output))
	}
	// curl -f exits non-zero on HTTP errors, so exit code 0 means reachable.
	result2, _ := util.Run([]string{"curl", "-fsS", fmt.Sprintf("http://127.0.0.1:%d/", st.Ports.LiveKitHTTPLocal)}, util.RunOptions{Check: false, Capture: true})
	if result2 != nil && result2.ExitCode == 0 {
		out = append(out, "LiveKit local HTTP reachable.")
	} else {
		out = append(out, "LiveKit local HTTP not reachable.")
	}
	return strings.TrimSpace(strings.Join(out, "\n"))
}
// EnsureStateLoadedOrFail loads bootstrap state from path (or the default
// location when path is empty), turning a missing file into an explicit error.
func EnsureStateLoadedOrFail(path string) (*state.BootstrapState, error) {
	statePath := path
	if statePath == "" {
		statePath = StatePathDefault()
	}
	st, err := state.LoadState(statePath)
	if err != nil {
		return nil, err
	}
	if st == nil {
		return nil, errors.NewPlatformErrorf("State file not found: %s", statePath)
	}
	return st, nil
}
// ConfigureFirewallFromState applies firewall rules for the ports recorded in
// state, records which tool was used, and persists state. The tool's status
// output is returned even when saving state fails.
func ConfigureFirewallFromState(st *state.BootstrapState) (string, error) {
	tool := firewall.DetectFirewallTool()
	msg := firewall.ConfigureFirewall(tool, st.Ports, st.Firewall.Enabled)
	st.Firewall.Tool = tool.Name
	st.Touch()
	err := state.SaveState(st)
	return msg, err
}

// DetectPublicIPsOrFail returns (public IPv4, public IPv6, private IPv4).
// Public IPv4 is mandatory; IPv6 is attempted only when the host has a
// global IPv6 address.
func DetectPublicIPsOrFail() (string, string, string, error) {
	pub4 := netutil.DetectPublicIP("4")
	if pub4 == "" {
		return "", "", "", errors.NewPlatformError("Could not detect public IPv4.")
	}
	pub6 := ""
	if netutil.HasGlobalIPv6() {
		pub6 = netutil.DetectPublicIP("6")
	}
	return pub4, pub6, netutil.PrimaryPrivateIPv4(), nil
}
// ReadLinesFile reads path and returns its non-empty, whitespace-trimmed lines.
//
// Fix: the underlying read error (missing file, permission denied, ...) is
// now included in the returned error instead of always reporting "not found".
func ReadLinesFile(path string) ([]string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, errors.NewPlatformErrorf("Could not read file %s: %v", path, err)
	}
	var lines []string
	for _, raw := range strings.Split(string(data), "\n") {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			lines = append(lines, trimmed)
		}
	}
	return lines, nil
}
// StopConflictingServices stops system-installed services that conflict with managed ones
// (distro-packaged valkey/redis/coturn units) and clears any failed state on
// the managed units so subsequent starts are clean. Best-effort: systemctl
// failures are ignored. No-op without systemd.
func StopConflictingServices() {
	sm := platform.DetectServiceManager()
	if !sm.IsSystemd() {
		return
	}
	// Distro units that would compete with the managed livekit-* units.
	conflicting := []string{
		"valkey-server.service",
		"valkey.service",
		"redis-server.service",
		"redis.service",
		"coturn.service",
	}
	for _, svc := range conflicting {
		util.Run([]string{"systemctl", "stop", svc}, util.RunOptions{Check: false, Capture: true})
		util.Run([]string{"systemctl", "disable", svc}, util.RunOptions{Check: false, Capture: true})
	}
	// Clear "failed" markers on our own units.
	managed := []string{
		"livekit-kv.service",
		"livekit-coturn.service",
		"livekit.service",
		"caddy.service",
	}
	for _, svc := range managed {
		util.Run([]string{"systemctl", "reset-failed", svc}, util.RunOptions{Check: false, Capture: true})
	}
}

View File

@@ -0,0 +1,218 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package platform
import (
"bufio"
"fmt"
"os"
"runtime"
"strings"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/errors"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// IsLinux reports whether the binary is running on a Linux host.
func IsLinux() bool {
	return runtime.GOOS == "linux"
}

// RequireRoot errors unless the effective UID is 0 (root).
func RequireRoot() error {
	if os.Geteuid() != 0 {
		return errors.NewPlatformError("Run as root (sudo -i).")
	}
	return nil
}
// ReadOSRelease parses /etc/os-release into a key/value map. A missing or
// unreadable file yields an empty map.
//
// Fixes per os-release(5): comment lines ("# ...") are now skipped instead
// of being parsed as data, and single-quoted values are unquoted like
// double-quoted ones.
func ReadOSRelease() map[string]string {
	data := make(map[string]string)
	f, err := os.Open("/etc/os-release")
	if err != nil {
		return data
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		// Skip blanks, comments, and anything that is not KEY=VALUE.
		if line == "" || strings.HasPrefix(line, "#") || !strings.Contains(line, "=") {
			continue
		}
		parts := strings.SplitN(line, "=", 2)
		key := parts[0]
		// Values may be wrapped in double or single quotes.
		value := strings.Trim(parts[1], `"'`)
		data[key] = value
	}
	return data
}
// PlatformInfo is a distilled view of /etc/os-release.
type PlatformInfo struct {
	ID     string // lowercased ID field (e.g. "ubuntu")
	IDLike string // lowercased ID_LIKE field (parent distros)
	Pretty string // PRETTY_NAME field, trimmed
}

// DetectPlatform reads /etc/os-release and normalizes the interesting fields.
func DetectPlatform() PlatformInfo {
	release := ReadOSRelease()
	info := PlatformInfo{}
	info.ID = strings.ToLower(release["ID"])
	info.IDLike = strings.ToLower(release["ID_LIKE"])
	info.Pretty = strings.TrimSpace(release["PRETTY_NAME"])
	return info
}
// PackageManager identifies the host's package manager frontend.
type PackageManager struct {
	Kind string // "apt", "dnf", "yum", "pacman", "zypper" or "apk"
}

// DetectPackageManager probes PATH for known package managers in preference
// order and returns nil when none is found.
func DetectPackageManager() *PackageManager {
	probes := []struct{ binary, kind string }{
		{"apt-get", "apt"},
		{"dnf", "dnf"},
		{"yum", "yum"},
		{"pacman", "pacman"},
		{"zypper", "zypper"},
		{"apk", "apk"},
	}
	for _, p := range probes {
		if util.Which(p.binary) != "" {
			return &PackageManager{Kind: p.kind}
		}
	}
	return nil
}
// Install installs pkgs non-interactively with the detected package manager.
// An empty package list is a no-op; an unknown manager kind is an error.
func (pm *PackageManager) Install(pkgs []string) error {
	if len(pkgs) == 0 {
		return nil
	}
	var base []string
	switch pm.Kind {
	case "apt":
		// apt needs its package index refreshed before installing.
		if err := util.RunSimple([]string{"apt-get", "update"}); err != nil {
			return err
		}
		base = []string{"apt-get", "install", "-y", "--no-install-recommends"}
	case "dnf":
		base = []string{"dnf", "-y", "install"}
	case "yum":
		base = []string{"yum", "-y", "install"}
	case "pacman":
		base = []string{"pacman", "-Sy", "--noconfirm"}
	case "zypper":
		base = []string{"zypper", "--non-interactive", "install"}
	case "apk":
		base = []string{"apk", "add", "--no-cache"}
	default:
		return errors.NewPlatformErrorf("Unsupported package manager: %s", pm.Kind)
	}
	return util.RunSimple(append(base, pkgs...))
}
// ServiceManager identifies the host's service supervisor: "systemd" or "none".
type ServiceManager struct {
	Kind string
}

// DetectServiceManager reports systemd only when systemctl is on PATH AND
// the systemd runtime directory exists (i.e. the system actually booted
// with systemd, not merely has it installed).
func DetectServiceManager() *ServiceManager {
	systemdActive := util.Which("systemctl") != "" && util.FileExists("/run/systemd/system")
	if systemdActive {
		return &ServiceManager{Kind: "systemd"}
	}
	return &ServiceManager{Kind: "none"}
}

// IsSystemd reports whether systemd manages services on this host.
func (sm *ServiceManager) IsSystemd() bool {
	return sm.Kind == "systemd"
}
// systemctl runs one best-effort systemctl subcommand when systemd is
// available; output is captured and failures are ignored. Shared by all
// fire-and-forget mutators below (previously six copies of the same call).
func (sm *ServiceManager) systemctl(args ...string) {
	if sm.Kind != "systemd" {
		return
	}
	util.Run(append([]string{"systemctl"}, args...), util.RunOptions{Check: false, Capture: true})
}

// DaemonReload reloads systemd unit definitions.
func (sm *ServiceManager) DaemonReload() {
	sm.systemctl("daemon-reload")
}

// Enable marks a unit to start at boot.
func (sm *ServiceManager) Enable(name string) {
	sm.systemctl("enable", name)
}

// Disable unmarks a unit from starting at boot.
func (sm *ServiceManager) Disable(name string) {
	sm.systemctl("disable", name)
}

// Restart restarts a unit.
func (sm *ServiceManager) Restart(name string) {
	sm.systemctl("restart", name)
}

// Start starts a unit.
func (sm *ServiceManager) Start(name string) {
	sm.systemctl("start", name)
}

// Stop stops a unit.
func (sm *ServiceManager) Stop(name string) {
	sm.systemctl("stop", name)
}
// Status returns trimmed `systemctl status` output for one unit, or a note
// when systemd is absent.
func (sm *ServiceManager) Status(name string) string {
	if sm.Kind != "systemd" {
		return "Service manager not available."
	}
	result, _ := util.Run([]string{"systemctl", "status", name, "--no-pager"}, util.RunOptions{Check: false, Capture: true})
	if result == nil {
		return ""
	}
	return strings.TrimSpace(result.Output)
}

// Logs returns the trimmed last `lines` journal entries for one unit;
// lines is clamped to at least 1.
func (sm *ServiceManager) Logs(name string, lines int) string {
	if sm.Kind != "systemd" {
		return "Service manager not available."
	}
	if lines < 1 {
		lines = 1
	}
	cmd := []string{"journalctl", "-u", name, "-n", fmt.Sprintf("%d", lines), "--no-pager"}
	result, _ := util.Run(cmd, util.RunOptions{Check: false, Capture: true})
	if result == nil {
		return ""
	}
	return strings.TrimSpace(result.Output)
}

View File

@@ -0,0 +1,91 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package secrets
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/pem"
)
func RandomTokenURLSafe(nbytes int) string {
b := make([]byte, nbytes)
if _, err := rand.Read(b); err != nil {
panic("crypto/rand.Read failed: " + err.Error())
}
return base64.URLEncoding.EncodeToString(b)
}
func RandomTokenHex(nbytes int) string {
b := make([]byte, nbytes)
if _, err := rand.Read(b); err != nil {
panic("crypto/rand.Read failed: " + err.Error())
}
return hex.EncodeToString(b)
}
func SafeAPIKey(prefix string, nbytes int) string {
return prefix + RandomTokenHex(nbytes)
}
// Secrets holds every generated credential the deployment needs; it is
// serialized to the owner-only JSON secrets file.
type Secrets struct {
	KVPassword             string `json:"kv_password"`               // generated KV-store password
	LiveKitAPIKey          string `json:"livekit_api_key"`           // "lk_"-prefixed API key
	LiveKitAPISecret       string `json:"livekit_api_secret"`        // API signing secret
	TURNUsername           string `json:"turn_username"`             // static TURN username
	TURNPassword           string `json:"turn_password"`             // static TURN credential
	BlueskyOAuthPrivateKey string `json:"bluesky_oauth_private_key"` // PEM-encoded RSA private key
	BlueskyOAuthKeyID      string `json:"bluesky_oauth_key_id"`      // key identifier for the OAuth key
}
func GenerateBlueskyOAuthRSAKey() (string, error) {
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return "", err
}
privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
privateKeyPEM := pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: privateKeyBytes,
})
return string(privateKeyPEM), nil
}
// GenerateNewSecrets builds a full set of fresh credentials. Panics when the
// RSA key cannot be generated, since secret generation cannot proceed without it.
func GenerateNewSecrets() *Secrets {
	oauthKey, err := GenerateBlueskyOAuthRSAKey()
	if err != nil {
		panic("Failed to generate Bluesky OAuth RSA key: " + err.Error())
	}
	sec := &Secrets{}
	sec.KVPassword = RandomTokenURLSafe(24)
	sec.LiveKitAPIKey = SafeAPIKey("lk_", 16)
	sec.LiveKitAPISecret = RandomTokenURLSafe(48)
	sec.TURNUsername = "livekit"
	sec.TURNPassword = RandomTokenURLSafe(48)
	sec.BlueskyOAuthPrivateKey = oauthKey
	sec.BlueskyOAuthKeyID = "prod-key-1"
	return sec
}

View File

@@ -0,0 +1,170 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package state
import (
"encoding/json"
"os"
"sort"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/constants"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/util"
)
// Versions pins the software versions selected at bootstrap time.
type Versions struct {
	LiveKit string `json:"livekit"`
	Caddy   string `json:"caddy"`
	CaddyL4 string `json:"caddy_l4"`
	Xcaddy  string `json:"xcaddy"`
}

// Domains holds the public hostnames for the LiveKit and TURN endpoints.
type Domains struct {
	LiveKit string `json:"livekit"`
	TURN    string `json:"turn"`
}

// Paths records every filesystem location livekitctl manages.
type Paths struct {
	ConfigDir         string `json:"config_dir"`
	StatePath         string `json:"state_path"`
	SecretsPath       string `json:"secrets_path"`
	LiveKitInstallDir string `json:"livekit_install_dir"`
	LiveKitBinDir     string `json:"livekit_bin_dir"`
	CaddyBin          string `json:"caddy_bin"`
	CaddyStorageDir   string `json:"caddy_storage_dir"`
	CaddyLogDir       string `json:"caddy_log_dir"`
	LiveKitLogDir     string `json:"livekit_log_dir"`
	KVDataDir         string `json:"kv_data_dir"`
	KVLogDir          string `json:"kv_log_dir"`
	UnitDir           string `json:"unit_dir"`
}
// DefaultPaths returns the standard filesystem layout used when the caller
// does not override paths.
func DefaultPaths() Paths {
	return Paths{
		ConfigDir:         "/etc/livekit",
		StatePath:         "/etc/livekit/livekitctl-state.json",
		SecretsPath:       "/etc/livekit/livekitctl-secrets.json",
		LiveKitInstallDir: "/opt/livekit",
		LiveKitBinDir:     "/opt/livekit/bin",
		CaddyBin:          "/usr/local/bin/caddy",
		CaddyStorageDir:   "/var/lib/caddy",
		CaddyLogDir:       "/var/log/caddy",
		LiveKitLogDir:     "/var/log/livekit",
		KVDataDir:         "/var/lib/livekit/kv",
		KVLogDir:          "/var/log/livekit",
		UnitDir:           "/etc/systemd/system",
	}
}
// KVConfig describes where the embedded key-value store listens.
type KVConfig struct {
	BindHost string `json:"bind_host"`
	Port     int    `json:"port"`
}

// FirewallConfig records whether livekitctl manages the host firewall and
// which tool it uses.
type FirewallConfig struct {
	Enabled bool   `json:"enabled"`
	Tool    string `json:"tool"`
}

// BootstrapState is the state persisted by SaveState and reloaded by
// LoadState. SchemaVersion (currently 1) guards future format migrations;
// CreatedAt/UpdatedAt are RFC 3339 UTC timestamps.
type BootstrapState struct {
	SchemaVersion int             `json:"schema_version"`
	CreatedAt     string          `json:"created_at"`
	UpdatedAt     string          `json:"updated_at"`
	ACMEEmail     string          `json:"acme_email"`
	Domains       Domains         `json:"domains"`
	Ports         constants.Ports `json:"ports"`
	Versions      Versions        `json:"versions"`
	KV            KVConfig        `json:"kv"`
	Webhooks      []string        `json:"webhooks"`
	Paths         Paths           `json:"paths"`
	Firewall      FirewallConfig  `json:"firewall"`
}

// NewStateParams bundles the caller-supplied inputs for NewState.
// Paths may be nil to accept DefaultPaths(); Webhooks may be nil.
type NewStateParams struct {
	ACMEEmail string
	Domains   Domains
	Ports     constants.Ports
	Versions  Versions
	KV        KVConfig
	Webhooks  []string
	Firewall  FirewallConfig
	Paths     *Paths
}
// NewState builds a fresh BootstrapState (SchemaVersion 1) from params.
// Webhook URLs are de-duplicated and stored sorted; a nil params.Paths
// falls back to DefaultPaths(). Both timestamps are set to now.
func NewState(params NewStateParams) *BootstrapState {
	now := util.NowRFC3339()

	// De-duplicate webhooks via a set, then emit them in sorted order so the
	// persisted state is stable across runs. A nil input yields an empty
	// (non-nil) slice.
	seen := make(map[string]struct{})
	for _, hook := range params.Webhooks {
		seen[hook] = struct{}{}
	}
	hooks := make([]string, 0, len(seen))
	for hook := range seen {
		hooks = append(hooks, hook)
	}
	sort.Strings(hooks)

	resolvedPaths := DefaultPaths()
	if params.Paths != nil {
		resolvedPaths = *params.Paths
	}

	return &BootstrapState{
		SchemaVersion: 1,
		CreatedAt:     now,
		UpdatedAt:     now,
		ACMEEmail:     params.ACMEEmail,
		Domains:       params.Domains,
		Ports:         params.Ports,
		Versions:      params.Versions,
		KV:            params.KV,
		Webhooks:      hooks,
		Paths:         resolvedPaths,
		Firewall:      params.Firewall,
	}
}
// Touch refreshes the state's UpdatedAt timestamp to the current UTC time.
func (st *BootstrapState) Touch() {
	st.UpdatedAt = util.NowRFC3339()
}
// LoadState reads and decodes the bootstrap state at path.
// A missing file is not an error: it returns (nil, nil) so callers can
// distinguish "never bootstrapped" from a real failure.
func LoadState(path string) (*BootstrapState, error) {
	raw, err := os.ReadFile(path)
	switch {
	case os.IsNotExist(err):
		return nil, nil
	case err != nil:
		return nil, err
	}
	state := &BootstrapState{}
	if err := json.Unmarshal(raw, state); err != nil {
		return nil, err
	}
	return state, nil
}
// SaveState persists st as indented JSON at its configured StatePath
// (mode 0600; ownership left unchanged — uid/gid -1).
func SaveState(st *BootstrapState) error {
	return util.WriteJSON(st.Paths.StatePath, st, 0600, -1, -1)
}

View File

@@ -0,0 +1,258 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package util
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"time"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/errors"
)
// Log prints msg to stdout followed by a newline.
func Log(msg string) {
	fmt.Println(msg)
}

// Logf printf-formats args to stdout and appends a newline.
func Logf(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}
// Which resolves binName on PATH and returns its absolute path, or the
// empty string when the binary cannot be found.
func Which(binName string) string {
	if resolved, err := exec.LookPath(binName); err == nil {
		return resolved
	}
	return ""
}
// RunOptions controls how Run executes a command.
type RunOptions struct {
	Check   bool     // treat a non-zero exit code as an error
	Capture bool     // collect combined stdout+stderr instead of streaming
	Env     []string // extra KEY=VALUE entries appended to the inherited environment
	Cwd     string   // working directory; empty means inherit the current one
}

// RunResult reports the outcome of a command that actually ran.
type RunResult struct {
	ExitCode int    // process exit status (0 on success)
	Output   string // combined output when Capture was set; otherwise empty
}
// Run executes cmd (argv-style: cmd[0] is the program, the rest are its
// arguments) according to opts.
//
// Behaviour:
//   - opts.Cwd, when non-empty, sets the working directory.
//   - opts.Env entries are appended to the current process environment.
//   - With opts.Capture, stdout and stderr are interleaved into one buffer
//     returned in RunResult.Output; otherwise output streams directly to
//     this process's stdout/stderr.
//   - A non-zero exit is reported via RunResult.ExitCode; with opts.Check
//     it is additionally wrapped in a CmdError (result still returned).
//
// A nil *RunResult is returned only when cmd is empty or the process could
// not be started at all (e.g. binary not found).
func Run(cmd []string, opts RunOptions) (*RunResult, error) {
	if len(cmd) == 0 {
		return nil, errors.NewCmdError("empty command", nil)
	}
	c := exec.Command(cmd[0], cmd[1:]...)
	if opts.Cwd != "" {
		c.Dir = opts.Cwd
	}
	if len(opts.Env) > 0 {
		// Extra entries win over inherited ones with the same key.
		c.Env = append(os.Environ(), opts.Env...)
	}
	var output bytes.Buffer
	if opts.Capture {
		c.Stdout = &output
		c.Stderr = &output
	} else {
		c.Stdout = os.Stdout
		c.Stderr = os.Stderr
	}
	err := c.Run()
	exitCode := 0
	if err != nil {
		if exitErr, ok := err.(*exec.ExitError); ok {
			// The process ran but exited non-zero: record the code and
			// fall through so the caller still gets the captured output.
			exitCode = exitErr.ExitCode()
		} else {
			// Start-up failure (lookup, permissions, ...): no result.
			return nil, errors.NewCmdError(fmt.Sprintf("command not found: %s", cmd[0]), err)
		}
	}
	result := &RunResult{
		ExitCode: exitCode,
		Output:   output.String(),
	}
	if opts.Check && exitCode != 0 {
		return result, errors.NewCmdError(
			fmt.Sprintf("command failed (%d): %v\n%s", exitCode, cmd, result.Output),
			nil,
		)
	}
	return result, nil
}
// RunSimple runs cmd, streaming output to the terminal; a non-zero exit
// is returned as an error.
func RunSimple(cmd []string) error {
	_, err := Run(cmd, RunOptions{Check: true, Capture: false})
	return err
}

// RunCapture runs cmd and returns its combined stdout+stderr; a non-zero
// exit is returned as an error (with empty output).
func RunCapture(cmd []string) (string, error) {
	result, err := Run(cmd, RunOptions{Check: true, Capture: true})
	if err != nil {
		return "", err
	}
	return result.Output, nil
}

// RunCaptureNoCheck runs cmd and returns (combined output, exit code)
// without treating failure as an error. When the command could not be
// started at all it returns ("", -1).
func RunCaptureNoCheck(cmd []string) (string, int) {
	result, _ := Run(cmd, RunOptions{Check: false, Capture: true})
	if result == nil {
		return "", -1
	}
	return result.Output, result.ExitCode
}
func AtomicWriteText(path string, content string, mode os.FileMode, uid, gid int) error {
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
tmpFile, err := os.CreateTemp(dir, ".tmp-")
if err != nil {
return err
}
tmpName := tmpFile.Name()
_, err = tmpFile.WriteString(content)
if err != nil {
tmpFile.Close()
os.Remove(tmpName)
return err
}
if err := tmpFile.Sync(); err != nil {
tmpFile.Close()
os.Remove(tmpName)
return err
}
tmpFile.Close()
if err := os.Chmod(tmpName, mode); err != nil {
os.Remove(tmpName)
return err
}
if uid >= 0 || gid >= 0 {
if err := os.Chown(tmpName, uid, gid); err != nil {
os.Remove(tmpName)
return err
}
}
return os.Rename(tmpName, path)
}
func ReadJSON(path string, v interface{}) error {
data, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return json.Unmarshal(data, v)
}
// WriteJSON marshals v as indented JSON (with a trailing newline) and
// writes it atomically to path via AtomicWriteText; mode/uid/gid behave
// as documented there.
func WriteJSON(path string, v interface{}, mode os.FileMode, uid, gid int) error {
	data, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		return err
	}
	content := string(data) + "\n"
	return AtomicWriteText(path, content, mode, uid, gid)
}
func NowRFC3339() string {
return time.Now().UTC().Format(time.RFC3339)
}
func EnsureDir(path string, mode os.FileMode, uid, gid int) error {
if err := os.MkdirAll(path, mode); err != nil {
return err
}
if err := os.Chmod(path, mode); err != nil {
return err
}
if uid >= 0 || gid >= 0 {
if err := os.Chown(path, uid, gid); err != nil {
return err
}
}
return nil
}
// UserGroup holds the numeric uid/gid pair of a system account.
type UserGroup struct {
	UID int
	GID int
}
// LookupUserGroup resolves username to its numeric uid/gid.
// It returns nil when the account does not exist or when either ID is
// non-numeric (possible on some platforms).
func LookupUserGroup(username string) *UserGroup {
	account, err := user.Lookup(username)
	if err != nil {
		return nil
	}
	uid, uidErr := strconv.Atoi(account.Uid)
	gid, gidErr := strconv.Atoi(account.Gid)
	if uidErr != nil || gidErr != nil {
		return nil
	}
	return &UserGroup{UID: uid, GID: gid}
}
func FileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
func CopyFile(src, dst string) error {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.Create(dst)
if err != nil {
return err
}
defer dstFile.Close()
_, err = io.Copy(dstFile, srcFile)
return err
}

View File

@@ -0,0 +1,108 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package validate
import (
"net/url"
"regexp"
"strings"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/internal/errors"
)
// labelRE matches one lowercase DNS label (letters/digits/hyphens, no
// leading or trailing hyphen, at most 63 chars).
var labelRE = regexp.MustCompile(`^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`)

// tldRE matches the final label: letters only, 2-63 chars.
var tldRE = regexp.MustCompile(`^[a-z]{2,63}$`)

// RequireDomain validates name as a DNS domain and returns it trimmed and
// lower-cased. field is used only in the error message. The whole name must
// be 1-253 chars, contain at least two labels, each label 1-63 chars; the
// last label must be alphabetic (tldRE), the rest labelRE.
func RequireDomain(name, field string) (string, error) {
	normalised := strings.TrimSpace(strings.ToLower(name))
	invalid := func() (string, error) {
		return "", errors.NewValidationError("Invalid " + field + ": " + normalised)
	}
	if len(normalised) < 1 || len(normalised) > 253 {
		return invalid()
	}
	labels := strings.Split(normalised, ".")
	if len(labels) < 2 {
		return invalid()
	}
	last := len(labels) - 1
	for i, label := range labels {
		if len(label) < 1 || len(label) > 63 {
			return invalid()
		}
		pattern := labelRE
		if i == last {
			pattern = tldRE
		}
		if !pattern.MatchString(label) {
			return invalid()
		}
	}
	return normalised, nil
}
// RequireEmail applies a deliberately loose sanity check to email and
// returns it trimmed: it must contain "@" and ".", and "@" must not be
// the first or last character. This is not full RFC 5322 validation.
func RequireEmail(email string) (string, error) {
	trimmed := strings.TrimSpace(email)
	hasAt := strings.Contains(trimmed, "@")
	hasDot := strings.Contains(trimmed, ".")
	atOnEdge := strings.HasPrefix(trimmed, "@") || strings.HasSuffix(trimmed, "@")
	if !hasAt || !hasDot || atOnEdge {
		return "", errors.NewValidationError("Email does not look valid.")
	}
	return trimmed, nil
}
// versionRE matches a bare semantic version, e.g. "1.2.3".
var versionRE = regexp.MustCompile(`^\d+\.\d+\.\d+$`)

// branchRE matches a simple branch name: letter first, then letters,
// digits, underscores, or hyphens.
var branchRE = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_-]*$`)

// NormaliseVersionTag trims v and normalises it for use as a release tag:
// "latest" and anything starting with "v" pass through unchanged, a bare
// semver gains a "v" prefix, and a branch-like name passes through.
// Anything else is a validation error.
func NormaliseVersionTag(v string) (string, error) {
	tag := strings.TrimSpace(v)
	switch {
	case tag == "latest":
		return tag, nil
	case strings.HasPrefix(tag, "v"):
		// NOTE(review): any "v"-prefixed string is accepted verbatim —
		// confirm whether stricter validation (e.g. "v" + semver) is wanted.
		return tag, nil
	case versionRE.MatchString(tag):
		return "v" + tag, nil
	case branchRE.MatchString(tag):
		return tag, nil
	}
	return "", errors.NewValidationError("Invalid version: " + tag)
}
// RequireWebhookURL validates urlStr as a webhook endpoint and returns it
// trimmed. The scheme must be https, or http when allowHTTP is set, and
// the URL must have a non-empty host.
func RequireWebhookURL(urlStr string, allowHTTP bool) (string, error) {
	trimmed := strings.TrimSpace(urlStr)
	parsed, err := url.Parse(trimmed)
	if err != nil {
		return "", errors.NewValidationError("Invalid webhook URL: " + trimmed)
	}
	switch parsed.Scheme {
	case "https":
		// Always acceptable.
	case "http":
		if !allowHTTP {
			return "", errors.NewValidationError("Refusing insecure webhook URL: " + trimmed)
		}
	default:
		return "", errors.NewValidationError("Invalid webhook URL scheme: " + trimmed)
	}
	if parsed.Host == "" {
		return "", errors.NewValidationError("Invalid webhook URL host: " + trimmed)
	}
	return trimmed, nil
}

View File

@@ -0,0 +1,32 @@
/*
* Copyright (C) 2026 Fluxer Contributors
*
* This file is part of Fluxer.
*
* Fluxer is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Fluxer is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
*/
package main
import (
"os"
"github.com/fluxerapp/fluxer/fluxer_devops/livekitctl/cmd"
)
// main is the CLI entry point: it delegates to cmd.Execute and exits with
// status 1 on any error (presumably cmd.Execute prints the error itself —
// NOTE(review): confirm, otherwise failures are silent).
func main() {
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}

View File

@@ -0,0 +1,132 @@
#!/usr/bin/env sh
# Copyright (C) 2026 Fluxer Contributors
#
# This file is part of Fluxer.
#
# Fluxer is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fluxer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
set -eu
# Installation target (overridable from the environment) and release source.
INSTALL_DIR="${INSTALL_DIR:-/usr/local/bin}"
GITHUB_REPO="fluxerapp/fluxer"
BINARY_NAME="livekitctl"
# info prints a prefixed progress message on stdout.
info() {
    echo "[livekitctl] $*"
}
# error prints a prefixed message on stderr and aborts the script.
error() {
    echo "[livekitctl] ERROR: $*" >&2
    exit 1
}
# check_root aborts unless the script is running as uid 0.
check_root() {
    if [ "$(id -u)" -ne 0 ]; then
        error "This script must be run as root (use sudo)"
    fi
}
# detect_arch maps `uname -m` onto the release architecture names
# (amd64 / arm64); any other machine type aborts the install.
detect_arch() {
    machine=$(uname -m)
    if [ "$machine" = "x86_64" ] || [ "$machine" = "amd64" ]; then
        echo "amd64"
    elif [ "$machine" = "aarch64" ] || [ "$machine" = "arm64" ]; then
        echo "arm64"
    else
        error "Unsupported architecture: $machine"
    fi
}
# detect_os verifies the host operating system; only Linux is supported.
detect_os() {
    kernel=$(uname -s | tr '[:upper:]' '[:lower:]')
    if [ "$kernel" = "linux" ]; then
        echo "linux"
    else
        error "Unsupported OS: $kernel (livekitctl only supports Linux)"
    fi
}
# get_latest_version queries the GitHub releases API and prints the newest
# livekitctl semver (without the "livekitctl-v" prefix).
#
# Fix: `grep -oP` requires GNU grep with PCRE support, which is unavailable
# on BusyBox/Alpine and macOS — a real risk under `#!/usr/bin/env sh`.
# Splitting on commas with tr and extracting with POSIX sed is portable;
# head -1 keeps the first (newest) release in the API's ordering.
get_latest_version() {
    version=$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPO}/releases" | \
        tr ',' '\n' | \
        sed -n 's/.*"tag_name"[[:space:]]*:[[:space:]]*"livekitctl-v\([0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\)".*/\1/p' | \
        head -1)
    if [ -z "$version" ]; then
        error "Failed to determine latest version"
    fi
    echo "$version"
}
# download_binary fetches the release binary for version/os/arch into a
# temp file and prints ONLY that temp file's path on stdout.
#
# Fix: the caller captures this function's stdout with $(...), so the
# previous `info` call (which writes to stdout) corrupted the captured
# path with the log line; progress output now goes to stderr.
download_binary() {
    version="$1"
    os="$2"
    arch="$3"
    url="https://github.com/${GITHUB_REPO}/releases/download/livekitctl-v${version}/${BINARY_NAME}-${os}-${arch}"
    tmp_file=$(mktemp)
    info "Downloading livekitctl v${version} for ${os}/${arch}..." >&2
    if ! curl -fsSL "$url" -o "$tmp_file"; then
        rm -f "$tmp_file"
        error "Failed to download from $url"
    fi
    echo "$tmp_file"
}
# install_binary moves the downloaded temp file into INSTALL_DIR under the
# final binary name and marks it world-executable.
# NOTE(review): assumes INSTALL_DIR already exists — confirm, or mkdir -p it.
install_binary() {
    tmp_file="$1"
    dest="${INSTALL_DIR}/${BINARY_NAME}"
    info "Installing to ${dest}..."
    mv "$tmp_file" "$dest"
    chmod 755 "$dest"
}
# verify_installation confirms the installed binary is resolvable on PATH
# and prints the first lines of its help output; aborts otherwise.
verify_installation() {
    if command -v "$BINARY_NAME" >/dev/null 2>&1; then
        info "Successfully installed livekitctl"
        "$BINARY_NAME" --help | head -5
    else
        error "Installation failed - binary not found in PATH"
    fi
}
# main drives the install: root check, platform detection, release lookup,
# download, install, and verification.
main() {
    info "livekitctl installer"
    info ""
    check_root
    os=$(detect_os)
    arch=$(detect_arch)
    info "Detected: ${os}/${arch}"
    version=$(get_latest_version)
    # download_binary prints the downloaded temp file's path on stdout.
    tmp_file=$(download_binary "$version" "$os" "$arch")
    install_binary "$tmp_file"
    verify_installation
    info ""
    info "Run 'livekitctl bootstrap --help' to get started"
}
main "$@"

View File

@@ -1,47 +0,0 @@
# Meilisearch full-text search service (Docker Swarm stack file; `deploy:`
# keys take effect under `docker stack deploy`).
services:
  meilisearch:
    image: getmeili/meilisearch:v1.23
    hostname: meilisearch
    # Deployment-specific settings (presumably the master key) live in the
    # env file — NOTE(review): confirm its contents are kept out of VCS.
    env_file:
      - /etc/fluxer/meilisearch.env
    environment:
      - MEILI_ENV=production
      - MEILI_DB_PATH=/meili_data
      - MEILI_HTTP_ADDR=0.0.0.0:7700
      - MEILI_MAX_INDEXING_MEMORY=4gb
      - MEILI_MAX_INDEXING_THREADS=4
      - MEILI_LOG_LEVEL=INFO
      - MEILI_NO_ANALYTICS=true
    volumes:
      - meilisearch_data:/meili_data
    networks:
      - fluxer-shared
    # Publishes the API on the host; quoted to avoid YAML numeric parsing.
    ports:
      - '7700:7700'
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '4'
          memory: 8G
        reservations:
          cpus: '2'
          memory: 4G
    healthcheck:
      test: ['CMD-SHELL', 'curl -fsS http://127.0.0.1:7700/health > /dev/null']
      interval: 10s
      timeout: 5s
      retries: 5
      # Generous start period: first boot may need to rebuild indexes.
      start_period: 60s
networks:
  fluxer-shared:
    external: true
volumes:
  meilisearch_data:
    driver: local

View File

@@ -0,0 +1,2 @@
# Ignore everything in this directory except this file. The negation must
# come AFTER the wildcard: in gitignore the LAST matching pattern wins, so
# the previous order ("!.gitignore" before "*") re-ignored .gitignore itself.
*
!.gitignore

View File

@@ -1,50 +0,0 @@
# PostgreSQL 17 primary for the Fluxer stack (Docker Swarm stack file).
services:
  postgres:
    image: postgres:17-alpine
    hostname: postgres
    environment:
      - POSTGRES_DB=fluxer
      - POSTGRES_USER=fluxer
      # Password comes from a Swarm secret, not the environment.
      - POSTGRES_PASSWORD_FILE=/run/secrets/postgres_password
    secrets:
      - postgres_password
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # Custom server configuration; see conf/postgresql.conf alongside
      # this stack file (relative paths resolve against the compose file).
      - ./conf/postgresql.conf:/etc/postgresql/postgresql.conf
    command: postgres -c config_file=/etc/postgresql/postgresql.conf
    networks:
      - fluxer-shared
    # Quoted port mapping avoids YAML numeric parsing pitfalls.
    ports:
      - '5432:5432'
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '4'
          memory: 16G
        reservations:
          cpus: '2'
          memory: 8G
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U fluxer -d fluxer']
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    # Extra shared memory for PostgreSQL's shared-memory segments.
    shm_size: 2g
networks:
  fluxer-shared:
    external: true
volumes:
  postgres_data:
    driver: local
secrets:
  postgres_password:
    external: true

View File

@@ -1,60 +0,0 @@
# --- Connections ---
listen_addresses = '*'
max_connections = 200
superuser_reserved_connections = 3
# --- Memory ---
shared_buffers = 4GB
effective_cache_size = 12GB
maintenance_work_mem = 1GB
work_mem = 20MB
# --- Checkpoints / WAL ---
checkpoint_timeout = 15min
checkpoint_completion_target = 0.9
max_wal_size = 4GB
min_wal_size = 1GB
# Planner costs: low random_page_cost suits SSD-class storage.
random_page_cost = 1.1
effective_io_concurrency = 200
wal_buffers = 16MB
wal_compression = on
# Replication-capable WAL with up to 3 senders.
wal_level = replica
max_wal_senders = 3
# --- Logging ---
logging_collector = on
log_directory = 'log'
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
log_rotation_age = 1d
log_rotation_size = 100MB
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
log_checkpoints = on
log_connections = on
log_disconnections = on
log_duration = off
log_lock_waits = on
log_statement = 'none'
log_temp_files = 0
# Log individual statements only when they exceed 1000 ms.
log_min_duration_statement = 1000
# --- Autovacuum ---
autovacuum = on
autovacuum_max_workers = 3
autovacuum_naptime = 30s
# --- Statistics (pg_stat_statements requires the preload) ---
shared_preload_libraries = 'pg_stat_statements'
track_io_timing = on
track_functions = all
pg_stat_statements.max = 10000
pg_stat_statements.track = all
default_statistics_target = 100
# --- Parallelism ---
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_worker_processes = 4
# --- Locale / formatting ---
datestyle = 'iso, mdy'
timezone = 'UTC'
lc_messages = 'en_US.utf8'
lc_monetary = 'en_US.utf8'
lc_numeric = 'en_US.utf8'
lc_time = 'en_US.utf8'
default_text_search_config = 'pg_catalog.english'

View File

@@ -0,0 +1,407 @@
{
"name": "Fluxer Critical Alerts",
"description": "Critical alerts for Fluxer services",
"version": 2,
"alerts": [
{
"id": "high-api-error-rate",
"name": "High API Error Rate",
"type": "metric",
"condition": {
"query": "sum(rate(http_server_request_count{service_name='fluxer-api',http_response_status_code=~'5..'}[5m])) > 10",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "API error rate is above 10 req/s",
"description": "The fluxer-api service is experiencing a high error rate (5xx responses). This may indicate a service degradation or outage."
},
"labels": {
"service": "fluxer-api",
"alert_type": "error_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "high-api-latency",
"name": "High API Latency",
"type": "metric",
"condition": {
"query": "histogram_quantile(0.95, sum(rate(http_server_request_duration_bucket{service_name='fluxer-api'}[5m])) > 1000",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "warning",
"annotations": {
"summary": "API P95 latency is above 1000ms",
"description": "The fluxer-api service is experiencing high latency. 95% of requests are taking longer than 1 second."
},
"labels": {
"service": "fluxer-api",
"alert_type": "latency"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "queue-depth-high",
"name": "Queue Depth Too High",
"type": "metric",
"condition": {
"query": "fluxer_queue_depth > 10000",
"evaluation_interval": "1m",
"for": "15m"
},
"severity": "warning",
"annotations": {
"summary": "Queue depth is above 10,000 jobs",
"description": "The job queue has accumulated more than 10,000 jobs. This may indicate processing is slower than job arrival."
},
"labels": {
"service": "fluxer-queue",
"alert_type": "queue_depth"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "queue-dlq-rate",
"name": "High Dead Letter Queue Rate",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_queue_dead_letter[5m])) > 5",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "critical",
"annotations": {
"summary": "DLQ rate is above 5 jobs/sec",
"description": "Jobs are being moved to the dead letter queue at a high rate. This may indicate persistent job failures."
},
"labels": {
"service": "fluxer-queue",
"alert_type": "dlq_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "gateway-connection-drop",
"name": "Gateway Connection Drop Rate",
"type": "metric",
"condition": {
"query": "rate(gateway_websocket_disconnections[1m]) / rate(gateway_websocket_connections[1m]) > 0.5",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "Gateway disconnect rate exceeds 50% of connect rate",
"description": "WebSocket connections are dropping at an unusually high rate. This may indicate network issues or service instability."
},
"labels": {
"service": "fluxer-gateway",
"alert_type": "connection_stability"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "gateway-rpc-latency-high",
"name": "Gateway RPC Latency High",
"type": "metric",
"condition": {
"query": "gateway_rpc_latency_p95 > 500",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "warning",
"annotations": {
"summary": "Gateway RPC P95 latency above 500ms",
"description": "RPC calls from gateway to backend are experiencing high latency."
},
"labels": {
"service": "fluxer-gateway",
"alert_type": "latency"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "media-proxy-error-rate",
"name": "Media Proxy High Error Rate",
"type": "metric",
"condition": {
"query": "sum(rate(media_proxy_failure{service_name='fluxer-media-proxy'}[5m])) / sum(rate(http_server_request_count{service_name='fluxer-media-proxy'}[5m])) > 0.1",
"evaluation_interval": "1m",
"for": "10m"
},
"severity": "warning",
"annotations": {
"summary": "Media proxy error rate above 10%",
"description": "The media proxy is failing more than 10% of requests. This may indicate origin issues or cache problems."
},
"labels": {
"service": "fluxer-media-proxy",
"alert_type": "error_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "media-proxy-timeout-rate",
"name": "Media Proxy High Timeout Rate",
"type": "metric",
"condition": {
"query": "sum(rate(media_proxy_failure{error_type='timeout'}[5m])) > 5",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "Media proxy timeout rate above 5 req/s",
"description": "The media proxy is experiencing a high rate of timeouts. This may indicate network issues or slow origin servers."
},
"labels": {
"service": "fluxer-media-proxy",
"alert_type": "timeout"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "telemetry-ingestion-stopped",
"name": "Telemetry Ingestion Stopped",
"type": "metric",
"condition": {
"query": "increase(signoz_traces_signoz_index_v2[15m]) == 0",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "No traces being ingested",
"description": "The SigNoz collector has not received any traces in the last 15 minutes. This may indicate a collector issue or service instrumentation failure."
},
"labels": {
"service": "signoz",
"alert_type": "telemetry"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "cron-job-overdue",
"name": "Cron Job Overdue",
"type": "metric",
"condition": {
"query": "time() - max(fluxer_queue_cron_tick_timestamp by (cron)) > 3600",
"evaluation_interval": "5m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "Cron job has not executed in over 1 hour",
"description": "A scheduled cron job has not run in over an hour. This may indicate a hung cron process or scheduling issue."
},
"labels": {
"service": "fluxer-queue",
"alert_type": "cron"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "csam-match-detected",
"name": "CSAM Match Detected",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_matches_total{service_name='fluxer-api'}[1m])) > 0",
"evaluation_interval": "1m",
"for": "0m"
},
"severity": "critical",
"annotations": {
"summary": "CSAM content has been detected",
"description": "CSAM content has been detected. Immediate review required."
},
"labels": {
"service": "fluxer-api",
"alert_type": "csam_match"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "photodna-api-error-rate-high",
"name": "PhotoDNA API Error Rate High",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_photodna_api_total{service_name='fluxer-api',status='error'}[5m])) / sum(rate(fluxer_csam_photodna_api_total{service_name='fluxer-api'}[5m])) > 0.1",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "PhotoDNA API error rate exceeds 10%",
"description": "PhotoDNA API error rate exceeds 10%"
},
"labels": {
"service": "fluxer-api",
"alert_type": "photodna_error_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "ncmec-submission-failure",
"name": "NCMEC Submission Failure",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_ncmec_submissions{service_name='fluxer-api',status='error'}[5m])) > 0",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "critical",
"annotations": {
"summary": "NCMEC report submission has failed",
"description": "NCMEC report submission has failed. Manual intervention required."
},
"labels": {
"service": "fluxer-api",
"alert_type": "ncmec_submission"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-critical"
}
]
},
{
"id": "csam-scan-failure-rate-high",
"name": "CSAM Scan Failure Rate High",
"type": "metric",
"condition": {
"query": "sum(rate(fluxer_csam_scans_total{service_name='fluxer-api',status='error'}[5m])) / sum(rate(fluxer_csam_scans_total{service_name='fluxer-api'}[5m])) > 0.05",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "CSAM scan failure rate exceeds 5%",
"description": "CSAM scan failure rate exceeds 5%"
},
"labels": {
"service": "fluxer-api",
"alert_type": "csam_scan_failure_rate"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
},
{
"id": "photodna-api-latency-high",
"name": "PhotoDNA API Latency High",
"type": "metric",
"condition": {
"query": "histogram_quantile(0.95, sum(rate(fluxer_csam_photodna_api_duration_ms_bucket{service_name='fluxer-api'}[5m])) by (le)) > 5000",
"evaluation_interval": "1m",
"for": "5m"
},
"severity": "warning",
"annotations": {
"summary": "PhotoDNA API p95 latency exceeds 5 seconds",
"description": "PhotoDNA API p95 latency exceeds 5 seconds"
},
"labels": {
"service": "fluxer-api",
"alert_type": "photodna_latency"
},
"actions": [
{
"type": "notification",
"channel": "slack",
"target": "#alerts-warning"
}
]
}
],
"notification_channels": {
"slack": {
"type": "webhook",
"url": "${ALERT_WEBHOOK_URL}",
"channel_mapping": {
"critical": "#alerts-critical",
"warning": "#alerts-warning"
}
}
}
}

View File

@@ -0,0 +1,329 @@
groups:
- name: fluxer_api_alerts
interval: 30s
rules:
- alert: FluxerHighErrorRate
        expr: |
          (
            sum by (service_name) (rate(http_server_request_count{http_response_status_code=~"5.."}[5m]))
            /
            sum by (service_name) (rate(http_server_request_count[5m]))
          ) > 0.05
          and sum by (service_name) (rate(http_server_request_count[5m])) > 10
for: 5m
labels:
severity: critical
service: fluxer-api
alert_type: error_rate
annotations:
summary: 'High error rate on {{ $labels.service_name }}'
description: 'Error rate is above 5% (minimum 10 requests/5m) on {{ $labels.service_name }}. Current value: {{ $value | humanizePercentage }}'
runbook: 'https://docs.fluxer.dev/runbooks/high-error-rate'
- alert: FluxerElevatedErrorRate
        expr: |
          (
            sum by (service_name) (rate(http_server_request_count{http_response_status_code=~"5.."}[5m]))
            /
            sum by (service_name) (rate(http_server_request_count[5m]))
          ) > 0.01
          and sum by (service_name) (rate(http_server_request_count[5m])) > 10
for: 10m
labels:
severity: warning
service: fluxer-api
alert_type: error_rate
annotations:
summary: 'Elevated error rate on {{ $labels.service_name }}'
description: 'Error rate is above 1% on {{ $labels.service_name }}. Current value: {{ $value | humanizePercentage }}'
runbook: 'https://docs.fluxer.dev/runbooks/high-error-rate'
- name: fluxer_queue_alerts
interval: 30s
rules:
- alert: FluxerQueueDepthCritical
expr: |
fluxer_queue_depth{service_name="fluxer-queue"} > 10000
for: 5m
labels:
severity: critical
service: fluxer-queue
alert_type: queue_depth
annotations:
summary: 'Queue depth critically high for {{ $labels.queue_name }}'
description: 'Queue {{ $labels.queue_name }} has {{ $value }} jobs pending (threshold: 10,000). Jobs may be delayed or processing is stalled.'
runbook: 'https://docs.fluxer.dev/runbooks/queue-depth-critical'
- alert: FluxerQueueDepthElevated
expr: |
fluxer_queue_depth{service_name="fluxer-queue"} > 5000
for: 10m
labels:
severity: warning
service: fluxer-queue
alert_type: queue_depth
annotations:
summary: 'Queue depth elevated for {{ $labels.queue_name }}'
description: 'Queue {{ $labels.queue_name }} has {{ $value }} jobs pending (threshold: 5,000). Monitor for escalation.'
- alert: FluxerDLQRateCritical
expr: |
sum(rate(fluxer_queue_dead_letter{service_name="fluxer-queue"}[5m])) > 5
for: 5m
labels:
severity: critical
service: fluxer-queue
alert_type: dlq_rate
annotations:
summary: 'High dead letter queue rate'
description: 'Jobs are failing and moving to DLQ at rate {{ $value | humanize }} jobs/sec. Check job failures and error logs.'
runbook: 'https://docs.fluxer.dev/runbooks/high-dlq-rate'
- name: fluxer_gateway_alerts
interval: 30s
rules:
- alert: FluxerGatewayConnectionDropCritical
expr: |
sum(rate(gateway_websocket_disconnections{reason="error"}[1m])) by (service_name) > 10
for: 3m
labels:
severity: critical
service: fluxer-gateway
alert_type: connection_drop
annotations:
summary: 'Critical WebSocket error disconnect rate'
description: 'Gateway experiencing {{ $value | humanize }} error disconnects/min. This may indicate service instability or network issues.'
runbook: 'https://docs.fluxer.dev/runbooks/gateway-connection-drop'
- alert: FluxerGatewayDisconnectElevated
expr: |
sum(rate(gateway_websocket_disconnections{reason="error"}[1m])) by (service_name) > 5
for: 5m
labels:
severity: warning
service: fluxer-gateway
alert_type: connection_drop
annotations:
summary: 'Elevated WebSocket error disconnect rate'
description: 'Gateway experiencing {{ $value | humanize }} error disconnects/min. Monitor for escalation.'
- alert: FluxerGatewayDisconnectRatioHigh
expr: |
(
sum(rate(gateway_websocket_disconnections{reason="error"}[5m])) by (service_name)
/
sum(rate(gateway_websocket_connections[5m])) by (service_name)
) > 0.1
for: 5m
labels:
severity: critical
service: fluxer-gateway
alert_type: disconnect_ratio
annotations:
summary: 'Gateway disconnect ratio above 10%'
description: 'Error disconnects represent {{ $value | humanizePercentage }} of new connections. Check gateway stability.'
runbook: 'https://docs.fluxer.dev/runbooks/gateway-connection-drop'
- alert: FluxerGatewayRPCLatencyHigh
expr: |
histogram_quantile(0.95,
sum(rate(gateway_rpc_latency_bucket{service_name="fluxer-gateway"}[5m])) by (le)
) > 500
for: 10m
labels:
severity: warning
service: fluxer-gateway
alert_type: rpc_latency
annotations:
summary: 'Gateway RPC P95 latency above 500ms'
description: 'Gateway RPC calls experiencing high latency. Current P95: {{ $value | humanize }}ms'
runbook: 'https://docs.fluxer.dev/runbooks/gateway-rpc-latency'
- name: fluxer_log_alerts
interval: 30s
rules:
- alert: FluxerLogErrorSpikeCritical
expr: |
sum(rate(logs_count{severity_text="ERROR"}[5m])) by (service_name) > 50
for: 2m
labels:
severity: critical
alert_type: log_error_spike
annotations:
summary: 'Critical error log volume spike on {{ $labels.service_name }}'
description: 'Service {{ $labels.service_name }} logging {{ $value | humanize }} errors/sec. Check logs and traces for root cause.'
runbook: 'https://docs.fluxer.dev/runbooks/log-error-spike'
- alert: FluxerLogErrorElevated
expr: |
sum(rate(logs_count{severity_text="ERROR"}[5m])) by (service_name) > 20
for: 10m
labels:
severity: warning
alert_type: log_error_elevated
annotations:
summary: 'Elevated error log volume on {{ $labels.service_name }}'
description: 'Service {{ $labels.service_name }} logging {{ $value | humanize }} errors/sec. Monitor for escalation.'
- alert: FluxerLogWarningElevated
expr: |
sum(rate(logs_count{severity_text="WARN"}[5m])) by (service_name) > 100
for: 10m
labels:
severity: warning
alert_type: log_warning_elevated
annotations:
summary: 'Elevated warning log volume on {{ $labels.service_name }}'
description: 'Service {{ $labels.service_name }} logging {{ $value | humanize }} warnings/sec. Review warning patterns.'
- name: fluxer_api_performance_alerts
interval: 30s
rules:
- alert: FluxerAPILatencyCritical
expr: |
histogram_quantile(0.95,
sum(rate(http_server_request_duration_bucket{service_name="fluxer-api"}[5m])) by (le, http_route)
) > 2000
for: 5m
labels:
severity: critical
service: fluxer-api
alert_type: latency
annotations:
summary: 'Critical API latency on route {{ $labels.http_route }}'
description: 'P95 latency for route {{ $labels.http_route }} is above 2 seconds. Current: {{ $value | humanize }}ms'
runbook: 'https://docs.fluxer.dev/runbooks/high-api-latency'
- alert: FluxerAPILatencyElevated
expr: |
histogram_quantile(0.95,
sum(rate(http_server_request_duration_bucket{service_name="fluxer-api"}[5m])) by (le, http_route)
) > 1000
for: 10m
labels:
severity: warning
service: fluxer-api
alert_type: latency
annotations:
summary: 'Elevated API latency on route {{ $labels.http_route }}'
description: 'P95 latency for route {{ $labels.http_route }} is above 1 second. Current: {{ $value | humanize }}ms'
- name: fluxer_database_alerts
interval: 30s
rules:
- alert: FluxerDBLatencyCritical
expr: |
histogram_quantile(0.95,
sum(rate(db_query_latency_bucket[5m])) by (le, query_type)
) > 1000
for: 5m
labels:
severity: critical
alert_type: database_latency
annotations:
summary: 'Critical database query latency for {{ $labels.query_type }}'
description: 'P95 {{ $labels.query_type }} query latency above 1 second. Current: {{ $value | humanize }}ms'
runbook: 'https://docs.fluxer.dev/runbooks/database-latency'
- alert: FluxerDBConnectionPoolHigh
expr: |
db_connection_pool_active / db_connection_pool_max > 0.8
for: 10m
labels:
severity: warning
alert_type: connection_pool
annotations:
summary: 'Database connection pool usage above 80%'
description: 'Connection pool at {{ $value | humanizePercentage }} capacity. May lead to connection waits.'
runbook: 'https://docs.fluxer.dev/runbooks/connection-pool'
- name: fluxer_cache_alerts
interval: 30s
rules:
- alert: FluxerCacheHitRateLow
expr: |
sum(rate(cache_operation{status="hit"}[5m])) by (cache_name)
/
sum(rate(cache_operation{status=~"hit|miss"}[5m])) by (cache_name) < 0.5
for: 15m
labels:
severity: warning
alert_type: cache_efficiency
annotations:
summary: 'Low cache hit rate for {{ $labels.cache_name }}'
description: 'Cache {{ $labels.cache_name }} hit rate below 50%. Current: {{ $value | humanizePercentage }}'
runbook: 'https://docs.fluxer.dev/runbooks/low-cache-hit-rate'
- name: fluxer_worker_alerts
interval: 30s
rules:
- alert: FluxerWorkerFailureRateCritical
expr: |
sum(rate(fluxer_worker_task_failure[5m])) by (task_name) > 1
for: 5m
labels:
severity: critical
alert_type: worker_failure
annotations:
summary: 'Critical worker task failure rate for {{ $labels.task_name }}'
description: 'Worker task {{ $labels.task_name }} failing at {{ $value | humanize }} tasks/sec. Check task logs.'
runbook: 'https://docs.fluxer.dev/runbooks/worker-failures'
- alert: FluxerCronJobOverdue
expr: |
          time() - max by (cron) (fluxer_queue_cron_tick_timestamp) > 3600
for: 5m
labels:
severity: warning
service: fluxer-queue
alert_type: cron
annotations:
summary: 'Cron job {{ $labels.cron }} has not executed in over 1 hour'
          description: "Scheduled cron job hasn't run for {{ $value | humanizeDuration }}. May indicate hung process."
runbook: 'https://docs.fluxer.dev/runbooks/cron-overdue'
- name: fluxer_telemetry_alerts
interval: 60s
rules:
      - alert: FluxerTelemetryIngestionStopped
        # NOTE(review): if the underlying series disappears entirely,
        # `increase(...) == 0` returns no samples and this alert never fires —
        # exactly the fully-stopped case it is meant to catch. Consider
        # `... == 0 or absent(signoz_traces_signoz_index_v2)`.
        expr: |
          increase(signoz_traces_signoz_index_v2[15m]) == 0
for: 5m
labels:
severity: critical
alert_type: telemetry
annotations:
summary: 'No traces being ingested'
description: "SigNoz collector hasn't received traces in 15 minutes. Check collector health and service instrumentation."
runbook: 'https://docs.fluxer.dev/runbooks/telemetry-down'
- name: fluxer_media_proxy_alerts
interval: 30s
rules:
- alert: FluxerMediaProxyErrorRate
expr: |
sum(rate(media_proxy_failure{service_name="fluxer-media-proxy"}[5m]))
/
sum(rate(http_server_request_count{service_name="fluxer-media-proxy"}[5m])) > 0.1
for: 10m
labels:
severity: warning
service: fluxer-media-proxy
alert_type: error_rate
annotations:
summary: 'Media proxy error rate above 10%'
description: 'Media proxy failing {{ $value | humanizePercentage }} of requests. Check origin servers and cache.'
runbook: 'https://docs.fluxer.dev/runbooks/media-proxy-errors'
- alert: FluxerMediaProxyTimeoutRate
expr: |
sum(rate(media_proxy_failure{error_type="timeout"}[5m])) > 5
for: 5m
labels:
severity: warning
service: fluxer-media-proxy
alert_type: timeout
annotations:
summary: 'Media proxy timeout rate above 5 req/s'
description: 'Media proxy experiencing high timeout rate. May indicate network issues or slow origins.'
runbook: 'https://docs.fluxer.dev/runbooks/media-proxy-timeouts'

View File

@@ -0,0 +1,213 @@
x-common: &common
networks:
- fluxer-shared
logging:
options:
      max-size: '50m'
max-file: '3'
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
x-clickhouse-defaults: &clickhouse_defaults
<<: *common
image: clickhouse/clickhouse-server:25.5.6
tty: true
environment:
- CLICKHOUSE_SKIP_USER_SETUP=1
deploy:
<<: *deploy_base
labels:
signoz.io/scrape: 'true'
signoz.io/port: '9363'
signoz.io/path: '/metrics'
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- 0.0.0.0:8123/ping
interval: 30s
timeout: 5s
retries: 3
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
x-zookeeper-defaults: &zookeeper_defaults
<<: *common
image: signoz/zookeeper:3.7.1
user: root
deploy:
<<: *deploy_base
labels:
signoz.io/scrape: 'true'
signoz.io/port: '9141'
signoz.io/path: '/metrics'
healthcheck:
test:
- CMD-SHELL
- curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null
interval: 30s
timeout: 5s
retries: 3
services:
init-clickhouse:
<<: *common
image: clickhouse/clickhouse-server:25.5.6
command:
- bash
- -c
- |
version="v0.0.1"
node_os=$$(uname -s | tr '[:upper:]' '[:lower:]')
node_arch=$$(uname -m | sed s/aarch64/arm64/ | sed s/x86_64/amd64/)
echo "Fetching histogram-binary for $${node_os}/$${node_arch}"
cd /tmp
wget -O histogram-quantile.tar.gz "https://github.com/SigNoz/signoz/releases/download/histogram-quantile%2F$${version}/histogram-quantile_$${node_os}_$${node_arch}.tar.gz"
tar -xvzf histogram-quantile.tar.gz
mv histogram-quantile /var/lib/clickhouse/user_scripts/histogramQuantile
deploy:
restart_policy:
condition: on-failure
volumes:
- ./conf/clickhouse/user_scripts:/var/lib/clickhouse/user_scripts:rw
zookeeper-1:
<<: *zookeeper_defaults
environment:
- ZOO_SERVER_ID=1
- ALLOW_ANONYMOUS_LOGIN=yes
- ZOO_AUTOPURGE_INTERVAL=1
- ZOO_ENABLE_PROMETHEUS_METRICS=yes
- ZOO_PROMETHEUS_METRICS_PORT_NUMBER=9141
volumes:
- zookeeper-1:/bitnami/zookeeper
clickhouse:
<<: *clickhouse_defaults
hostname: clickhouse
configs:
- source: clickhouse-config
target: /etc/clickhouse-server/config.xml
- source: clickhouse-users
target: /etc/clickhouse-server/users.xml
- source: clickhouse-custom-function
target: /etc/clickhouse-server/custom-function.xml
- source: clickhouse-cluster
target: /etc/clickhouse-server/config.d/cluster.xml
volumes:
- clickhouse:/var/lib/clickhouse/
schema-migrator:
<<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
entrypoint: sh
command:
- -c
- /signoz-schema-migrator sync --dsn=tcp://clickhouse:9000 --up= && /signoz-schema-migrator async --dsn=tcp://clickhouse:9000 --up=
deploy:
restart_policy:
condition: on-failure
delay: 5s
signoz:
<<: *common
image: signoz/signoz:${SIGNOZ_IMAGE_TAG:-v0.108.0}
command:
- --config=/root/config/prometheus.yml
environment:
- SIGNOZ_ALERTMANAGER_PROVIDER=signoz
- SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://clickhouse:9000
- SIGNOZ_SQLSTORE_SQLITE_PATH=/var/lib/signoz/signoz.db
- DASHBOARDS_PATH=/root/config/dashboards
- STORAGE=clickhouse
- GODEBUG=netdns=go
- TELEMETRY_ENABLED=true
- DEPLOYMENT_TYPE=docker-swarm
- DOT_METRICS_ENABLED=true
configs:
- source: signoz-prometheus-config
target: /root/config/prometheus.yml
volumes:
- sqlite:/var/lib/signoz/
- ./dashboards:/root/config/dashboards:ro
deploy:
<<: *deploy_base
replicas: 1
labels:
- 'caddy=signoz.fluxer.app'
- 'caddy.reverse_proxy={{upstreams 8080}}'
- 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
- 'caddy.header.X-Xss-Protection="1; mode=block"'
- 'caddy.header.X-Content-Type-Options=nosniff'
- 'caddy.header.Referrer-Policy=strict-origin-when-cross-origin'
- 'caddy.header.X-Frame-Options=DENY'
healthcheck:
test:
- CMD
- wget
- --spider
- -q
- localhost:8080/api/v1/health
interval: 30s
timeout: 5s
retries: 3
otel-collector:
<<: *common
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.12}
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
- --copy-path=/var/tmp/collector-config.yaml
- --feature-gates=-pkg.translator.prometheus.NormalizeName
configs:
- source: otel-collector-config
target: /etc/otel-collector-config.yaml
- source: otel-manager-config
target: /etc/manager-config.yaml
environment:
- OTEL_RESOURCE_ATTRIBUTES=host.name={{.Node.Hostname}},os.type={{.Node.Platform.OS}}
- LOW_CARDINAL_EXCEPTION_GROUPING=false
ports:
- '4317:4317'
- '4318:4318'
deploy:
<<: *deploy_base
replicas: 3
networks:
fluxer-shared:
external: true
volumes:
clickhouse:
driver: local
sqlite:
driver: local
zookeeper-1:
driver: local
configs:
clickhouse-config:
file: ./conf/clickhouse/config.xml
clickhouse-users:
file: ./conf/clickhouse/users.xml
clickhouse-custom-function:
file: ./conf/clickhouse/custom-function.xml
clickhouse-cluster:
file: ./conf/clickhouse/cluster.xml
signoz-prometheus-config:
file: ./conf/signoz/prometheus.yml
otel-collector-config:
file: ./conf/signoz/otel-collector-config.yaml
otel-manager-config:
file: ./conf/signoz/otel-collector-opamp-config.yaml

View File

@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
Optional. If you don't use replicated tables, you could omit that.
See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
-->
<zookeeper>
<node index="1">
<host>zookeeper-1</host>
<port>2181</port>
</node>
<!-- <node index="2">
<host>zookeeper-2</host>
<port>2181</port>
</node>
<node index="3">
<host>zookeeper-3</host>
<port>2181</port>
</node> -->
</zookeeper>
<!-- Configuration of clusters that could be used in Distributed tables.
https://clickhouse.com/docs/en/operations/table_engines/distributed/
-->
<remote_servers>
<cluster>
<!-- Inter-server per-cluster secret for Distributed queries
default: no secret (no authentication will be performed)
If set, then Distributed queries will be validated on shards, so at least:
- such cluster should exist on the shard,
- such cluster should have the same secret.
And also (and which is more important), the initial_user will
be used as current user for the query.
Right now the protocol is pretty simple and it only takes into account:
- cluster name
- query
Also it will be nice if the following will be implemented:
- source hostname (see interserver_http_host), but then it will depends from DNS,
it can use IP address instead, but then the you need to get correct on the initiator node.
- target hostname / ip address (same notes as for source hostname)
- time-based security tokens
-->
<!-- <secret></secret> -->
<shard>
<!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
<!-- <internal_replication>false</internal_replication> -->
<!-- Optional. Shard weight when writing data. Default: 1. -->
<!-- <weight>1</weight> -->
<replica>
<host>clickhouse</host>
<port>9000</port>
<!-- Optional. Priority of the replica for load_balancing. Default: 1 (less value has more priority). -->
<!-- <priority>1</priority> -->
</replica>
</shard>
<!-- <shard>
<replica>
<host>clickhouse-2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>clickhouse-3</host>
<port>9000</port>
</replica>
</shard> -->
</cluster>
</remote_servers>
</clickhouse>

View File

@@ -0,0 +1,8 @@
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
</keeper_server>
</clickhouse>

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,21 @@
<functions>
<function>
<type>executable</type>
<name>histogramQuantile</name>
<return_type>Float64</return_type>
<argument>
<type>Array(Float64)</type>
<name>buckets</name>
</argument>
<argument>
<type>Array(Float64)</type>
<name>counts</name>
</argument>
<argument>
<type>Float64</type>
<name>quantile</name>
</argument>
<format>CSV</format>
<command>./histogramQuantile</command>
</function>
</functions>

View File

@@ -0,0 +1,123 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<!-- See also the files in users.d directory where the password can be overridden.
Password could be specified in plaintext or in SHA256 (in hex format).
If you want to specify password in plaintext (not recommended), place it in 'password' element.
Example: <password>qwerty</password>.
Password could be empty.
If you want to specify SHA256, place it in 'password_sha256_hex' element.
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
If you want to specify a previously defined LDAP server (see 'ldap_servers' in the main config) for authentication,
place its name in 'server' element inside 'ldap' element.
Example: <ldap><server>my_ldap_server</server></ldap>
If you want to authenticate the user via Kerberos (assuming Kerberos is enabled, see 'kerberos' in the main config),
place 'kerberos' element instead of 'password' (and similar) elements.
The name part of the canonical principal name of the initiator must match the user name for authentication to succeed.
You can also place 'realm' element inside 'kerberos' element to further restrict authentication to only those requests
whose initiator's realm matches it.
Example: <kerberos />
Example: <kerberos><realm>EXAMPLE.COM</realm></kerberos>
How to generate decent password:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
In first line will be password and in second - corresponding SHA256.
How to generate double SHA1:
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
In first line will be password and in second - corresponding double SHA1.
-->
<password></password>
<!-- List of networks with open access.
To open access from everywhere, specify:
<ip>::/0</ip>
To open access only from localhost, specify:
<ip>::1</ip>
<ip>127.0.0.1</ip>
Each element of list has one of the following forms:
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
<host> Hostname. Example: server01.clickhouse.com.
To check access, DNS query is performed, and all received addresses compared to peer address.
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.clickhouse\.com$
To check access, DNS PTR query is performed for peer address and then regexp is applied.
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
Strongly recommended that regexp is ends with $
All results of DNS requests are cached till server restart.
-->
<networks>
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>

View File

@@ -0,0 +1,109 @@
connectors:
signozmeter:
metrics_flush_interval: 1h
dimensions:
- name: service.name
- name: deployment.environment
- name: host.name
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector
static_configs:
- targets:
- localhost:8888
labels:
job_name: otel-collector
processors:
batch:
send_batch_size: 10000
send_batch_max_size: 11000
timeout: 10s
batch/meter:
send_batch_max_size: 25000
send_batch_size: 20000
timeout: 1s
  # NOTE(review): resourcedetection is declared but not referenced by any
  # pipeline under `service.pipelines` — wire it into the pipelines or
  # remove it.
  resourcedetection:
    detectors: [env, system]
    timeout: 2s
signozspanmetrics/delta:
metrics_exporter: signozclickhousemetrics
metrics_flush_interval: 60s
latency_histogram_buckets:
[100us, 1ms, 2ms, 6ms, 10ms, 50ms, 100ms, 250ms, 500ms, 1000ms, 1400ms, 2000ms, 5s, 10s, 20s, 40s, 60s]
dimensions_cache_size: 100000
aggregation_temporality: AGGREGATION_TEMPORALITY_DELTA
enable_exp_histogram: true
dimensions:
- name: service.namespace
default: default
- name: deployment.environment
default: default
- name: signoz.collector.id
- name: service.version
- name: browser.platform
- name: browser.mobile
- name: k8s.cluster.name
- name: k8s.node.name
- name: k8s.namespace.name
- name: host.name
- name: host.type
- name: container.name
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1777
exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
use_new_schema: true
signozclickhousemetrics:
dsn: tcp://clickhouse:9000/signoz_metrics
clickhouselogsexporter:
dsn: tcp://clickhouse:9000/signoz_logs
timeout: 10s
use_new_schema: true
signozclickhousemeter:
dsn: tcp://clickhouse:9000/signoz_meter
timeout: 45s
sending_queue:
enabled: false
service:
telemetry:
logs:
encoding: json
extensions:
- health_check
- pprof
pipelines:
traces:
receivers: [otlp]
processors: [signozspanmetrics/delta, batch]
exporters: [clickhousetraces, signozmeter]
metrics:
receivers: [otlp]
processors: [batch]
exporters: [signozclickhousemetrics, signozmeter]
metrics/prometheus:
receivers: [prometheus]
processors: [batch]
exporters: [signozclickhousemetrics, signozmeter]
logs:
receivers: [otlp]
processors: [batch]
exporters: [clickhouselogsexporter, signozmeter]
metrics/meter:
receivers: [signozmeter]
processors: [batch/meter]
exporters: [signozclickhousemeter]

View File

@@ -0,0 +1 @@
server_endpoint: ws://signoz:4320/v1/opamp

View File

@@ -0,0 +1,16 @@
global:
scrape_interval: 5s
evaluation_interval: 15s
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
rule_files: []
scrape_configs: []
remote_read:
- url: tcp://clickhouse:9000/signoz_metrics

View File

View File

@@ -1,4 +1,4 @@
#!/bin/sh
#!/usr/bin/env sh
# Copyright (C) 2026 Fluxer Contributors
#
@@ -17,10 +17,22 @@
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
set -e
set -eu
sed -e "s|\${LIVEKIT_DOMAIN}|${LIVEKIT_DOMAIN}|g" \
-e "s|\${LIVEKIT_DOMAIN_TURN}|${LIVEKIT_DOMAIN_TURN}|g" \
/etc/caddy.yaml.template > /etc/caddy.yaml
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
STACK=${STACK:-fluxer-signoz}
SIGNOZ_IMAGE_TAG=${SIGNOZ_IMAGE_TAG:-v0.108.0}
exec caddy run --config /etc/caddy.yaml --adapter yaml "$@"
if ! docker network inspect fluxer-shared >/dev/null 2>&1; then
docker network create -d overlay fluxer-shared
fi
if [ "$(docker info --format '{{.Swarm.LocalNodeState}}')" != "active" ]; then
echo "Docker swarm must be active for stack deployment. Run 'docker swarm init' and try again."
exit 1
fi
export STACK
export SIGNOZ_IMAGE_TAG
docker stack deploy --with-registry-auth -c "$SCRIPT_DIR/compose.yaml" "$STACK"

View File

@@ -0,0 +1,3 @@
TURBO_TOKEN=<generate-with-openssl-rand-hex-32>
AWS_ACCESS_KEY_ID=<ovh-access-key>
AWS_SECRET_ACCESS_KEY=<ovh-secret-key>

View File

@@ -0,0 +1,37 @@
services:
turborepo-cache:
image: ducktors/turborepo-remote-cache:latest
hostname: turborepo-cache
env_file:
- /etc/fluxer/turborepo-cache.env
environment:
- NODE_ENV=production
- PORT=3000
- STORAGE_PROVIDER=s3
- STORAGE_PATH=fluxer-turborepo
- S3_ENDPOINT=https://s3.us-east-va.io.cloud.ovh.us
- AWS_REGION=us-east-va
- LOG_LEVEL=info
networks:
- fluxer-shared
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
resources:
limits:
cpus: '1'
memory: 512M
reservations:
cpus: '0.5'
memory: 256M
labels:
- 'caddy=turborepo.fluxer.dev'
- 'caddy.reverse_proxy={{upstreams 3000}}'
- 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
networks:
fluxer-shared:
external: true

0
fluxer_devops/valkey/entrypoint.sh Normal file → Executable file
View File

View File

@@ -0,0 +1,36 @@
WEBLATE_SITE_DOMAIN=i18n.fluxer.app
WEBLATE_SITE_TITLE=Fluxer Localization Platform
WEBLATE_ADMIN_EMAIL=admin@fluxer.app
WEBLATE_ADMIN_PASSWORD=
WEBLATE_ADMIN_NAME=Fluxer Admin
WEBLATE_SERVER_EMAIL=admin@fluxer.app
WEBLATE_DEFAULT_FROM_EMAIL=noreply@fluxer.app
WEBLATE_EMAIL_HOST=
WEBLATE_EMAIL_PORT=587
WEBLATE_EMAIL_USE_SSL=0
WEBLATE_EMAIL_USE_TLS=1
WEBLATE_EMAIL_HOST_USER=apikey
WEBLATE_EMAIL_HOST_PASSWORD=
WEBLATE_DB_PASSWORD=
POSTGRES_DB=weblate
POSTGRES_USER=weblate
POSTGRES_PASSWORD=
POSTGRES_HOST=weblate-postgres
POSTGRES_PORT=5432
VALKEY_PASSWORD=
REDIS_HOST=valkey
REDIS_PORT=6379
REDIS_DB=1
REDIS_PASSWORD=
CACHE_URL=redis://:@valkey:6379/1
BROKER_URL=redis://:@valkey:6379/2
CELERY_BROKER_URL=redis://:@valkey:6379/2
WEBLATE_SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,https
WEBLATE_ENABLE_HTTPS=1
WEBLATE_IP_BEHIND_REVERSE_PROXY=1
WEBLATE_IP_PROXY_HEADER=HTTP_X_FORWARDED_FOR

View File

@@ -0,0 +1,93 @@
services:
weblate-postgres:
image: postgres:17-alpine
hostname: weblate-postgres
env_file:
- /etc/fluxer/weblate.env
volumes:
- weblate_postgres:/var/lib/postgresql/data
networks:
- fluxer-shared
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
resources:
limits:
cpus: '2'
memory: 4G
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U weblate -d weblate']
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
weblate:
image: weblate/weblate:latest
hostname: weblate
env_file:
- /etc/fluxer/weblate.env
volumes:
- weblate_data:/app/data
- weblate_cache:/app/cache
networks:
- fluxer-shared
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
resources:
limits:
cpus: '4'
memory: 8G
labels:
- 'caddy=i18n.fluxer.app'
- 'caddy.reverse_proxy={{upstreams 8080}}'
- 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
- 'caddy.header.X-Xss-Protection="1; mode=block"'
- 'caddy.header.X-Content-Type-Options=nosniff'
- 'caddy.header.Referrer-Policy=strict-origin-when-cross-origin'
- 'caddy.header.X-Frame-Options=DENY'
healthcheck:
test: ['CMD-SHELL', 'curl -fsS http://127.0.0.1:8080/ >/dev/null || exit 1']
interval: 30s
timeout: 5s
retries: 5
start_period: 60s
weblate-worker:
image: weblate/weblate:latest
hostname: weblate-worker
env_file:
- /etc/fluxer/weblate.env
    environment:
      - WEBLATE_SERVICE=celery
    # NOTE(review): Weblate Celery workers must share /app/data with the web
    # container (per the Weblate Docker deployment docs); without a shared
    # volume the worker operates on a divergent copy of the repositories.
    volumes:
      - weblate_data:/app/data
      - weblate_cache:/app/cache
    networks:
      - fluxer-shared
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
resources:
limits:
cpus: '2'
memory: 4G
networks:
fluxer-shared:
external: true
volumes:
weblate_postgres:
driver: local
weblate_data:
driver: local
weblate_cache:
driver: local