initial commit

This commit is contained in:
Hampus Kraft
2026-01-01 20:42:59 +00:00
commit 2f557eda8c
9029 changed files with 1490197 additions and 0 deletions

View File

@@ -0,0 +1,16 @@
# Global Caddy options shared by the gateway stack.
{
servers {
# Recover the real client address from a PROXY protocol header
# sent by an upstream load balancer before TLS is terminated.
listener_wrappers {
proxy_protocol {
timeout 5s
# Only accept PROXY headers from loopback and private ranges.
allow 127.0.0.0/8
allow 10.0.0.0/8
allow 172.16.0.0/12
allow ::1/128
}
# NOTE(review): the tls wrapper is listed after proxy_protocol so the
# PROXY header is stripped before the TLS handshake — confirm against
# the Caddy listener_wrappers ordering docs.
tls
}
# Trust X-Forwarded-* headers only when sent by private-range proxies.
trusted_proxies static private_ranges
trusted_proxies_strict
}
}

View File

@@ -0,0 +1,38 @@
# Swarm stack: Caddy ingress gateway whose config is generated from Docker
# labels (caddy-docker-proxy) using the "caddy_gw" label prefix.
services:
caddy-gateway:
image: lucaslorentz/caddy-docker-proxy:ci-alpine
environment:
# Only containers attached to this network are eligible for ingress.
- CADDY_INGRESS_NETWORKS=fluxer-shared
# Distinct prefix so this instance ignores the other stack's labels.
- CADDY_DOCKER_LABEL_PREFIX=caddy_gw
# Static base Caddyfile merged with the label-generated config.
- CADDY_DOCKER_CADDYFILE_PATH=/config/Caddyfile.base
volumes:
# Read-only Docker socket — required to watch service labels.
- /var/run/docker.sock:/var/run/docker.sock:ro
# Persist certificates and TLS state across restarts.
- caddy_gateway_data:/data
configs:
- source: caddyfile_config
target: /config/Caddyfile.base
networks:
- fluxer-shared
ports:
# Host-mode publishing (no ingress mesh): host 9443 -> container 443.
- target: 443
published: 9443
protocol: tcp
mode: host
deploy:
# One replica per matching node.
mode: global
placement:
constraints:
# NOTE(review): presumably pinned to managers so the Docker API
# socket sees all services — confirm.
- node.role == manager
restart_policy:
condition: on-failure
configs:
caddyfile_config:
file: ./Caddyfile.global
networks:
fluxer-shared:
external: true
volumes:
caddy_gateway_data:

View File

@@ -0,0 +1,16 @@
# Global Caddy options (same content as the gateway stack's Caddyfile.global).
{
servers {
# Recover the real client address from a PROXY protocol header
# sent by an upstream load balancer before TLS is terminated.
listener_wrappers {
proxy_protocol {
timeout 5s
# Only accept PROXY headers from loopback and private ranges.
allow 127.0.0.0/8
allow 10.0.0.0/8
allow 172.16.0.0/12
allow ::1/128
}
# NOTE(review): tls listed after proxy_protocol so the PROXY header is
# stripped before the TLS handshake — confirm ordering semantics.
tls
}
# Trust X-Forwarded-* headers only when sent by private-range proxies.
trusted_proxies static private_ranges
trusted_proxies_strict
}
}

View File

@@ -0,0 +1,41 @@
# Swarm stack: primary Caddy reverse proxy driven by Docker labels
# (default "caddy" label prefix, unlike the caddy_gw gateway stack).
services:
caddy:
image: lucaslorentz/caddy-docker-proxy:ci-alpine
environment:
# Only containers attached to this network are eligible for ingress.
- CADDY_INGRESS_NETWORKS=fluxer-shared
# Static base Caddyfile merged with the label-generated config.
- CADDY_DOCKER_CADDYFILE_PATH=/config/Caddyfile.base
volumes:
# Read-only Docker socket — required to watch service labels.
- /var/run/docker.sock:/var/run/docker.sock:ro
# Persist certificates and TLS state across restarts.
- caddy_data:/data
configs:
- source: caddyfile_config
target: /config/Caddyfile.base
networks:
- fluxer-shared
ports:
# Host-mode publishing (no ingress mesh): 8080->80 and 8443->443.
- target: 80
published: 8080
protocol: tcp
mode: host
- target: 443
published: 8443
protocol: tcp
mode: host
deploy:
# One replica per matching node.
mode: global
placement:
constraints:
# NOTE(review): presumably pinned to managers so the Docker API
# socket sees all services — confirm.
- node.role == manager
restart_policy:
condition: on-failure
configs:
caddyfile_config:
file: ./Caddyfile.global
networks:
fluxer-shared:
external: true
volumes:
caddy_data:

View File

@@ -0,0 +1,42 @@
# syntax=docker/dockerfile:1
# Backup sidecar image: cassandra:5.0 base (provides nodetool/cqlsh) plus
# age (encryption) and awscli (B2/S3 uploads), running backup.sh hourly.
FROM cassandra:5.0

# Install backup tools only; --no-install-recommends and the apt-list cleanup
# in the same layer keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    age \
    awscli \
    && rm -rf /var/lib/apt/lists/*

# Copy backup script; --chmod avoids a separate chmod layer.
COPY --chmod=0755 backup.sh /usr/local/bin/backup.sh

# Entrypoint: writes the age public key from the environment, waits for
# Cassandra, then runs backup.sh in an hourly loop. Written via a BuildKit
# heredoc — clearer and less fragile than echo'ing '\n' escape sequences,
# which only worked because dash's builtin echo interprets backslash escapes.
COPY --chmod=0755 <<'EOF' /usr/local/bin/backup-entrypoint.sh
#!/bin/bash
set -e

# Create age public key file from environment variable
if [ -n "${AGE_PUBLIC_KEY}" ]; then
    echo "${AGE_PUBLIC_KEY}" > /tmp/age_public_key.txt
    chmod 644 /tmp/age_public_key.txt
    echo "Age encryption enabled for backups"
else
    echo "Warning: AGE_PUBLIC_KEY not set - backups will not be encrypted"
fi

echo "Starting backup service - first backup in 5 minutes, then hourly"

# Wait 5 minutes before first backup
echo "Waiting 5 minutes for Cassandra to be ready..."
sleep 300

# Run backups in a loop
while true; do
    echo "-----------------------------------"
    echo "Starting backup at $(date)"
    /usr/local/bin/backup.sh || echo "Backup failed at $(date)"
    echo "Next backup in 1 hour"
    sleep 3600
done
EOF

ENTRYPOINT ["/usr/local/bin/backup-entrypoint.sh"]

View File

@@ -0,0 +1,146 @@
# Cassandra Restore
## Fresh Instance from Local Backup
```bash
# 1. Create volume and start Cassandra
docker volume create cassandra_data
docker run -d --name cass -v cassandra_data:/var/lib/cassandra -p 9042:9042 cassandra:5.0
echo "Waiting for Cassandra to start..."
sleep 30
# 2. Extract backup and apply schema
docker exec cass bash -c 'apt-get update -qq && apt-get install -y -qq age'
docker cp ~/Downloads/backup.tar.age cass:/tmp/
docker cp ~/Downloads/key.txt cass:/tmp/
docker exec cass bash -c 'age -d -i /tmp/key.txt /tmp/backup.tar.age | tar -C /tmp -xf -'
docker exec cass bash -c 'sed "/^WARNING:/d" /tmp/cassandra-backup-*/schema.cql | cqlsh'
# 3. Copy backup to volume and stop Cassandra
docker exec cass bash -c 'cp -r /tmp/cassandra-backup-* /var/lib/cassandra/'
docker stop cass
docker run -d --name cass-util -v cassandra_data:/var/lib/cassandra --entrypoint sleep cassandra:5.0 infinity
docker exec cass-util bash -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
DATA_DIR=/var/lib/cassandra/data
for keyspace_dir in "$BACKUP_DIR"/*/; do
keyspace=$(basename "$keyspace_dir")
[[ "$keyspace" =~ ^system ]] && continue
[ ! -d "$keyspace_dir" ] && continue
for snapshot_dir in "$keyspace_dir"/*/snapshots/backup-*/; do
[ ! -d "$snapshot_dir" ] && continue
table_with_uuid=$(basename $(dirname $(dirname "$snapshot_dir")))
table_name=$(echo "$table_with_uuid" | cut -d- -f1)
target_dir=$(ls -d "$DATA_DIR/$keyspace/${table_name}"-* 2>/dev/null | head -1)
if [ -n "$target_dir" ]; then
cp "$snapshot_dir"/* "$target_dir"/ 2>/dev/null || true
fi
done
done
chown -R cassandra:cassandra "$DATA_DIR"
'
# 4. Restart Cassandra and refresh tables
docker rm -f cass-util
docker start cass
sleep 30
# 5. Run nodetool refresh on all tables
docker exec cass bash -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
for keyspace_dir in "$BACKUP_DIR"/*/; do
keyspace=$(basename "$keyspace_dir")
[[ "$keyspace" =~ ^system ]] && continue
for snapshot_dir in "$keyspace_dir"/*/snapshots/backup-*/; do
[ ! -d "$snapshot_dir" ] && continue
table_with_uuid=$(basename $(dirname $(dirname "$snapshot_dir")))
table_name=$(echo "$table_with_uuid" | cut -d- -f1)
nodetool refresh -- "$keyspace" "$table_name" 2>&1 | grep -v deprecated || true
done
done
'
# 6. Verify
docker exec cass cqlsh -e "SELECT COUNT(*) FROM fluxer.users;"
```
## Production Restore from B2
> [!IMPORTANT]
> This assumes you have B2 credentials configured on the server.
```bash
# 0. Set variables
BACKUP_NAME="cassandra-backup-20251016-103753.tar.age" # Replace with actual backup name
CASSANDRA_CONTAINER="cassandra-prod"
# 1. Download backup from B2 (on the server)
export AWS_ACCESS_KEY_ID="${B2_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${B2_APPLICATION_KEY}"
export AWS_DEFAULT_REGION="${B2_REGION}"
B2_ENDPOINT_URL="https://${B2_ENDPOINT}"
aws s3 cp "s3://${B2_BUCKET_NAME}/${BACKUP_NAME}" \
"/tmp/${BACKUP_NAME}" \
--endpoint-url="${B2_ENDPOINT_URL}"
# 2. Stop Cassandra
docker stop ${CASSANDRA_CONTAINER}
# 3. Start the utility container and install age in it.
# (--volumes-from shares only the data volumes, not /tmp or installed
# packages, so the backup, the key, and age must all live in the utility
# container itself — not in the stopped Cassandra container.)
docker run -d --name cass-restore-util --volumes-from ${CASSANDRA_CONTAINER} --entrypoint sleep cassandra:5.0 infinity
docker exec cass-restore-util bash -c 'apt-get update -qq && apt-get install -y -qq age'
# 4. Copy backup and key into the utility container, then extract.
# (Double quotes so the host shell expands ${BACKUP_NAME}.)
docker cp "/tmp/${BACKUP_NAME}" cass-restore-util:/tmp/
docker cp /etc/cassandra/age_private_key.txt cass-restore-util:/tmp/key.txt
docker exec cass-restore-util bash -c "age -d -i /tmp/key.txt '/tmp/${BACKUP_NAME}' | tar -C /tmp -xf -"
docker exec cass-restore-util bash -c 'cp -r /tmp/cassandra-backup-* /var/lib/cassandra/'
docker exec cass-restore-util bash -c 'cp -r /tmp/cassandra-backup-* /var/lib/cassandra/'
# 5. Copy SSTable files to existing schema directories
docker exec cass-restore-util bash -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
DATA_DIR=/var/lib/cassandra/data
for keyspace_dir in "$BACKUP_DIR"/*/; do
keyspace=$(basename "$keyspace_dir")
[[ "$keyspace" =~ ^system ]] && continue
[ ! -d "$keyspace_dir" ] && continue
for snapshot_dir in "$keyspace_dir"/*/snapshots/backup-*/; do
[ ! -d "$snapshot_dir" ] && continue
table_with_uuid=$(basename $(dirname $(dirname "$snapshot_dir")))
table_name=$(echo "$table_with_uuid" | cut -d- -f1)
target_dir=$(ls -d "$DATA_DIR/$keyspace/${table_name}"-* 2>/dev/null | head -1)
if [ -n "$target_dir" ]; then
cp "$snapshot_dir"/* "$target_dir"/ 2>/dev/null || true
fi
done
done
chown -R cassandra:cassandra "$DATA_DIR"
'
# 6. Restart Cassandra
docker rm -f cass-restore-util
docker start ${CASSANDRA_CONTAINER}
sleep 30
# 7. Run nodetool refresh
docker exec ${CASSANDRA_CONTAINER} bash -c '
BACKUP_DIR=$(ls -d /var/lib/cassandra/cassandra-backup-* | head -1)
for keyspace_dir in "$BACKUP_DIR"/*/; do
keyspace=$(basename "$keyspace_dir")
[[ "$keyspace" =~ ^system ]] && continue
for snapshot_dir in "$keyspace_dir"/*/snapshots/backup-*/; do
[ ! -d "$snapshot_dir" ] && continue
table_with_uuid=$(basename $(dirname $(dirname "$snapshot_dir")))
table_name=$(echo "$table_with_uuid" | cut -d- -f1)
nodetool refresh -- "$keyspace" "$table_name" 2>&1 | grep -v deprecated || true
done
done
'
# 8. Verify
docker exec ${CASSANDRA_CONTAINER} cqlsh -e "SELECT COUNT(*) FROM fluxer.users;"
# 9. Cleanup
rm -f "/tmp/${BACKUP_NAME}"
```

140
fluxer_devops/cassandra/backup.sh Executable file
View File

@@ -0,0 +1,140 @@
#!/bin/bash
# Copyright (C) 2026 Fluxer Contributors
#
# This file is part of Fluxer.
#
# Fluxer is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fluxer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
#
# Hourly Cassandra backup: snapshot -> collect files -> encrypt with age ->
# upload to B2 -> prune old remote backups. Requires B2_KEY_ID,
# B2_APPLICATION_KEY, B2_REGION, B2_ENDPOINT, B2_BUCKET_NAME in the
# environment (set -u makes a missing one a hard error). If the age public
# key file is absent the backup is kept locally, unencrypted.
set -euo pipefail

TIMESTAMP=$(date +%Y%m%d-%H%M%S)
BACKUP_NAME="cassandra-backup-${TIMESTAMP}"
SNAPSHOT_TAG="backup-${TIMESTAMP}"
DATA_DIR="/var/lib/cassandra/data"
TEMP_DIR="/tmp/${BACKUP_NAME}"
AGE_PUBLIC_KEY_FILE="${AGE_PUBLIC_KEY_FILE:-/tmp/age_public_key.txt}"
ENCRYPTED_BACKUP="${BACKUP_NAME}.tar.age"
MAX_BACKUP_COUNT=168 # 7 days of hourly backups
CASSANDRA_HOST="${CASSANDRA_HOST:-cassandra}"

# AWS CLI configuration for B2 (S3-compatible endpoint)
export AWS_ACCESS_KEY_ID="${B2_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${B2_APPLICATION_KEY}"
export AWS_DEFAULT_REGION="${B2_REGION}"
B2_ENDPOINT_URL="https://${B2_ENDPOINT}"

echo "[$(date)] Starting Cassandra backup: ${BACKUP_NAME}"

# Step 1: Create snapshot (hard-links SSTables server-side via nodetool)
echo "[$(date)] Creating Cassandra snapshot: ${SNAPSHOT_TAG}"
if ! nodetool -h "${CASSANDRA_HOST}" snapshot -t "${SNAPSHOT_TAG}"; then
    echo "[$(date)] Error: Failed to create snapshot"
    exit 1
fi
echo "[$(date)] Snapshot created successfully"

# Step 2: Collect snapshot files
echo "[$(date)] Collecting snapshot files"
mkdir -p "${TEMP_DIR}"

# Copy every snapshot directory into TEMP_DIR, preserving the
# keyspace/table layout relative to DATA_DIR. IFS=/-r keep paths intact
# even with unusual characters; the loop's subshell is fine since no
# variables are needed afterwards.
find "${DATA_DIR}" -type d -name "${SNAPSHOT_TAG}" | while IFS= read -r snapshot_dir; do
    # Table directory path relative to the data dir (prefix strip replaces
    # the previous unquoted dirname/sed pipeline, which word-split paths).
    rel_path=$(dirname "${snapshot_dir#"${DATA_DIR}"/}")
    target_dir="${TEMP_DIR}/${rel_path}"
    mkdir -p "${target_dir}"
    cp -r "${snapshot_dir}" "${target_dir}/"
done

# Copy schema so a restore can recreate tables before loading SSTables
echo "[$(date)] Saving schema"
if [ -n "${CASSANDRA_PASSWORD:-}" ]; then
    if ! cqlsh -u cassandra -p "${CASSANDRA_PASSWORD}" "${CASSANDRA_HOST}" -e "DESC SCHEMA;" 2>/dev/null | sed '/^WARNING:/d' > "${TEMP_DIR}/schema.cql"; then
        echo "Warning: Could not export schema"
    fi
else
    if ! cqlsh "${CASSANDRA_HOST}" -e "DESC SCHEMA;" 2>/dev/null | sed '/^WARNING:/d' > "${TEMP_DIR}/schema.cql"; then
        echo "Warning: Could not export schema (no password set)"
    fi
fi

# Save cluster topology info (best-effort; failures are non-fatal)
nodetool -h "${CASSANDRA_HOST}" describecluster > "${TEMP_DIR}/cluster_topology.txt" 2>/dev/null || true
nodetool -h "${CASSANDRA_HOST}" status > "${TEMP_DIR}/cluster_status.txt" 2>/dev/null || true
echo "[$(date)] Snapshot collection completed"

# Step 3: Check if encryption is enabled; without a key, keep the backup
# locally (unencrypted) and skip the upload entirely.
if [ ! -f "${AGE_PUBLIC_KEY_FILE}" ]; then
    echo "[$(date)] Warning: Age public key not found - skipping encryption and upload"
    echo "[$(date)] Backup stored locally at: ${TEMP_DIR}"
    # Clear snapshot from Cassandra
    nodetool -h "${CASSANDRA_HOST}" clearsnapshot -t "${SNAPSHOT_TAG}"
    exit 0
fi

# Step 4: Create tar archive and encrypt with age (streamed; no plaintext
# archive ever hits disk)
echo "[$(date)] Encrypting backup with age..."
if ! tar -C /tmp -cf - "${BACKUP_NAME}" | \
    age -r "$(cat "${AGE_PUBLIC_KEY_FILE}")" -o "/tmp/${ENCRYPTED_BACKUP}"; then
    echo "[$(date)] Error: Encryption failed"
    rm -rf "${TEMP_DIR}"
    nodetool -h "${CASSANDRA_HOST}" clearsnapshot -t "${SNAPSHOT_TAG}"
    exit 1
fi
echo "[$(date)] Encryption completed: ${ENCRYPTED_BACKUP}"

# Get file size (for logging only)
BACKUP_SIZE=$(du -h "/tmp/${ENCRYPTED_BACKUP}" | cut -f1)
echo "[$(date)] Encrypted backup size: ${BACKUP_SIZE}"

# Step 5: Upload encrypted backup to B2
echo "[$(date)] Uploading encrypted backup to B2..."
if ! aws s3 cp "/tmp/${ENCRYPTED_BACKUP}" \
    "s3://${B2_BUCKET_NAME}/${ENCRYPTED_BACKUP}" \
    --endpoint-url="${B2_ENDPOINT_URL}"; then
    echo "[$(date)] Error: Upload to B2 failed"
    rm -f "/tmp/${ENCRYPTED_BACKUP}"
    rm -rf "${TEMP_DIR}"
    nodetool -h "${CASSANDRA_HOST}" clearsnapshot -t "${SNAPSHOT_TAG}"
    exit 1
fi
echo "[$(date)] Upload completed successfully"

# Step 6: Cleanup
echo "[$(date)] Cleaning up temporary files..."
rm -f "/tmp/${ENCRYPTED_BACKUP}"
rm -rf "${TEMP_DIR}"

# Clear snapshot from Cassandra
echo "[$(date)] Clearing snapshot from Cassandra"
nodetool -h "${CASSANDRA_HOST}" clearsnapshot -t "${SNAPSHOT_TAG}"

# Step 7: Purge old backups from B2, keeping the newest MAX_BACKUP_COUNT.
# Sorting the timestamped names lexicographically (reversed) orders them
# newest-first; deletions are best-effort.
echo "[$(date)] Purging old backups from B2 (keeping last ${MAX_BACKUP_COUNT})..."
aws s3 ls "s3://${B2_BUCKET_NAME}/" --endpoint-url="${B2_ENDPOINT_URL}" | \
    grep "cassandra-backup-.*\.tar\.age$" | \
    awk '{print $4}' | \
    sort -r | \
    tail -n +$((MAX_BACKUP_COUNT + 1)) | \
    while IFS= read -r old_backup; do
        echo "[$(date)] Deleting old backup: ${old_backup}"
        aws s3 rm "s3://${B2_BUCKET_NAME}/${old_backup}" --endpoint-url="${B2_ENDPOINT_URL}" || true
    done

echo "[$(date)] Backup process completed successfully"
echo "[$(date)] Backup name: ${ENCRYPTED_BACKUP}"
echo "[$(date)] Backup size: ${BACKUP_SIZE}"

View File

@@ -0,0 +1,95 @@
# Swarm stack: single-node Cassandra plus a backup sidecar built from the
# fluxer-cassandra-backup image.
services:
cassandra:
image: ${CASSANDRA_IMAGE:-cassandra:5.0}
hostname: cassandra
env_file:
- .env
environment:
- CASSANDRA_CLUSTER_NAME=fluxer-cluster
- CASSANDRA_DC=dc1
- CASSANDRA_RACK=rack1
- CASSANDRA_ENDPOINT_SNITCH=GossipingPropertyFileSnitch
# Single node seeds itself.
- CASSANDRA_SEEDS=cassandra
# NOTE(review): the mounted jvm-server.options also pins -Xms/-Xmx 32G;
# confirm which one wins so these don't drift apart.
- MAX_HEAP_SIZE=32G
- CASSANDRA_BROADCAST_ADDRESS=cassandra
- CASSANDRA_LISTEN_ADDRESS=auto
- CASSANDRA_RPC_ADDRESS=0.0.0.0
- CASSANDRA_AUTHENTICATOR=PasswordAuthenticator
- CASSANDRA_AUTHORIZER=CassandraAuthorizer
volumes:
- cassandra_data:/var/lib/cassandra
# Config overrides baked into the repo, mounted over the image defaults.
- ./conf/cassandra.yaml:/etc/cassandra/cassandra.yaml
- ./conf/jvm-server.options:/etc/cassandra/jvm-server.options
networks:
- fluxer-shared
ports:
# Host-mode CQL port (no ingress mesh).
- target: 9042
published: 9042
protocol: tcp
mode: host
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 30s
max_attempts: 3
resources:
limits:
cpus: '6'
memory: 64G
reservations:
cpus: '4'
memory: 48G
healthcheck:
test: ['CMD-SHELL', 'nodetool status']
interval: 30s
timeout: 10s
retries: 5
# Generous start window — Cassandra startup is slow.
start_period: 120s
ulimits:
# Unlimited memlock + IPC_LOCK below allow the JVM to pin memory.
memlock:
soft: -1
hard: -1
nofile:
soft: 100000
hard: 100000
cap_add:
- IPC_LOCK
cassandra-backup:
image: ${CASSANDRA_BACKUP_IMAGE:-fluxer-cassandra-backup:latest}
environment:
# Pass-through secrets from the deploy environment (no values baked in).
- AGE_PUBLIC_KEY
- B2_KEY_ID
- B2_APPLICATION_KEY
- B2_BUCKET_NAME
- B2_ENDPOINT
- B2_REGION
- CASSANDRA_PASSWORD
volumes:
# Read-only is sufficient: backup.sh only reads snapshot files here;
# snapshot create/clear happens server-side via nodetool.
- cassandra_data:/var/lib/cassandra:ro
networks:
- fluxer-shared
depends_on:
- cassandra
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 30s
max_attempts: 3
resources:
limits:
cpus: '1'
memory: 2G
reservations:
cpus: '0.5'
memory: 1G
networks:
fluxer-shared:
external: true
volumes:
cassandra_data:
driver: local
View File

@@ -0,0 +1,69 @@
# Cassandra 5.0 server configuration (mounted over the image default).
cluster_name: 'fluxer-cluster'
num_tokens: 256
partitioner: org.apache.cassandra.dht.Murmur3Partitioner
seed_provider:
- class_name: org.apache.cassandra.locator.SimpleSeedProvider
parameters:
# Single-node cluster seeds itself via the service hostname.
- seeds: 'cassandra'
listen_address: auto
broadcast_address: cassandra
rpc_address: 0.0.0.0
broadcast_rpc_address: cassandra
storage_port: 7000
ssl_storage_port: 7001
native_transport_port: 9042
endpoint_snitch: GossipingPropertyFileSnitch
# Directory layout matches the cassandra_data volume mount.
data_file_directories:
- /var/lib/cassandra/data
commitlog_directory: /var/lib/cassandra/commitlog
saved_caches_directory: /var/lib/cassandra/saved_caches
hints_directory: /var/lib/cassandra/hints
# Concurrency / memtable tuning.
concurrent_reads: 32
concurrent_writes: 32
concurrent_counter_writes: 32
concurrent_materialized_view_writes: 32
memtable_allocation_type: heap_buffers
memtable_flush_writers: 4
# Periodic commitlog sync: up to 10s of acknowledged writes can be lost
# on a crash in exchange for throughput.
commitlog_sync: periodic
commitlog_sync_period: 10000ms
commitlog_segment_size: 32MiB
commitlog_total_space: 8192MiB
compaction_throughput: 64MiB/s
concurrent_compactors: 4
sstable_preemptive_open_interval: 50MiB
# Caches.
key_cache_size: 2048MiB
key_cache_save_period: 14400
row_cache_size: 0MiB
counter_cache_size: 256MiB
counter_cache_save_period: 7200
# Request timeouts (all raised from defaults).
read_request_timeout: 10000ms
write_request_timeout: 10000ms
counter_write_request_timeout: 10000ms
range_request_timeout: 20000ms
request_timeout: 20000ms
# Authentication/authorization must match the compose environment.
authenticator: PasswordAuthenticator
authorizer: CassandraAuthorizer
role_manager: CassandraRoleManager
native_transport_max_threads: 128
native_transport_max_frame_size: 256MiB
auto_snapshot: true
snapshot_before_compaction: false
tombstone_warn_threshold: 10000
tombstone_failure_threshold: 100000
disk_optimization_strategy: ssd
disk_access_mode: auto
stream_throughput_outbound: 400MiB/s
inter_dc_stream_throughput_outbound: 200MiB/s
max_hints_delivery_threads: 4
hints_flush_period: 10000ms
gc_warn_threshold: 1000ms
gc_log_threshold: 200ms
batch_size_warn_threshold: 5KiB
batch_size_fail_threshold: 50KiB
prepared_statements_cache_size: 10MiB
# No TLS for client or inter-node traffic; traffic stays on the private
# overlay network.
client_encryption_options:
enabled: false
optional: false
server_encryption_options:
internode_encryption: none
optional: false

View File

@@ -0,0 +1,48 @@
# JVM options for the Cassandra server process.
# Fixed 32G heap (Xms == Xmx avoids resizing pauses).
-Xms32G
-Xmx32G
# G1 GC tuning.
-XX:+UseG1GC
-XX:+ParallelRefProcEnabled
-XX:MaxTenuringThreshold=2
-XX:G1HeapRegionSize=16m
-XX:G1RSetUpdatingPauseTimePercent=5
-XX:MaxGCPauseMillis=300
-XX:InitiatingHeapOccupancyPercent=70
# Rotating GC logs (10 x 100M).
-Xlog:gc*,gc+age=trace,safepoint:file=/var/log/cassandra/gc.log:time,uptime,level,tags:filecount=10,filesize=100M
-XX:+AlwaysPreTouch
-XX:+IgnoreUnrecognizedVMOptions
-Xss512k
-XX:+UseTLAB
-XX:+ResizeTLAB
-XX:+PerfDisableSharedMem
-XX:+UseStringDeduplication
# Heap dumps on OOM land on the persistent data volume.
-XX:+HeapDumpOnOutOfMemoryError
-XX:HeapDumpPath=/var/lib/cassandra/dumps
-Djdk.attach.allowAttachSelf=true
# Module opens/exports required by Cassandra on modern JDKs.
--add-exports=java.base/jdk.internal.misc=ALL-UNNAMED
--add-exports=java.base/jdk.internal.ref=ALL-UNNAMED
--add-exports=java.base/sun.nio.ch=ALL-UNNAMED
--add-exports=java.management.rmi/com.sun.jmx.remote.internal.rmi=ALL-UNNAMED
--add-exports=java.rmi/sun.rmi.registry=ALL-UNNAMED
--add-exports=java.rmi/sun.rmi.server=ALL-UNNAMED
--add-exports=java.sql/java.sql=ALL-UNNAMED
--add-opens=java.base/java.lang.module=ALL-UNNAMED
--add-opens=java.base/jdk.internal.loader=ALL-UNNAMED
--add-opens=java.base/jdk.internal.ref=ALL-UNNAMED
--add-opens=java.base/jdk.internal.reflect=ALL-UNNAMED
--add-opens=java.base/jdk.internal.math=ALL-UNNAMED
--add-opens=java.base/jdk.internal.module=ALL-UNNAMED
--add-opens=java.base/jdk.internal.util.jar=ALL-UNNAMED
--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED
# NOTE(review): JMX is exposed on a remote port with authentication and SSL
# disabled — this lets the backup sidecar run nodetool remotely, but anything
# on the overlay network can issue JMX operations. Confirm this is acceptable.
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false
-Dcassandra.jmx.local.port=7199
-Dcassandra.jmx.remote.port=7199
-Djava.net.preferIPv4Stack=true
-Dio.netty.tryReflectionSetAccessible=true
-Dio.netty.allocator.useCacheForAllThreads=true
-Dio.netty.eventLoop.maxPendingTasks=65536
-Dcassandra.config=file:///etc/cassandra/cassandra.yaml
-Dcassandra.logdir=/var/log/cassandra
-Dcassandra.storagedir=/var/lib/cassandra
# Non-blocking entropy source to avoid startup stalls.
-Djava.security.egd=file:/dev/urandom
-Dfile.encoding=UTF-8

View File

@@ -0,0 +1,919 @@
-- Primary account record, partitioned solely by user_id: point lookups only.
-- Secondary access paths (email, phone, username, stripe ids) are provided
-- by the dedicated users_by_* lookup tables defined below.
CREATE TABLE IF NOT EXISTS fluxer.users (
user_id bigint,
username text,
discriminator int,
bot boolean,
system boolean,
email text,
email_verified boolean,
email_bounced boolean,
phone text,
password_hash text,
totp_secret text,
authenticator_types set<int>,
avatar_hash text,
banner_hash text,
bio text,
accent_color int,
date_of_birth date,
locale text,
flags bigint,
premium_type int,
premium_since timestamp,
premium_until timestamp,
premium_lifetime_sequence int,
stripe_subscription_id text,
stripe_customer_id text,
suspicious_activity_flags int,
terms_agreed_at timestamp,
privacy_agreed_at timestamp,
last_active_at timestamp,
last_active_ip text,
temp_banned_until timestamp,
pending_deletion_at timestamp,
password_last_changed_at timestamp,
pronouns text,
acls set<text>,
deletion_reason_code int,
deletion_public_reason text,
deletion_audit_log_reason text,
first_refund_at timestamp,
PRIMARY KEY ((user_id))
);
CREATE TABLE IF NOT EXISTS fluxer.users_by_email (
email_lower text,
user_id bigint,
PRIMARY KEY ((email_lower), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.users_by_phone (
phone text,
user_id bigint,
PRIMARY KEY ((phone), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.users_by_username (
username text,
discriminator int,
user_id bigint,
PRIMARY KEY ((username), discriminator, user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.users_by_stripe_subscription_id (
stripe_subscription_id text,
user_id bigint,
PRIMARY KEY ((stripe_subscription_id), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.users_by_stripe_customer_id (
stripe_customer_id text,
user_id bigint,
PRIMARY KEY ((stripe_customer_id), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.user_activity_tracking (
activity_month text,
last_active_at timestamp,
user_id bigint,
PRIMARY KEY ((activity_month), last_active_at, user_id)
) WITH CLUSTERING ORDER BY (last_active_at ASC, user_id ASC);
CREATE TABLE IF NOT EXISTS fluxer.users_pending_deletion (
deletion_date date,
pending_deletion_at timestamp,
user_id bigint,
deletion_reason_code int,
PRIMARY KEY ((deletion_date), pending_deletion_at, user_id)
) WITH CLUSTERING ORDER BY (pending_deletion_at ASC, user_id ASC);
CREATE TYPE IF NOT EXISTS fluxer.custom_status (
text text,
emoji_id bigint,
emoji_name text,
emoji_animated boolean,
expires_at timestamp
);
CREATE TYPE IF NOT EXISTS fluxer.guild_folder (
folder_id int,
name text,
color int,
guild_ids list<bigint>
);
CREATE TABLE IF NOT EXISTS fluxer.user_settings (
user_id bigint,
locale text,
theme text,
status text,
custom_status frozen<custom_status>,
developer_mode boolean,
message_display_compact boolean,
animate_emoji boolean,
animate_stickers int,
gif_auto_play boolean,
render_embeds boolean,
render_reactions boolean,
render_spoilers int,
inline_attachment_media boolean,
inline_embed_media boolean,
explicit_content_filter int,
friend_source_flags int,
default_guilds_restricted boolean,
restricted_guilds set<bigint>,
guild_positions list<bigint>,
guild_folders frozen<list<guild_folder>>,
afk_timeout int,
time_format int,
PRIMARY KEY ((user_id))
);
CREATE TABLE IF NOT EXISTS fluxer.relationships (
source_user_id bigint,
target_user_id bigint,
type int,
nickname text,
since timestamp,
PRIMARY KEY ((source_user_id), target_user_id, type)
);
CREATE TABLE IF NOT EXISTS fluxer.notes (
source_user_id bigint,
target_user_id bigint,
note text,
PRIMARY KEY ((source_user_id), target_user_id)
);
CREATE TYPE IF NOT EXISTS fluxer.mute_config (
end_time timestamp,
selected_time_window int
);
CREATE TYPE IF NOT EXISTS fluxer.channel_override (
collapsed boolean,
message_notifications int,
muted boolean,
mute_config frozen<mute_config>
);
CREATE TABLE IF NOT EXISTS fluxer.user_guild_settings (
user_id bigint,
guild_id bigint,
message_notifications int,
muted boolean,
mute_config frozen<mute_config>,
mobile_push boolean,
suppress_everyone boolean,
suppress_roles boolean,
hide_muted_channels boolean,
channel_overrides frozen<map<bigint, channel_override>>,
version int,
PRIMARY KEY ((user_id), guild_id)
);
CREATE TABLE IF NOT EXISTS fluxer.private_channels (
user_id bigint,
channel_id bigint,
is_gdm boolean,
PRIMARY KEY ((user_id), channel_id)
);
CREATE TABLE IF NOT EXISTS fluxer.dm_states (
hi_user_id bigint,
lo_user_id bigint,
channel_id bigint,
PRIMARY KEY ((hi_user_id, lo_user_id), channel_id)
);
CREATE TABLE IF NOT EXISTS fluxer.read_states (
user_id bigint,
channel_id bigint,
message_id bigint,
mention_count int,
last_pin_timestamp timestamp,
PRIMARY KEY ((user_id), channel_id)
);
CREATE TABLE IF NOT EXISTS fluxer.recent_mentions (
user_id bigint,
message_id bigint,
channel_id bigint,
guild_id bigint,
is_everyone boolean,
is_role boolean,
PRIMARY KEY ((user_id), message_id)
) WITH CLUSTERING ORDER BY (message_id DESC)
AND default_time_to_live = 604800;
CREATE TABLE IF NOT EXISTS fluxer.recent_mentions_by_guild (
user_id bigint,
guild_id bigint,
message_id bigint,
channel_id bigint,
is_everyone boolean,
is_role boolean,
PRIMARY KEY ((user_id, guild_id), message_id)
) WITH CLUSTERING ORDER BY (message_id DESC)
AND default_time_to_live = 604800;
CREATE TABLE IF NOT EXISTS fluxer.saved_messages (
user_id bigint,
channel_id bigint,
message_id bigint,
saved_at timestamp,
PRIMARY KEY ((user_id), message_id)
) WITH CLUSTERING ORDER BY (message_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.auth_sessions (
session_id_hash blob,
user_id bigint,
created_at timestamp,
approx_last_used_at timestamp,
client_ip text,
client_os text,
client_platform text,
client_country text,
PRIMARY KEY ((session_id_hash))
);
CREATE TABLE IF NOT EXISTS fluxer.auth_sessions_by_user_id (
user_id bigint,
session_id_hash blob,
PRIMARY KEY ((user_id), session_id_hash)
);
CREATE TABLE IF NOT EXISTS fluxer.mfa_backup_codes (
user_id bigint,
code text,
consumed boolean,
PRIMARY KEY ((user_id), code)
);
CREATE TABLE IF NOT EXISTS fluxer.webauthn_credentials (
user_id bigint,
credential_id text,
public_key blob,
counter bigint,
transports set<text>,
name text,
created_at timestamp,
last_used_at timestamp,
PRIMARY KEY (user_id, credential_id)
);
CREATE TABLE IF NOT EXISTS fluxer.webauthn_credential_lookup (
credential_id text PRIMARY KEY,
user_id bigint
);
CREATE TABLE IF NOT EXISTS fluxer.email_verification_tokens (
token_ text,
user_id bigint,
email text,
PRIMARY KEY ((token_), user_id)
) WITH default_time_to_live = 86400;
CREATE TABLE IF NOT EXISTS fluxer.password_reset_tokens (
token_ text,
user_id bigint,
email text,
PRIMARY KEY ((token_), user_id)
) WITH default_time_to_live = 3600;
CREATE TABLE IF NOT EXISTS fluxer.ip_authorization_tokens (
token_ text,
user_id bigint,
email text,
PRIMARY KEY ((token_), user_id)
) WITH default_time_to_live = 1800;
CREATE TABLE IF NOT EXISTS fluxer.email_revert_tokens (
token_ text,
user_id bigint,
email text,
PRIMARY KEY ((token_), user_id)
) WITH default_time_to_live = 172800;
CREATE TABLE IF NOT EXISTS fluxer.mfa_tickets (
ticket text,
user_id bigint,
PRIMARY KEY ((ticket), user_id)
) WITH default_time_to_live = 300;
CREATE TABLE IF NOT EXISTS fluxer.phone_tokens (
token_ text PRIMARY KEY,
phone text,
user_id bigint
);
CREATE TABLE IF NOT EXISTS fluxer.authorized_ips (
user_id bigint,
ip text,
PRIMARY KEY ((user_id, ip))
);
CREATE TABLE IF NOT EXISTS fluxer.banned_emails (
email_lower text PRIMARY KEY
);
CREATE TABLE IF NOT EXISTS fluxer.banned_phones (
phone text PRIMARY KEY
);
CREATE TABLE IF NOT EXISTS fluxer.banned_ips (
ip text PRIMARY KEY
);
CREATE TABLE IF NOT EXISTS fluxer.beta_codes (
code text,
creator_id bigint,
created_at timestamp,
redeemer_id bigint,
redeemed_at timestamp,
PRIMARY KEY ((creator_id), code)
);
CREATE TABLE IF NOT EXISTS fluxer.beta_codes_by_code (
code text,
creator_id bigint,
PRIMARY KEY ((code), creator_id)
);
CREATE TABLE IF NOT EXISTS fluxer.gift_codes (
code text,
amount_cents int,
duration_months int,
created_at timestamp,
created_by_user_id bigint,
redeemed_at timestamp,
redeemed_by_user_id bigint,
stripe_payment_intent_id text,
PRIMARY KEY ((code))
);
CREATE TABLE IF NOT EXISTS fluxer.gift_codes_by_creator (
created_by_user_id bigint,
code text,
PRIMARY KEY ((created_by_user_id), code)
);
CREATE TABLE IF NOT EXISTS fluxer.gift_codes_by_redeemer (
redeemed_by_user_id bigint,
code text,
PRIMARY KEY ((redeemed_by_user_id), code)
);
CREATE TABLE IF NOT EXISTS fluxer.gift_codes_by_payment_intent (
stripe_payment_intent_id text,
code text,
PRIMARY KEY ((stripe_payment_intent_id), code)
);
CREATE TABLE IF NOT EXISTS fluxer.guilds (
guild_id bigint,
owner_id bigint,
name text,
vanity_url_code text,
icon_hash text,
banner_hash text,
splash_hash text,
features set<text>,
verification_level int,
mfa_level int,
nsfw_level int,
explicit_content_filter int,
default_message_notifications int,
system_channel_id bigint,
system_channel_flags int,
rules_channel_id bigint,
afk_channel_id bigint,
afk_timeout int,
disabled_operations int,
max_presences int,
member_count int,
audit_logs_indexed_at timestamp,
PRIMARY KEY ((guild_id))
);
-- Reverse lookup: all guilds owned by a given user.
-- The partition key must be owner_id (the lookup column). The original
-- definition used PRIMARY KEY ((guild_id), owner_id), which makes a query
-- by owner_id impossible without a full scan and contradicts every other
-- *_by_X table in this schema (all of which partition by X).
CREATE TABLE IF NOT EXISTS fluxer.guilds_by_owner_id (
owner_id bigint,
guild_id bigint,
PRIMARY KEY ((owner_id), guild_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_members (
guild_id bigint,
user_id bigint,
joined_at timestamp,
nick text,
avatar_hash text,
banner_hash text,
join_source_type int,
source_invite_code text,
inviter_id bigint,
deaf boolean,
mute boolean,
communication_disabled_until timestamp,
role_ids set<bigint>,
is_premium_sanitized boolean,
bio text,
accent_color int,
pronouns text,
temporary boolean,
PRIMARY KEY ((guild_id), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_members_by_user_id (
user_id bigint,
guild_id bigint,
PRIMARY KEY ((user_id), guild_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_roles (
guild_id bigint,
role_id bigint,
name text,
permissions bigint,
position int,
color int,
icon_hash text,
unicode_emoji text,
hoist boolean,
mentionable boolean,
PRIMARY KEY ((guild_id), role_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_bans (
guild_id bigint,
user_id bigint,
moderator_id bigint,
banned_at timestamp,
expires_at timestamp,
reason text,
ip text,
PRIMARY KEY ((guild_id), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_bans_by_ip (
guild_id bigint,
ip text,
user_id bigint,
PRIMARY KEY ((guild_id, ip), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_emojis (
guild_id bigint,
emoji_id bigint,
name text,
creator_id bigint,
animated boolean,
PRIMARY KEY ((guild_id), emoji_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_emojis_by_emoji_id (
emoji_id bigint,
guild_id bigint,
name text,
creator_id bigint,
animated boolean,
PRIMARY KEY ((emoji_id))
);
CREATE TABLE IF NOT EXISTS fluxer.guild_stickers (
guild_id bigint,
sticker_id bigint,
name text,
description text,
format_type int,
tags list<text>,
creator_id bigint,
PRIMARY KEY ((guild_id), sticker_id)
);
CREATE TABLE IF NOT EXISTS fluxer.guild_stickers_by_sticker_id (
sticker_id bigint,
guild_id bigint,
name text,
description text,
format_type int,
tags list<text>,
creator_id bigint,
PRIMARY KEY ((sticker_id))
);
CREATE TABLE IF NOT EXISTS fluxer.guild_audit_logs (
guild_id bigint,
log_id bigint,
user_id bigint,
target_type text,
target_id text,
action text,
audit_log_reason text,
metadata map<text, text>,
changes text,
created_at timestamp,
PRIMARY KEY ((guild_id), log_id)
) WITH CLUSTERING ORDER BY (log_id DESC);
CREATE TYPE IF NOT EXISTS fluxer.permission_overwrite (
type int,
allow_ bigint,
deny_ bigint
);
CREATE TABLE IF NOT EXISTS fluxer.channels (
channel_id bigint,
guild_id bigint,
type int,
name text,
topic text,
icon_hash text,
url text,
parent_id bigint,
position int,
owner_id bigint,
recipient_ids set<bigint>,
nsfw boolean,
rate_limit_per_user int,
bitrate int,
user_limit int,
rtc_region text,
last_message_id bigint,
last_pin_timestamp timestamp,
permission_overwrites frozen<map<bigint, permission_overwrite>>,
nicks map<text, text>,
soft_deleted boolean,
indexed_at timestamp,
PRIMARY KEY ((channel_id, soft_deleted))
);
CREATE TABLE IF NOT EXISTS fluxer.channels_by_guild_id (
guild_id bigint,
channel_id bigint,
PRIMARY KEY ((guild_id), channel_id)
);
CREATE TABLE IF NOT EXISTS fluxer.invites (
code text,
type int,
guild_id bigint,
channel_id bigint,
inviter_id bigint,
created_at timestamp,
uses int,
max_uses int,
max_age int,
temporary boolean,
PRIMARY KEY ((code))
);
CREATE TABLE IF NOT EXISTS fluxer.invites_by_guild_id (
guild_id bigint,
code text,
PRIMARY KEY ((guild_id), code)
);
CREATE TABLE IF NOT EXISTS fluxer.invites_by_channel_id (
channel_id bigint,
code text,
PRIMARY KEY ((channel_id), code)
);
CREATE TABLE IF NOT EXISTS fluxer.webhooks (
webhook_id bigint,
webhook_token text,
type int,
guild_id bigint,
channel_id bigint,
creator_id bigint,
name text,
avatar_hash text,
PRIMARY KEY ((webhook_id), webhook_token)
);
CREATE TABLE IF NOT EXISTS fluxer.webhooks_by_guild_id (
guild_id bigint,
webhook_id bigint,
PRIMARY KEY ((guild_id), webhook_id)
);
CREATE TABLE IF NOT EXISTS fluxer.webhooks_by_channel_id (
channel_id bigint,
webhook_id bigint,
PRIMARY KEY ((channel_id), webhook_id)
);
CREATE TYPE IF NOT EXISTS fluxer.message_attachment (
attachment_id bigint,
filename text,
size bigint,
title text,
description text,
width int,
height int,
duration int,
content_type text,
content_hash text,
placeholder text,
flags int,
nsfw boolean
);
CREATE TYPE IF NOT EXISTS fluxer.message_embed_author (
name text,
url text,
icon_url text
);
CREATE TYPE IF NOT EXISTS fluxer.message_embed_provider (
name text,
url text
);
CREATE TYPE IF NOT EXISTS fluxer.message_embed_footer (
text text,
icon_url text
);
CREATE TYPE IF NOT EXISTS fluxer.message_embed_media (
url text,
width int,
height int,
duration int,
description text,
content_type text,
content_hash text,
placeholder text,
flags int
);
CREATE TYPE IF NOT EXISTS fluxer.message_embed_field (
name text,
value text,
inline boolean
);
CREATE TYPE IF NOT EXISTS fluxer.message_embed (
type text,
title text,
description text,
url text,
timestamp timestamp,
color int,
author frozen<message_embed_author>,
provider frozen<message_embed_provider>,
thumbnail frozen<message_embed_media>,
image frozen<message_embed_media>,
video frozen<message_embed_media>,
footer frozen<message_embed_footer>,
fields frozen<list<message_embed_field>>,
nsfw boolean
);
CREATE TYPE IF NOT EXISTS fluxer.message_sticker_item (
sticker_id bigint,
name text,
format_type int
);
CREATE TYPE IF NOT EXISTS fluxer.message_reference (
channel_id bigint,
message_id bigint,
guild_id bigint,
type int
);
CREATE TYPE IF NOT EXISTS fluxer.message_call (
participant_ids set<bigint>,
ended_timestamp timestamp
);
-- Frozen copy of a referenced/forwarded message, embedded in messages.message_snapshots.
CREATE TYPE IF NOT EXISTS fluxer.message_snapshot (
content text,
timestamp timestamp,
-- NOTE(review): "edited_timestmap" is a typo for "edited_timestamp" (compare
-- iar_message_context.edited_timestamp). UDT fields cannot be renamed after
-- data exists without a migration -- fix before application code ships against
-- the misspelled name.
edited_timestmap timestamp,
mention_users set<bigint>,
mention_roles set<bigint>,
mention_channels set<bigint>,
attachments frozen<list<message_attachment>>,
embeds frozen<list<message_embed>>,
sticker_items frozen<list<message_sticker_item>>,
type int,
flags int
);
CREATE TABLE IF NOT EXISTS fluxer.messages (
channel_id bigint,
bucket int,
message_id bigint,
author_id bigint,
type int,
webhook_id bigint,
webhook_name text,
webhook_avatar_hash text,
content text,
edited_timestamp timestamp,
pinned_timestamp timestamp,
flags int,
mention_everyone boolean,
mention_users set<bigint>,
mention_roles set<bigint>,
mention_channels set<bigint>,
attachments frozen<list<message_attachment>>,
embeds frozen<list<message_embed>>,
sticker_items frozen<list<message_sticker_item>>,
message_reference frozen<message_reference>,
call frozen<message_call>,
message_snapshots frozen<list<message_snapshot>>,
PRIMARY KEY ((channel_id, bucket), message_id)
) WITH CLUSTERING ORDER BY (message_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.channel_pins (
channel_id bigint,
message_id bigint,
pinned_timestamp timestamp,
PRIMARY KEY ((channel_id), pinned_timestamp, message_id)
) WITH CLUSTERING ORDER BY (pinned_timestamp DESC, message_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.messages_by_author_id (
author_id bigint,
channel_id bigint,
message_id bigint,
PRIMARY KEY ((author_id), channel_id, message_id)
);
CREATE TABLE IF NOT EXISTS fluxer.message_reactions (
channel_id bigint,
bucket int,
message_id bigint,
emoji_id bigint,
emoji_name text,
user_id bigint,
emoji_animated boolean,
PRIMARY KEY ((channel_id, bucket), message_id, emoji_id, emoji_name, user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.attachment_lookup (
channel_id bigint,
attachment_id bigint,
filename text,
message_id bigint,
PRIMARY KEY ((channel_id), attachment_id, filename)
);
CREATE TABLE IF NOT EXISTS fluxer.favorite_memes (
user_id bigint,
meme_id bigint,
name text,
alt_text text,
tags list<text>,
attachment_id bigint,
filename text,
content_type text,
content_hash text,
size bigint,
width int,
height int,
duration int,
storage_key text,
created_at timestamp,
PRIMARY KEY ((user_id), meme_id)
) WITH CLUSTERING ORDER BY (meme_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.favorite_memes_by_meme_id (
meme_id bigint,
user_id bigint,
PRIMARY KEY ((meme_id), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.push_subscriptions (
user_id bigint,
subscription_id text,
endpoint text,
p256dh_key text,
auth_key text,
user_agent text,
PRIMARY KEY ((user_id), subscription_id)
);
CREATE TABLE IF NOT EXISTS fluxer.processed_payments (
payment_intent_id text,
user_id bigint,
checkout_session_id text,
price_id text,
product_type text,
amount_cents int,
currency text,
is_gift boolean,
processed_at timestamp,
PRIMARY KEY ((payment_intent_id))
);
CREATE TABLE IF NOT EXISTS fluxer.processed_payments_by_user (
user_id bigint,
payment_intent_id text,
processed_at timestamp,
PRIMARY KEY ((user_id), processed_at, payment_intent_id)
) WITH CLUSTERING ORDER BY (processed_at DESC, payment_intent_id ASC);
CREATE TABLE IF NOT EXISTS fluxer.visionary_purchases (
purchase_id bigint,
user_id bigint,
purchased_at timestamp,
sequence_number int,
stripe_payment_intent_id text,
PRIMARY KEY ((purchase_id))
);
CREATE TABLE IF NOT EXISTS fluxer.visionary_purchases_by_user (
user_id bigint,
purchase_id bigint,
PRIMARY KEY ((user_id), purchase_id)
);
CREATE TABLE IF NOT EXISTS fluxer.user_harvests (
user_id bigint,
harvest_id bigint,
requested_at timestamp,
started_at timestamp,
completed_at timestamp,
failed_at timestamp,
storage_key text,
file_size bigint,
progress_percent int,
progress_step text,
error_message text,
download_url_expires_at timestamp,
PRIMARY KEY (user_id, harvest_id)
) WITH CLUSTERING ORDER BY (harvest_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.admin_audit_logs (
log_id bigint PRIMARY KEY,
admin_user_id bigint,
target_type text,
target_id bigint,
action text,
audit_log_reason text,
metadata map<text, text>,
created_at timestamp
);
CREATE TYPE IF NOT EXISTS fluxer.iar_message_context (
message_id bigint,
author_id bigint,
author_username text,
author_discriminator int,
author_avatar_hash text,
content text,
timestamp timestamp,
edited_timestamp timestamp,
type int,
flags int,
mention_everyone boolean,
mention_users set<bigint>,
mention_roles set<bigint>,
mention_channels set<bigint>,
attachments frozen<list<message_attachment>>,
embeds frozen<list<message_embed>>,
sticker_items frozen<list<message_sticker_item>>
);
CREATE TABLE IF NOT EXISTS fluxer.iar_submissions (
report_id bigint,
reporter_id bigint,
reported_at timestamp,
status int,
report_type int,
category text,
additional_info text,
reported_user_id bigint,
reported_user_username text,
reported_user_discriminator int,
reported_user_avatar_hash text,
reported_guild_id bigint,
reported_guild_name text,
reported_guild_icon_hash text,
reported_message_id bigint,
reported_channel_id bigint,
reported_channel_name text,
message_context frozen<list<iar_message_context>>,
guild_context_id bigint,
resolved_at timestamp,
resolved_by_admin_id bigint,
public_comment text,
audit_log_reason text,
PRIMARY KEY ((report_id))
);
CREATE TABLE IF NOT EXISTS fluxer.pending_verifications (
user_id bigint,
created_at timestamp,
PRIMARY KEY ((user_id))
);
-- Reverse index of pending_verifications, keyed by request time.
CREATE TABLE IF NOT EXISTS fluxer.pending_verifications_by_time (
created_at timestamp,
user_id bigint,
-- NOTE(review): the partition key is the EXACT timestamp, so enumerating
-- pending verifications requires already knowing each created_at value (or an
-- ALLOW FILTERING full scan). A coarser time bucket (e.g. day) as the partition
-- key with created_at as a clustering column would make range reads practical
-- -- confirm the intended access pattern. The CLUSTERING ORDER clause below is
-- the default (ASC) and is purely declarative.
PRIMARY KEY ((created_at), user_id)
) WITH CLUSTERING ORDER BY (user_id ASC);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.gift_codes ADD visionary_sequence_number int;

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.users ADD beta_code_allowance int;
ALTER TABLE fluxer.users ADD beta_code_last_reset_at timestamp;

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.users ADD premium_will_cancel boolean;
ALTER TABLE fluxer.users ADD has_ever_purchased boolean;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.users ADD premium_billing_cycle text;

View File

@@ -0,0 +1,64 @@
CREATE TABLE IF NOT EXISTS fluxer.payments (
checkout_session_id text,
user_id bigint,
stripe_customer_id text,
payment_intent_id text,
subscription_id text,
invoice_id text,
price_id text,
product_type text,
amount_cents int,
currency text,
status text,
is_gift boolean,
gift_code text,
created_at timestamp,
completed_at timestamp,
PRIMARY KEY ((checkout_session_id))
);
CREATE TABLE IF NOT EXISTS fluxer.payments_by_payment_intent (
payment_intent_id text,
checkout_session_id text,
PRIMARY KEY ((payment_intent_id))
);
CREATE TABLE IF NOT EXISTS fluxer.payments_by_subscription (
subscription_id text,
checkout_session_id text,
user_id bigint,
price_id text,
product_type text,
PRIMARY KEY ((subscription_id))
);
-- Per-user index of checkout sessions, newest first.
CREATE TABLE IF NOT EXISTS fluxer.payments_by_user (
user_id bigint,
created_at timestamp,
checkout_session_id text,
-- checkout_session_id is part of the clustering key so two checkouts created at
-- the same millisecond cannot silently overwrite each other (same pattern as
-- processed_payments_by_user above). Existing queries by (user_id) ordered by
-- created_at DESC are unaffected.
PRIMARY KEY ((user_id), created_at, checkout_session_id)
) WITH CLUSTERING ORDER BY (created_at DESC, checkout_session_id ASC);
ALTER TABLE fluxer.gift_codes ADD checkout_session_id text;
CREATE TABLE IF NOT EXISTS fluxer.visionary_users (
sequence_number int,
user_id bigint,
checkout_session_id text,
gift_code text,
granted_at timestamp,
PRIMARY KEY ((sequence_number))
);
CREATE TABLE IF NOT EXISTS fluxer.visionary_users_by_user (
user_id bigint,
sequence_number int,
granted_at timestamp,
PRIMARY KEY ((user_id))
);
CREATE TABLE IF NOT EXISTS fluxer.visionary_counter (
id text,
next_sequence_number counter,
PRIMARY KEY ((id))
);

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.users ADD gift_inventory_server_seq int;
ALTER TABLE fluxer.users ADD gift_inventory_client_seq int;

View File

@@ -0,0 +1,5 @@
CREATE TABLE IF NOT EXISTS fluxer.visionary_slots (
slot_index int,
user_id bigint,
PRIMARY KEY ((slot_index))
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.favorite_memes ADD is_gifv boolean;

View File

@@ -0,0 +1,6 @@
CREATE TABLE IF NOT EXISTS fluxer.pinned_dms (
user_id bigint,
channel_id bigint,
sort_order int,
PRIMARY KEY ((user_id), channel_id)
);

View File

@@ -0,0 +1,32 @@
CREATE TABLE IF NOT EXISTS fluxer.voice_regions (
id text PRIMARY KEY,
name text,
emoji text,
latitude double,
longitude double,
priority int,
is_default boolean,
vip_only boolean,
required_guild_features set<text>,
allowed_guild_ids set<bigint>,
allowed_user_ids set<bigint>,
created_at timestamp,
updated_at timestamp
);
CREATE TABLE IF NOT EXISTS fluxer.voice_servers (
region_id text,
server_id text,
endpoint text,
api_key text,
api_secret text,
priority int,
is_active boolean,
vip_only boolean,
required_guild_features set<text>,
allowed_guild_ids set<bigint>,
allowed_user_ids set<bigint>,
created_at timestamp,
updated_at timestamp,
PRIMARY KEY (region_id, server_id)
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.user_settings ADD incoming_call_flags int;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.user_settings ADD group_dm_add_permission_flags int;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.favorite_memes ADD tenor_id bigint;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.users ADD global_name text;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.users ADD premium_onboarding_dismissed_at timestamp;

View File

@@ -0,0 +1,135 @@
CREATE TABLE IF NOT EXISTS fluxer.oauth_clients (
client_id bigint PRIMARY KEY,
client_secret_hash text,
name text,
description text,
icon_url text,
owner_user_id bigint,
team_id bigint,
type text,
redirect_uris set<text>,
scopes set<text>,
grant_types set<text>,
homepage_url text,
created_at timestamp,
updated_at timestamp
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_clients_by_owner (
owner_user_id bigint,
client_id bigint,
PRIMARY KEY ((owner_user_id), client_id)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_authorization_requests (
request_id text PRIMARY KEY,
client_id bigint,
redirect_uri text,
scope set<text>,
state text,
code_challenge text,
code_challenge_method text,
nonce text,
created_at timestamp,
expires_at timestamp
) WITH default_time_to_live = 900;
CREATE TABLE IF NOT EXISTS fluxer.oauth_authorization_codes (
code text PRIMARY KEY,
client_id bigint,
user_id bigint,
redirect_uri text,
scope set<text>,
code_challenge text,
code_challenge_method text,
nonce text,
created_at timestamp,
expires_at timestamp
) WITH default_time_to_live = 900;
CREATE TABLE IF NOT EXISTS fluxer.oauth_access_tokens (
token_ text PRIMARY KEY,
client_id bigint,
user_id bigint,
scope set<text>,
created_at timestamp,
expires_at timestamp
) WITH default_time_to_live = 86400;
CREATE TABLE IF NOT EXISTS fluxer.oauth_access_tokens_by_client (
client_id bigint,
token_ text,
PRIMARY KEY ((client_id), token_)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_access_tokens_by_user (
user_id bigint,
token_ text,
PRIMARY KEY ((user_id), token_)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_refresh_tokens (
token_ text PRIMARY KEY,
client_id bigint,
user_id bigint,
scope set<text>,
created_at timestamp,
expires_at timestamp
) WITH default_time_to_live = 2592000;
CREATE TABLE IF NOT EXISTS fluxer.oauth_refresh_tokens_by_client (
client_id bigint,
token_ text,
PRIMARY KEY ((client_id), token_)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_refresh_tokens_by_user (
user_id bigint,
token_ text,
PRIMARY KEY ((user_id), token_)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_teams (
team_id bigint PRIMARY KEY,
name text,
owner_user_id bigint,
created_at timestamp
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_teams_by_owner (
owner_user_id bigint,
team_id bigint,
PRIMARY KEY ((owner_user_id), team_id)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_team_members (
team_id bigint,
user_id bigint,
role text,
added_at timestamp,
PRIMARY KEY ((team_id), user_id)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_team_members_by_user (
user_id bigint,
team_id bigint,
PRIMARY KEY ((user_id), team_id)
);
CREATE TABLE IF NOT EXISTS fluxer.oauth_bot_tokens (
token_ text PRIMARY KEY,
client_id bigint,
user_id bigint,
scopes set<text>,
created_at timestamp,
revoked boolean
);
CREATE TABLE IF NOT EXISTS fluxer.oidc_keys (
kid text PRIMARY KEY,
alg text,
public_jwk text,
private_jwk text,
created_at timestamp,
active boolean
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.favorite_memes ADD tenor_id_str text;

View File

@@ -0,0 +1,14 @@
CREATE TABLE IF NOT EXISTS fluxer.oidc_keys_by_kid (
kid text PRIMARY KEY,
alg text,
public_jwk text,
private_jwk text,
created_at timestamp
);
CREATE TABLE IF NOT EXISTS fluxer.oidc_key_status (
name text PRIMARY KEY,
active_kid text,
published_kids set<text>,
updated_at timestamp
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.guild_members ADD profile_flags int;

View File

@@ -0,0 +1,10 @@
CREATE TABLE IF NOT EXISTS fluxer.push_devices (
user_id bigint,
device_id text,
fcm_token text,
platform text,
device_name text,
created_at timestamp,
updated_at timestamp,
PRIMARY KEY (user_id, device_id)
);

View File

@@ -0,0 +1,6 @@
CREATE TABLE IF NOT EXISTS fluxer.push_devices_by_fcm_token (
fcm_token text,
user_id bigint,
device_id text,
PRIMARY KEY ((fcm_token), user_id, device_id)
);

View File

@@ -0,0 +1,7 @@
ALTER TABLE fluxer.push_devices WITH default_time_to_live = 7776000;
ALTER TABLE fluxer.push_devices_by_fcm_token WITH default_time_to_live = 7776000;
ALTER TABLE fluxer.push_subscriptions WITH default_time_to_live = 7776000;
ALTER TABLE fluxer.phone_tokens WITH default_time_to_live = 2592000;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.oauth_clients ADD bot_is_public boolean;

View File

@@ -0,0 +1,8 @@
CREATE TABLE IF NOT EXISTS fluxer.oauth_bot_tokens_by_client (
client_id bigint,
token_ text,
user_id bigint,
scopes set<text>,
created_at timestamp,
PRIMARY KEY (client_id, token_)
);

View File

@@ -0,0 +1,63 @@
CREATE TABLE IF NOT EXISTS fluxer.applications (
application_id bigint PRIMARY KEY,
owner_user_id bigint,
name text,
bot_user_id bigint,
is_confidential boolean,
oauth2_redirect_uris set<text>,
oauth2_scopes set<text>,
client_secret_hash text,
bot_token_hash text,
bot_token_preview text,
bot_token_created_at timestamp,
client_secret_created_at timestamp
);
CREATE TABLE IF NOT EXISTS fluxer.applications_by_owner (
owner_user_id bigint,
application_id bigint,
PRIMARY KEY ((owner_user_id), application_id)
) WITH CLUSTERING ORDER BY (application_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.oauth2_authorization_codes (
code text PRIMARY KEY,
application_id bigint,
user_id bigint,
redirect_uri text,
scope set<text>,
code_challenge text,
code_challenge_method text,
nonce text,
created_at timestamp,
expires_at timestamp
) WITH default_time_to_live = 600;
CREATE TABLE IF NOT EXISTS fluxer.oauth2_access_tokens (
token_ text PRIMARY KEY,
application_id bigint,
user_id bigint,
scope set<text>,
created_at timestamp,
expires_at timestamp
) WITH default_time_to_live = 3600;
CREATE TABLE IF NOT EXISTS fluxer.oauth2_access_tokens_by_user (
user_id bigint,
token_ text,
PRIMARY KEY ((user_id), token_)
) WITH CLUSTERING ORDER BY (token_ DESC);
CREATE TABLE IF NOT EXISTS fluxer.oauth2_refresh_tokens (
token_ text PRIMARY KEY,
application_id bigint,
user_id bigint,
scope set<text>,
created_at timestamp,
expires_at timestamp
) WITH default_time_to_live = 2592000;
CREATE TABLE IF NOT EXISTS fluxer.oauth2_refresh_tokens_by_user (
user_id bigint,
token_ text,
PRIMARY KEY ((user_id), token_)
) WITH CLUSTERING ORDER BY (token_ DESC);

View File

@@ -0,0 +1,5 @@
CREATE TABLE IF NOT EXISTS fluxer.authorized_ips_v2 (
user_id bigint,
ip text,
PRIMARY KEY (user_id, ip)
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.guilds ADD banner_height int;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.guilds ADD banner_width int;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.applications ADD bot_is_public boolean;

View File

@@ -0,0 +1,56 @@
CREATE TABLE IF NOT EXISTS fluxer.admin_archives_by_subject (
subject_type text,
subject_id bigint,
archive_id bigint,
requested_by bigint,
requested_at timestamp,
started_at timestamp,
completed_at timestamp,
failed_at timestamp,
storage_key text,
file_size bigint,
progress_percent int,
progress_step text,
error_message text,
download_url_expires_at timestamp,
expires_at timestamp,
PRIMARY KEY ((subject_type, subject_id), archive_id)
) WITH CLUSTERING ORDER BY (archive_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.admin_archives_by_requester (
requested_by bigint,
archive_id bigint,
subject_type text,
subject_id bigint,
requested_at timestamp,
started_at timestamp,
completed_at timestamp,
failed_at timestamp,
storage_key text,
file_size bigint,
progress_percent int,
progress_step text,
error_message text,
download_url_expires_at timestamp,
expires_at timestamp,
PRIMARY KEY ((requested_by), archive_id)
) WITH CLUSTERING ORDER BY (archive_id DESC);
CREATE TABLE IF NOT EXISTS fluxer.admin_archives_by_type (
subject_type text,
archive_id bigint,
subject_id bigint,
requested_by bigint,
requested_at timestamp,
started_at timestamp,
completed_at timestamp,
failed_at timestamp,
storage_key text,
file_size bigint,
progress_percent int,
progress_step text,
error_message text,
download_url_expires_at timestamp,
expires_at timestamp,
PRIMARY KEY ((subject_type), archive_id)
) WITH CLUSTERING ORDER BY (archive_id DESC);

View File

@@ -0,0 +1,5 @@
ALTER TABLE fluxer.oauth2_access_tokens
WITH default_time_to_live = 604800;
ALTER TABLE fluxer.oauth2_access_tokens_by_user
WITH default_time_to_live = 604800;

View File

@@ -0,0 +1,22 @@
-- Per-attachment decay/garbage-collection bookkeeping, keyed by attachment id.
CREATE TABLE IF NOT EXISTS fluxer.attachment_decay_by_id (
attachment_id bigint PRIMARY KEY,
channel_id bigint,
message_id bigint,
filename text,
-- NOTE(review): size_bytes is varint while every other size column in this
-- schema (message_attachment.size, user_harvests.file_size, ...) is bigint --
-- confirm varint is intentional rather than a drive-by inconsistency.
size_bytes varint,
uploaded_at timestamp,
expires_at timestamp,
last_accessed_at timestamp,
cost double,
lifetime_days int,
status text
);
CREATE TABLE IF NOT EXISTS fluxer.attachment_decay_by_expiry (
expiry_bucket int,
expires_at timestamp,
attachment_id bigint,
channel_id bigint,
message_id bigint,
PRIMARY KEY ((expiry_bucket), expires_at, attachment_id)
);

View File

@@ -0,0 +1,25 @@
CREATE TABLE IF NOT EXISTS fluxer.email_change_tickets (
ticket text PRIMARY KEY,
user_id bigint,
require_original boolean,
original_email text,
original_verified boolean,
original_code text,
original_code_sent_at timestamp,
original_code_expires_at timestamp,
new_email text,
new_code text,
new_code_sent_at timestamp,
new_code_expires_at timestamp,
status text,
created_at timestamp,
updated_at timestamp
);
CREATE TABLE IF NOT EXISTS fluxer.email_change_tokens (
token_ text PRIMARY KEY,
user_id bigint,
new_email text,
expires_at timestamp,
created_at timestamp
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.email_change_tickets ADD original_proof text;

View File

@@ -0,0 +1,11 @@
CREATE TABLE IF NOT EXISTS fluxer.user_contact_change_logs (
user_id bigint,
event_id timeuuid,
field text,
old_value text,
new_value text,
reason text,
actor_user_id bigint,
event_at timestamp,
PRIMARY KEY ((user_id), event_id)
) WITH CLUSTERING ORDER BY (event_id DESC);

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.users ADD avatar_color int;
ALTER TABLE fluxer.users ADD banner_color int;

View File

@@ -0,0 +1,51 @@
CREATE TABLE IF NOT EXISTS fluxer.guild_audit_logs_v2 (
guild_id bigint,
log_id bigint,
user_id bigint,
target_id text,
action_type int,
reason text,
options map<text, text>,
changes text,
PRIMARY KEY ((guild_id), log_id)
) WITH CLUSTERING ORDER BY (log_id DESC)
AND default_time_to_live = 3888000;
CREATE TABLE IF NOT EXISTS fluxer.guild_audit_logs_v2_by_user (
guild_id bigint,
user_id bigint,
log_id bigint,
target_id text,
action_type int,
reason text,
options map<text, text>,
changes text,
PRIMARY KEY ((guild_id, user_id), log_id)
) WITH CLUSTERING ORDER BY (log_id DESC)
AND default_time_to_live = 3888000;
CREATE TABLE IF NOT EXISTS fluxer.guild_audit_logs_v2_by_action (
guild_id bigint,
action_type int,
log_id bigint,
user_id bigint,
target_id text,
reason text,
options map<text, text>,
changes text,
PRIMARY KEY ((guild_id, action_type), log_id)
) WITH CLUSTERING ORDER BY (log_id DESC)
AND default_time_to_live = 3888000;
CREATE TABLE IF NOT EXISTS fluxer.guild_audit_logs_v2_by_user_action (
guild_id bigint,
user_id bigint,
action_type int,
log_id bigint,
target_id text,
reason text,
options map<text, text>,
changes text,
PRIMARY KEY ((guild_id, user_id, action_type), log_id)
) WITH CLUSTERING ORDER BY (log_id DESC)
AND default_time_to_live = 3888000;

View File

@@ -0,0 +1,3 @@
ALTER TABLE fluxer.users ADD pending_bulk_message_deletion_at timestamp;
ALTER TABLE fluxer.users ADD pending_bulk_message_deletion_channel_count int;
ALTER TABLE fluxer.users ADD pending_bulk_message_deletion_message_count int;

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.guilds ADD splash_width int;
ALTER TABLE fluxer.guilds ADD splash_height int;

View File

@@ -0,0 +1,3 @@
ALTER TABLE fluxer.guilds ADD embed_splash_hash text;
ALTER TABLE fluxer.guilds ADD embed_splash_width int;
ALTER TABLE fluxer.guilds ADD embed_splash_height int;

View File

@@ -0,0 +1,15 @@
CREATE TABLE IF NOT EXISTS fluxer.channel_state (
channel_id bigint PRIMARY KEY,
created_bucket int,
has_messages boolean,
last_message_id bigint,
last_message_bucket int,
updated_at timestamp
);
CREATE TABLE IF NOT EXISTS fluxer.channel_message_buckets (
channel_id bigint,
bucket int,
updated_at timestamp,
PRIMARY KEY (channel_id, bucket)
) WITH CLUSTERING ORDER BY (bucket DESC);

View File

@@ -0,0 +1,6 @@
CREATE TABLE IF NOT EXISTS fluxer.channel_empty_buckets (
channel_id bigint,
bucket int,
updated_at timestamp,
PRIMARY KEY ((channel_id), bucket)
) WITH CLUSTERING ORDER BY (bucket DESC);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.payments ADD version int;

View File

@@ -0,0 +1,20 @@
ALTER TABLE fluxer.users ADD version int;
ALTER TABLE fluxer.user_settings ADD version int;
ALTER TABLE fluxer.guilds ADD version int;
ALTER TABLE fluxer.guild_members ADD version int;
ALTER TABLE fluxer.guild_roles ADD version int;
ALTER TABLE fluxer.guild_emojis ADD version int;
ALTER TABLE fluxer.guild_stickers ADD version int;
ALTER TABLE fluxer.channels ADD version int;
ALTER TABLE fluxer.messages ADD version int;
ALTER TABLE fluxer.notes ADD version int;
ALTER TABLE fluxer.relationships ADD version int;
ALTER TABLE fluxer.favorite_memes ADD version int;
ALTER TABLE fluxer.invites ADD version int;
ALTER TABLE fluxer.webhooks ADD version int;
ALTER TABLE fluxer.applications ADD version int;
ALTER TABLE fluxer.auth_sessions ADD version int;
ALTER TABLE fluxer.gift_codes ADD version int;
ALTER TABLE fluxer.beta_codes ADD version int;
ALTER TABLE fluxer.pending_verifications ADD version int;
ALTER TABLE fluxer.webauthn_credentials ADD version int;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.guilds ADD splash_card_alignment int;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.messages ADD has_reaction boolean;

View File

@@ -0,0 +1,17 @@
-- Free-form key/value instance settings, editable at runtime by operators.
CREATE TABLE IF NOT EXISTS fluxer.instance_configuration (
key text PRIMARY KEY,
value text,
updated_at timestamp
);
-- Seed defaults. IF NOT EXISTS keeps a re-applied migration from clobbering
-- values an operator has since changed (the CREATE above is already idempotent,
-- so this file is evidently expected to be safely re-runnable).
INSERT INTO fluxer.instance_configuration (key, value, updated_at)
VALUES ('manual_review_enabled', 'true', toTimestamp(now())) IF NOT EXISTS;
INSERT INTO fluxer.instance_configuration (key, value, updated_at)
VALUES ('manual_review_schedule_enabled', 'false', toTimestamp(now())) IF NOT EXISTS;
INSERT INTO fluxer.instance_configuration (key, value, updated_at)
VALUES ('manual_review_schedule_start_hour_utc', '0', toTimestamp(now())) IF NOT EXISTS;
INSERT INTO fluxer.instance_configuration (key, value, updated_at)
VALUES ('manual_review_schedule_end_hour_utc', '23', toTimestamp(now())) IF NOT EXISTS;

View File

@@ -0,0 +1,18 @@
CREATE TABLE IF NOT EXISTS fluxer.expression_packs (
pack_id bigint PRIMARY KEY,
pack_type text,
creator_id bigint,
name text,
description text,
created_at timestamp,
updated_at timestamp,
version int
);
CREATE TABLE IF NOT EXISTS fluxer.pack_installations (
user_id bigint,
pack_id bigint,
pack_type text,
installed_at timestamp,
PRIMARY KEY (user_id, pack_id)
);

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.pending_verifications ADD metadata map<text, text>;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.auth_sessions ADD client_location text;

View File

@@ -0,0 +1,14 @@
CREATE TABLE IF NOT EXISTS fluxer.scheduled_messages (
user_id bigint,
scheduled_message_id bigint,
channel_id bigint,
payload text,
scheduled_at timestamp,
scheduled_local_at text,
timezone text,
status text,
status_reason text,
created_at timestamp,
invalidated_at timestamp,
PRIMARY KEY (user_id, scheduled_message_id)
) WITH CLUSTERING ORDER BY (scheduled_message_id DESC);

View File

@@ -0,0 +1,20 @@
ALTER TABLE fluxer.iar_submissions ADD reporter_email text;
ALTER TABLE fluxer.iar_submissions ADD reporter_full_legal_name text;
ALTER TABLE fluxer.iar_submissions ADD reporter_country_of_residence text;
ALTER TABLE fluxer.iar_submissions ADD reported_guild_invite_code text;
CREATE TABLE IF NOT EXISTS fluxer.dsa_report_email_verifications (
email_lower text,
code_hash text,
expires_at timestamp,
last_sent_at timestamp,
PRIMARY KEY ((email_lower))
);
CREATE TABLE IF NOT EXISTS fluxer.dsa_report_tickets (
ticket text,
email_lower text,
expires_at timestamp,
created_at timestamp,
PRIMARY KEY ((ticket))
);

View File

@@ -0,0 +1,11 @@
CREATE TABLE IF NOT EXISTS fluxer.expression_packs_by_creator (
creator_id bigint,
pack_id bigint,
pack_type text,
name text,
description text,
created_at timestamp,
updated_at timestamp,
version int,
PRIMARY KEY (creator_id, pack_id)
);

View File

@@ -0,0 +1,2 @@
ALTER TABLE fluxer.user_settings ADD status_resets_at timestamp;
ALTER TABLE fluxer.user_settings ADD status_resets_to text;

View File

@@ -0,0 +1 @@
ALTER TABLE fluxer.guild_roles ADD hoist_position int;

View File

@@ -0,0 +1,28 @@
CREATE TABLE IF NOT EXISTS fluxer.swish_payments (
payment_id text,
user_id bigint,
product_type text,
status text,
is_gift boolean,
amount int,
currency text,
message text,
payer_alias text,
payment_reference text,
gift_code text,
created_at timestamp,
completed_at timestamp,
PRIMARY KEY ((payment_id))
);
-- Per-user index of Swish payments, newest first.
CREATE TABLE IF NOT EXISTS fluxer.swish_payments_by_user (
user_id bigint,
created_at timestamp,
payment_id text,
product_type text,
status text,
is_gift boolean,
amount int,
currency text,
-- payment_id is part of the clustering key so two payments created at the same
-- millisecond cannot silently overwrite each other (mirrors
-- processed_payments_by_user). Queries by (user_id) ordered by created_at DESC
-- are unaffected.
PRIMARY KEY ((user_id), created_at, payment_id)
) WITH CLUSTERING ORDER BY (created_at DESC, payment_id ASC);

View File

@@ -0,0 +1,38 @@
# ClamAV daemon for attachment scanning, deployed as a single swarm replica on
# the shared overlay network.
services:
  clamav:
    image: clamav/clamav:1.4
    hostname: clamav
    volumes:
      # Persist signature databases between restarts so freshclam does not
      # re-download everything on every deploy.
      - clamav_data:/var/lib/clamav
      - ./conf/clamd.conf:/etc/clamav/clamd.conf:ro
    networks:
      - fluxer-shared
    ports:
      - '3310:3310'
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '2'
          memory: 4G
        reservations:
          cpus: '1'
          memory: 2G
    healthcheck:
      # NOTE(review): `clamdscan --version` may exit 0 without actually talking
      # to the clamd daemon, so this can report healthy while scans would fail.
      # The official image ships a clamdcheck helper that PINGs the daemon --
      # consider using that instead. Confirm before relying on this check.
      test: ['CMD-SHELL', 'clamdscan --version || exit 1']
      interval: 30s
      timeout: 10s
      retries: 5
      # Signature download on first boot is slow; give it a generous grace period.
      start_period: 60s
networks:
  fluxer-shared:
    external: true
volumes:
  clamav_data:
    driver: local

View File

@@ -0,0 +1,54 @@
# Listening
LocalSocket /tmp/clamd.sock
TCPSocket 3310
TCPAddr 0.0.0.0
# Threading
MaxThreads 12
MaxConnectionQueueLength 30
# Scanner limits
MaxScanSize 150M
MaxFileSize 100M
MaxRecursion 16
MaxFiles 10000
MaxEmbeddedPE 10M
MaxHTMLNormalize 10M
MaxHTMLNoTags 2M
MaxScriptNormalize 5M
MaxZipTypeRcg 1M
# Scanning options
ScanPE yes
ScanELF yes
ScanOLE2 yes
ScanPDF yes
ScanSWF yes
ScanHTML yes
ScanMail yes
ScanArchive yes
ScanPartialMessages yes
AlertBrokenExecutables yes
AlertEncrypted no
AlertEncryptedArchive no
AlertEncryptedDoc no
AlertOLE2Macros yes
AlertPhishingSSLMismatch no
AlertPhishingCloak no
# Database
DatabaseDirectory /var/lib/clamav
OfficialDatabaseOnly no
DetectPUA yes
ExcludePUA NetTool
ExcludePUA PWTool
HeuristicScanPrecedence yes
# Logging
LogTime yes
LogClean no
LogVerbose no
ExtendedDetectionInfo yes
# Performance
BytecodeTimeout 60000

View File

@@ -0,0 +1,52 @@
services:
clickhouse:
image: clickhouse/clickhouse-server:24.8
hostname: clickhouse
env_file:
- /etc/fluxer/clickhouse.env
volumes:
- clickhouse_data:/var/lib/clickhouse
- clickhouse_logs:/var/log/clickhouse-server
- ./conf/config.xml:/etc/clickhouse-server/config.d/custom.xml:ro
- ./conf/users.xml:/etc/clickhouse-server/users.d/custom.xml:ro
networks:
- fluxer-shared
ports:
- target: 8123
published: 8123
protocol: tcp
mode: host
- target: 9000
published: 9000
protocol: tcp
mode: host
deploy:
replicas: 1
restart_policy:
condition: on-failure
delay: 10s
max_attempts: 3
resources:
limits:
cpus: '4'
memory: 16G
healthcheck:
test: ['CMD-SHELL', 'wget -qO- http://127.0.0.1:8123/ping || exit 1']
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
ulimits:
nofile:
soft: 262144
hard: 262144
networks:
fluxer-shared:
external: true
volumes:
clickhouse_data:
driver: local
clickhouse_logs:
driver: local

View File

@@ -0,0 +1,20 @@
<?xml version="1.0"?>
<clickhouse>
<logger>
<level>information</level>
<console>true</console>
</logger>
<max_concurrent_queries>100</max_concurrent_queries>
<max_connections>4096</max_connections>
<mark_cache_size>5368709120</mark_cache_size>
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
<merge_tree>
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
</merge_tree>
<background_pool_size>16</background_pool_size>
<background_schedule_pool_size>16</background_schedule_pool_size>
</clickhouse>

View File

@@ -0,0 +1,14 @@
<?xml version="1.0"?>
<clickhouse>
  <users>
    <!-- Application user; the password is injected from the container
         environment at startup rather than stored in this file. -->
    <fluxer>
      <password from_env="CLICKHOUSE_PASSWORD"/>
      <networks>
        <!-- NOTE(review): ::/0 accepts connections from ANY address; access is
             password-gated only. The compose file publishes ports 8123/9000 in
             host mode, so confirm they are firewalled to the internal network. -->
        <ip>::/0</ip>
      </networks>
      <profile>default</profile>
      <quota>default</quota>
      <!-- 1 lets this user manage users/roles/grants via SQL (admin-level). -->
      <access_management>1</access_management>
    </fluxer>
  </users>
</clickhouse>

View File

@@ -0,0 +1,10 @@
# syntax=docker/dockerfile:1
# Custom Caddy build with the YAML config adapter, layer4 (raw TCP/SNI)
# proxying, and the Porkbun DNS provider (for DNS-01 ACME challenges).
#
# CADDY_VERSION parameterizes both stages so the builder and the runtime image
# can never drift onto different release lines. Override at build time with
# e.g. `--build-arg CADDY_VERSION=2.8` to pin more tightly.
ARG CADDY_VERSION=2

FROM caddy:${CADDY_VERSION}-builder AS builder
RUN xcaddy build \
    --with github.com/abiosoft/caddy-yaml \
    --with github.com/mholt/caddy-l4 \
    --with github.com/caddy-dns/porkbun

FROM caddy:${CADDY_VERSION}
# Replace the stock binary with the plugin-enabled one from the build stage.
COPY --from=builder /usr/bin/caddy /usr/bin/caddy

View File

@@ -0,0 +1,40 @@
# Every target is declarative, not a file, so declare them all phony -- the
# original list omitted logs-caddy and logs-livekit, meaning a stray file with
# either name would silently shadow the target.
.PHONY: help build-caddy build up down restart logs logs-caddy logs-livekit clean

# Default target: list the available commands.
help:
	@echo "Available commands:"
	@echo " make build-caddy - Build the Caddy Docker image"
	@echo " make build - Build all images (including Caddy)"
	@echo " make up - Start all services"
	@echo " make down - Stop all services"
	@echo " make restart - Restart all services"
	@echo " make logs - View logs from all services"
	@echo " make logs-caddy - View Caddy logs"
	@echo " make logs-livekit - View LiveKit logs"
	@echo " make clean - Stop services and remove volumes"

# Build the custom Caddy image used by the compose file.
build-caddy:
	docker build -f Dockerfile.caddy -t fluxer-livekit-caddy:latest .

# Caddy must be built first so the tag exists when compose builds/pulls.
build: build-caddy
	docker compose build

up:
	docker compose up -d

down:
	docker compose down

restart:
	docker compose restart

logs:
	docker compose logs -f

logs-caddy:
	docker compose logs -f caddy

logs-livekit:
	docker compose logs -f livekit

# Destructive: also removes named volumes (TLS data, valkey state).
clean:
	docker compose down -v

View File

@@ -0,0 +1,56 @@
# Caddy configuration template (YAML adapter).
# Two kinds of placeholders coexist here:
#   - ${LIVEKIT_DOMAIN} / ${LIVEKIT_DOMAIN_TURN}: substituted by
#     entrypoint-caddy.sh (sed) before Caddy starts.
#   - {env.PORKBUN_*}: resolved by Caddy itself at runtime.
logging:
  logs:
    default:
      level: INFO
storage:
  # Persist certificates/state under /data (the caddy_data volume).
  module: 'file_system'
  root: '/data'
apps:
  tls:
    automation:
      policies:
        - subjects:
            - '${LIVEKIT_DOMAIN}'
            - '${LIVEKIT_DOMAIN_TURN}'
          issuers:
            # ACME via Porkbun DNS-01, so issuance works without serving an
            # HTTP challenge endpoint on this host.
            - module: acme
              challenges:
                dns:
                  provider:
                    name: porkbun
                    api_key: '{env.PORKBUN_API_KEY}'
                    api_secret_key: '{env.PORKBUN_API_SECRET_KEY}'
      on_demand: false
    certificates:
      automate:
        - '${LIVEKIT_DOMAIN}'
        - '${LIVEKIT_DOMAIN_TURN}'
  # Layer-4 (TCP) server on :443, dispatching connections by TLS SNI.
  layer4:
    servers:
      main:
        listen: [':443']
        routes:
          # TURN domain: terminate TLS here, then proxy the decrypted stream
          # to LiveKit's TURN listener on 5349.
          - match:
              - tls:
                  sni:
                    - '${LIVEKIT_DOMAIN_TURN}'
            handle:
              - handler: tls
              - handler: proxy
                upstreams:
                  - dial: ['livekit:5349']
          # Main domain: terminate TLS restricted to http/1.1 ALPN —
          # presumably so WebSocket signaling upgrades work; confirm — then
          # proxy to LiveKit's HTTP port 7880.
          - match:
              - tls:
                  sni:
                    - '${LIVEKIT_DOMAIN}'
            handle:
              - handler: tls
                connection_policies:
                  - alpn: ['http/1.1']
              - handler: proxy
                upstreams:
                  - dial: ['livekit:7880']

View File

@@ -0,0 +1,65 @@
# Single-host LiveKit stack: Valkey (state/pubsub backend), the LiveKit SFU,
# and a custom Caddy terminating TLS and routing by SNI (see caddy.yaml.template).
services:
  valkey:
    image: valkey/valkey:7.2-alpine
    hostname: valkey
    # Inline server options: password auth, 3 GB LRU cap, RDB snapshots + AOF.
    command: >
      valkey-server
      --requirepass ${REDIS_PASSWORD}
      --maxmemory 3gb
      --maxmemory-policy allkeys-lru
      --save 900 1
      --save 300 10
      --save 60 10000
      --appendonly yes
    volumes:
      - valkey_data:/data
    restart: unless-stopped
  livekit:
    # NOTE(review): :latest is unpinned; consider a specific server version
    # for reproducible deploys.
    image: livekit/livekit-server:latest
    hostname: livekit
    # entrypoint.sh renders the mounted template with env values before
    # starting the server.
    entrypoint: /entrypoint.sh
    env_file:
      - .env
    volumes:
      - ./conf/livekit.yaml.template:/etc/livekit.yaml.template:ro
      - ./entrypoint.sh:/entrypoint.sh:ro
    # 7881 (RTC/TCP), 7882/udp (RTC), 3478/udp (TURN) published directly;
    # 7880 (HTTP) and 5349 (TURN/TLS) stay internal and are reached via Caddy.
    ports:
      - '7881:7881'
      - '7882:7882/udp'
      - '3478:3478/udp'
    depends_on:
      - valkey
    restart: unless-stopped
    deploy:
      resources:
        limits:
          cpus: '4'
          memory: 6G
        reservations:
          cpus: '1'
          memory: 2G
  caddy:
    build:
      context: .
      dockerfile: Dockerfile.caddy
    image: fluxer-livekit-caddy:latest
    hostname: livekit-caddy
    # entrypoint-caddy.sh substitutes the domain placeholders in the mounted
    # template, then execs caddy.
    entrypoint: /entrypoint-caddy.sh
    env_file:
      - .env
    volumes:
      - ./caddy.yaml.template:/etc/caddy.yaml.template:ro
      - ./entrypoint-caddy.sh:/entrypoint-caddy.sh:ro
      - caddy_data:/data
    ports:
      - '443:443'
      - '80:80'
    depends_on:
      - livekit
    restart: unless-stopped
volumes:
  valkey_data:
  caddy_data:

View File

@@ -0,0 +1,37 @@
# LiveKit server configuration template; entrypoint.sh substitutes the ${VAR}
# placeholders from the environment before the server starts.
port: 7880
bind_addresses:
  # Empty-string entry — presumably binds all interfaces; TODO confirm
  # against the LiveKit config reference.
  - ''
rtc:
  tcp_port: 7881
  udp_port: 7882
  # Public IP is supplied explicitly via NODE_IP rather than auto-detected.
  use_external_ip: false
  node_ip: ${NODE_IP}
  enable_loopback_candidate: false
redis:
  # Valkey speaks the Redis protocol; address matches the compose hostname.
  address: valkey:6379
  username: ''
  password: ${REDIS_PASSWORD}
  db: 0
  use_tls: false
turn:
  enabled: true
  domain: ${LIVEKIT_DOMAIN_TURN}
  tls_port: 5349
  udp_port: 3478
  # TLS for TURN is terminated externally (Caddy's layer4 route dials
  # livekit:5349 after its own tls handler).
  external_tls: true
# API key -> secret map used to sign/verify access tokens.
keys:
  ${LIVEKIT_API_KEY}: ${LIVEKIT_API_SECRET}
room:
  auto_create: true
  # Timeouts for empty rooms / departed participants — presumably seconds;
  # TODO confirm units against LiveKit docs.
  empty_timeout: 300
  departure_timeout: 20
webhook:
  api_key: ${LIVEKIT_API_KEY}
  urls:
    - ${LIVEKIT_WEBHOOK_URL}

View File

@@ -0,0 +1,26 @@
#!/bin/sh
# Copyright (C) 2026 Fluxer Contributors
#
# This file is part of Fluxer.
#
# Fluxer is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fluxer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
#
# Container entrypoint for Caddy: renders /etc/caddy.yaml from the mounted
# template by substituting the domain placeholders, then replaces this shell
# with caddy (exec keeps caddy as PID 1 so it receives stop signals).
set -e

# Fail fast with a clear error if a required variable is missing; otherwise
# sed would silently render a config with empty domains.
: "${LIVEKIT_DOMAIN:?LIVEKIT_DOMAIN must be set}"
: "${LIVEKIT_DOMAIN_TURN:?LIVEKIT_DOMAIN_TURN must be set}"

# '|' is a safe sed delimiter here: domain names cannot contain '|'.
sed -e "s|\${LIVEKIT_DOMAIN}|${LIVEKIT_DOMAIN}|g" \
    -e "s|\${LIVEKIT_DOMAIN_TURN}|${LIVEKIT_DOMAIN_TURN}|g" \
    /etc/caddy.yaml.template > /etc/caddy.yaml
exec caddy run --config /etc/caddy.yaml --adapter yaml "$@"

View File

@@ -0,0 +1,30 @@
#!/bin/sh
# Copyright (C) 2026 Fluxer Contributors
#
# This file is part of Fluxer.
#
# Fluxer is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fluxer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
#
# Container entrypoint for LiveKit: renders /tmp/livekit.yaml from the
# mounted template by substituting env placeholders, then replaces this
# shell with the server (exec keeps it as PID 1 for signal handling).
set -e

# Fail fast if a required variable is missing; otherwise sed would silently
# render a broken config (empty node IP, password, or API credentials).
: "${NODE_IP:?NODE_IP must be set}"
: "${REDIS_PASSWORD:?REDIS_PASSWORD must be set}"
: "${LIVEKIT_API_KEY:?LIVEKIT_API_KEY must be set}"
: "${LIVEKIT_API_SECRET:?LIVEKIT_API_SECRET must be set}"
: "${LIVEKIT_DOMAIN_TURN:?LIVEKIT_DOMAIN_TURN must be set}"
# LIVEKIT_WEBHOOK_URL is intentionally not enforced here — left optional.

# NOTE: '|' is used as the sed delimiter; values containing '|' would break
# the substitution (none of these are expected to).
sed -e "s|\${NODE_IP}|${NODE_IP}|g" \
    -e "s|\${REDIS_PASSWORD}|${REDIS_PASSWORD}|g" \
    -e "s|\${LIVEKIT_API_KEY}|${LIVEKIT_API_KEY}|g" \
    -e "s|\${LIVEKIT_API_SECRET}|${LIVEKIT_API_SECRET}|g" \
    -e "s|\${LIVEKIT_WEBHOOK_URL}|${LIVEKIT_WEBHOOK_URL}|g" \
    -e "s|\${LIVEKIT_DOMAIN_TURN}|${LIVEKIT_DOMAIN_TURN}|g" \
    /etc/livekit.yaml.template > /tmp/livekit.yaml
exec /livekit-server --config /tmp/livekit.yaml "$@"

View File

@@ -0,0 +1,47 @@
# Meilisearch search service for the Fluxer stack (Swarm-style deploy keys).
services:
  meilisearch:
    image: getmeili/meilisearch:v1.23
    hostname: meilisearch
    # Presumably supplies MEILI_MASTER_KEY — TODO confirm the env file.
    env_file:
      - /etc/fluxer/meilisearch.env
    environment:
      - MEILI_ENV=production
      - MEILI_DB_PATH=/meili_data
      - MEILI_HTTP_ADDR=0.0.0.0:7700
      - MEILI_MAX_INDEXING_MEMORY=4gb
      - MEILI_MAX_INDEXING_THREADS=4
      - MEILI_LOG_LEVEL=INFO
      - MEILI_NO_ANALYTICS=true
    volumes:
      - meilisearch_data:/meili_data
    networks:
      - fluxer-shared
    # NOTE(review): publishing 7700 exposes the API on the host — verify
    # external access is blocked by firewall rules.
    ports:
      - '7700:7700'
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '4'
          memory: 8G
        reservations:
          cpus: '2'
          memory: 4G
    # NOTE(review): confirm curl exists in this image tag; if absent the
    # healthcheck fails permanently and the task is restarted in a loop.
    healthcheck:
      test: ['CMD-SHELL', 'curl -fsS http://127.0.0.1:7700/health > /dev/null']
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 60s
networks:
  fluxer-shared:
    external: true
volumes:
  meilisearch_data:
    driver: local

View File

@@ -0,0 +1,44 @@
# Host-level nginx stream (L4) front door: routes incoming TLS by SNI to one
# of two Caddy instances published on localhost ports, without terminating
# TLS here (ssl_preread only inspects the ClientHello).
user www-data;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
    worker_connections 1024;
}
stream {
    # Map SNI to upstream
    map $ssl_preread_server_name $upstream {
        gateway.fluxer.app 127.0.0.1:9443; # caddy-gateway via host port
        default 127.0.0.1:8443; # main caddy via host port
    }
    # HTTPS SNI router on 443, IPv4 + IPv6
    server {
        listen 443;
        listen [::]:443;
        proxy_pass $upstream;
        ssl_preread on;
        # PROXY protocol preserves the real client IP; both Caddy instances
        # accept it via their proxy_protocol listener wrappers.
        proxy_protocol on;
        proxy_connect_timeout 60s;
        # Long idle timeout to keep WebSocket/long-lived streams open.
        proxy_timeout 1h;
        proxy_buffer_size 16k;
    }
    # HTTP passthrough to main Caddy on 80
    server {
        listen 80;
        listen [::]:80;
        proxy_pass 127.0.0.1:8080;
        proxy_protocol on;
        proxy_connect_timeout 60s;
        proxy_timeout 1h;
        proxy_buffer_size 16k;
    }
}

View File

@@ -0,0 +1,50 @@
# PostgreSQL 17 service (Swarm-style deploy) with password via Docker secret.
services:
  postgres:
    image: postgres:17-alpine
    hostname: postgres
    environment:
      - POSTGRES_DB=fluxer
      - POSTGRES_USER=fluxer
      # Password is read from the mounted secret file, never from the env.
      - POSTGRES_PASSWORD_FILE=/run/secrets/postgres_password
    secrets:
      - postgres_password
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./conf/postgresql.conf:/etc/postgresql/postgresql.conf
    # Start with the mounted config instead of the image default.
    command: postgres -c config_file=/etc/postgresql/postgresql.conf
    networks:
      - fluxer-shared
    # NOTE(review): publishing 5432 exposes PostgreSQL on the host — verify
    # external access is blocked by firewall rules.
    ports:
      - '5432:5432'
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '4'
          memory: 16G
        reservations:
          cpus: '2'
          memory: 8G
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U fluxer -d fluxer']
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    # Enlarged shared memory; the default 64 MB /dev/shm is too small for
    # parallel queries with the settings in conf/postgresql.conf.
    shm_size: 2g
networks:
  fluxer-shared:
    external: true
volumes:
  postgres_data:
    driver: local
secrets:
  postgres_password:
    external: true

View File

@@ -0,0 +1,60 @@
# PostgreSQL tuning — sized for the 16G-limit container in docker-compose.yml.

# --- Connections ---
listen_addresses = '*'
max_connections = 200
superuser_reserved_connections = 3

# --- Memory ---
shared_buffers = 4GB
effective_cache_size = 12GB
maintenance_work_mem = 1GB
work_mem = 20MB

# --- Checkpoints / WAL ---
checkpoint_timeout = 15min
checkpoint_completion_target = 0.9
max_wal_size = 4GB
min_wal_size = 1GB
# SSD-oriented planner/IO settings.
random_page_cost = 1.1
effective_io_concurrency = 200
wal_buffers = 16MB
wal_compression = on
# replica level + senders: enables streaming replication / physical backups.
wal_level = replica
max_wal_senders = 3

# --- Logging ---
logging_collector = on
log_directory = 'log'
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
log_rotation_age = 1d
log_rotation_size = 100MB
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '
log_checkpoints = on
log_connections = on
log_disconnections = on
log_duration = off
log_lock_waits = on
log_statement = 'none'
log_temp_files = 0
# Log every statement slower than 1 second.
log_min_duration_statement = 1000

# --- Autovacuum ---
autovacuum = on
autovacuum_max_workers = 3
autovacuum_naptime = 30s

# --- Extensions / statistics ---
shared_preload_libraries = 'pg_stat_statements'
track_io_timing = on
track_functions = all
pg_stat_statements.max = 10000
pg_stat_statements.track = all
default_statistics_target = 100

# --- Parallelism ---
max_parallel_workers_per_gather = 2
max_parallel_workers = 4
max_worker_processes = 4

# --- Locale ---
datestyle = 'iso, mdy'
timezone = 'UTC'
lc_messages = 'en_US.utf8'
lc_monetary = 'en_US.utf8'
lc_numeric = 'en_US.utf8'
lc_time = 'en_US.utf8'
default_text_search_config = 'pg_catalog.english'

View File

@@ -0,0 +1,42 @@
# Valkey (Redis-compatible) cache service for the Fluxer stack.
services:
  valkey:
    image: valkey/valkey:8-alpine
    hostname: valkey
    # Presumably supplies VALKEY_PASSWORD consumed by entrypoint.sh.
    env_file:
      - /etc/fluxer/valkey.env
    # entrypoint.sh renders the mounted config template, then starts the server.
    entrypoint: /entrypoint.sh
    volumes:
      - valkey_data:/data
      - ./conf/valkey.conf.template:/etc/valkey/valkey.conf.template:ro
      - ./entrypoint.sh:/entrypoint.sh:ro
    networks:
      - fluxer-shared
    # NOTE(review): publishing 6379 exposes Valkey on the host — verify
    # external access is blocked by firewall rules.
    ports:
      - '6379:6379'
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
        delay: 10s
        max_attempts: 3
      resources:
        limits:
          cpus: '2'
          memory: 4G
        reservations:
          cpus: '1'
          memory: 2G
    # NOTE(review): valkey.conf sets requirepass, so an unauthenticated PING
    # gets a NOAUTH error reply — confirm valkey-cli's exit code makes this
    # check behave as intended; it may need
    # `valkey-cli -a "$VALKEY_PASSWORD" --no-auth-warning ping`.
    healthcheck:
      test: ['CMD', 'valkey-cli', 'ping']
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
networks:
  fluxer-shared:
    external: true
volumes:
  valkey_data:
    driver: local

View File

@@ -0,0 +1,69 @@
# Valkey configuration template; ${VALKEY_PASSWORD} is substituted by
# entrypoint.sh at container start.

# --- Network / auth ---
bind 0.0.0.0
port 6379
protected-mode yes
requirepass ${VALKEY_PASSWORD}
tcp-backlog 511
timeout 0
tcp-keepalive 300

# --- Process / logging ---
daemonize no
pidfile /var/run/valkey.pid
loglevel notice
logfile ""
databases 16

# --- RDB snapshots (after N seconds if >= M changes) ---
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data

# --- Memory: 3 GB cap, evict least-recently-used keys ---
maxmemory 3gb
maxmemory-policy allkeys-lru

# --- AOF persistence ---
appendonly yes
appendfilename "appendonly.aof"
appenddirname "appendonlydir"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb

# --- Diagnostics ---
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 100
notify-keyspace-events ""

# --- Data-structure encoding thresholds ---
hash-max-listpack-entries 512
hash-max-listpack-value 64
list-max-listpack-size -2
list-compress-depth 0
set-max-intset-entries 512
set-max-listpack-entries 128
set-max-listpack-value 64
zset-max-listpack-entries 128
zset-max-listpack-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100

# --- Misc performance ---
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes

View File

@@ -0,0 +1,27 @@
#!/bin/sh
# Copyright (C) 2026 Fluxer Contributors
#
# This file is part of Fluxer.
#
# Fluxer is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Fluxer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Fluxer. If not, see <https://www.gnu.org/licenses/>.
#
# Container entrypoint for Valkey: renders /tmp/valkey.conf from the mounted
# template, then replaces this shell with valkey-server (exec keeps it as
# PID 1 for signal handling).
set -e

# Fail fast if the password is missing; otherwise sed would render
# "requirepass" with an empty value and the server would start unprotected
# (or refuse to start).
: "${VALKEY_PASSWORD:?VALKEY_PASSWORD must be set}"

# Substitute environment variables in the config file
sed -e "s|\${VALKEY_PASSWORD}|${VALKEY_PASSWORD}|g" \
    /etc/valkey/valkey.conf.template > /tmp/valkey.conf
# Start Valkey with the processed config
exec valkey-server /tmp/valkey.conf "$@"