refactor progress

This commit is contained in:
Hampus Kraft
2026-02-17 12:22:36 +00:00
parent cb31608523
commit d5abd1a7e4
8257 changed files with 1190207 additions and 761040 deletions

View File

View File

@@ -0,0 +1,547 @@
#!/usr/bin/env python3
import json
import pathlib
import sys
from datetime import datetime, timezone
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_steps import INSTALL_RCLONE_SCRIPT, rclone_config_script
from ci_workflow import EnvArg, parse_step_env_args
from ci_utils import pwsh_step, require_env, run_step, write_github_output
# Build matrix: one entry per (platform, arch) pair.  "os" is the GitHub
# Actions runner label and "electron_arch" is forwarded to electron-builder.
PLATFORMS = [
{"platform": "windows", "arch": "x64", "os": "windows-latest", "electron_arch": "x64"},
{"platform": "windows", "arch": "arm64", "os": "windows-11-arm", "electron_arch": "arm64"},
{"platform": "macos", "arch": "x64", "os": "macos-15-intel", "electron_arch": "x64"},
{"platform": "macos", "arch": "arm64", "os": "macos-15", "electron_arch": "arm64"},
{"platform": "linux", "arch": "x64", "os": "ubuntu-24.04", "electron_arch": "x64"},
{"platform": "linux", "arch": "arm64", "os": "ubuntu-24.04-arm", "electron_arch": "arm64"},
]
def parse_bool(value: str) -> bool:
    """Interpret *value* as a boolean flag.

    "1", "true", "yes" and "on" (case-insensitive) are truthy; anything
    else, including the empty string, is falsy.
    """
    normalized = value.lower()
    return normalized in ("1", "true", "yes", "on")
def set_metadata_step(channel: str, ref: str) -> None:
    """Publish release metadata as GitHub Actions step outputs.

    The version is derived from the workflow run number; the build channel
    and default source ref follow the requested channel ("canary" vs
    anything else, which resolves to "stable"/"main").
    """
    require_env(["GITHUB_RUN_NUMBER"])
    import os

    run_number = os.environ.get("GITHUB_RUN_NUMBER", "")
    is_canary = channel == "canary"
    write_github_output(
        {
            "version": f"0.0.{run_number}",
            "pub_date": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
            "channel": channel,
            "build_channel": "canary" if is_canary else "stable",
            "source_ref": ref or ("canary" if is_canary else "main"),
        }
    )
def set_matrix_step(flags: dict[str, bool]) -> None:
    """Emit the GitHub Actions build matrix, honouring the skip flags.

    An entry is dropped when its whole platform is skipped or when the
    specific platform/arch combination is skipped.
    """
    included: list[dict[str, str]] = []
    for entry in PLATFORMS:
        plat = entry["platform"]
        arch = entry["arch"]
        if plat == "windows":
            skipped = flags["skip_windows"] or flags[f"skip_windows_{arch}"]
        elif plat == "macos":
            skipped = flags["skip_macos"] or flags[f"skip_macos_{arch}"]
        elif plat == "linux":
            skipped = flags["skip_linux"] or flags[f"skip_linux_{arch}"]
        else:
            skipped = False
        if not skipped:
            included.append(entry)
    payload = json.dumps({"include": included}, separators=(",", ":"))
    write_github_output({"matrix": payload})
# Shell (bash) and PowerShell bodies for each desktop-build workflow step,
# keyed by step name and dispatched via run_step() in main().
STEPS = {
# Windows: map the workspace onto the short W: drive and point temp,
# Squirrel, electron-builder and pnpm caches at short C:\ paths (plus
# git core.longpaths) to stay clear of Windows path-length limits.
"windows_paths": pwsh_step(
r"""
subst W: "$env:GITHUB_WORKSPACE"
"WORKDIR=W:" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
New-Item -ItemType Directory -Force "C:\t" | Out-Null
New-Item -ItemType Directory -Force "C:\sq" | Out-Null
New-Item -ItemType Directory -Force "C:\ebcache" | Out-Null
"TEMP=C:\t" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
"TMP=C:\t" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
"SQUIRREL_TEMP=C:\sq" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
"ELECTRON_BUILDER_CACHE=C:\ebcache" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
New-Item -ItemType Directory -Force "C:\pnpm-store" | Out-Null
"NPM_CONFIG_STORE_DIR=C:\pnpm-store" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
"npm_config_store_dir=C:\pnpm-store" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
"store-dir=C:\pnpm-store" | Set-Content -Path "W:\.npmrc" -Encoding ascii
git config --global core.longpaths true
"""
),
"set_workdir_unix": "echo \"WORKDIR=$GITHUB_WORKSPACE\" >> \"$GITHUB_ENV\"\n",
# Resolve the pnpm store location, export PNPM_STORE_PATH (used by the
# cache step) and make sure the directory exists.
"resolve_pnpm_store_windows": pwsh_step(
r"""
$store = pnpm store path --silent
"PNPM_STORE_PATH=$store" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
New-Item -ItemType Directory -Force $store | Out-Null
"""
),
"resolve_pnpm_store_unix": """
set -euo pipefail
store="$(pnpm store path --silent)"
echo "PNPM_STORE_PATH=$store" >> "$GITHUB_ENV"
mkdir -p "$store"
""",
# Per-platform toolchain prerequisites for native-module builds.
"install_setuptools_windows_arm64": pwsh_step(
r"""
python -m pip install --upgrade pip
python -m pip install "setuptools>=69" wheel
"""
),
"install_setuptools_macos": "brew install python-setuptools\n",
"install_linux_deps": """
set -euo pipefail
sudo apt-get update
sudo apt-get install -y \
libx11-dev libxtst-dev libxt-dev libxinerama-dev libxkbcommon-dev libxrandr-dev \
ruby ruby-dev build-essential rpm \
libpixman-1-dev libcairo2-dev libpango1.0-dev libjpeg-dev libgif-dev librsvg2-dev
sudo gem install --no-document fpm
""",
# Build steps: VERSION and ELECTRON_ARCH come from the job environment.
"install_dependencies": "pnpm install --frozen-lockfile\n",
"update_version": "pnpm version \"${VERSION}\" --no-git-tag-version --allow-same-version\n",
"set_build_channel": "pnpm set-channel\n",
"build_electron_main": "pnpm build\n",
"build_app_macos": "pnpm exec electron-builder --config electron-builder.config.cjs --mac --${ELECTRON_ARCH}\n",
# Unpack the built mac zip and assert its CFBundleIdentifier matches the
# expected id for the build channel.
"verify_bundle_id": """
set -euo pipefail
DIST="dist-electron"
ZIP="$(ls -1 "$DIST"/*"${ELECTRON_ARCH}"*.zip | head -n1)"
tmp="$(mktemp -d)"
ditto -xk "$ZIP" "$tmp"
APP="$(find "$tmp" -maxdepth 2 -name "*.app" -print -quit)"
BID=$(/usr/libexec/PlistBuddy -c 'Print :CFBundleIdentifier' "$APP/Contents/Info.plist")
expected="app.fluxer"
if [[ "${BUILD_CHANNEL:-stable}" == "canary" ]]; then expected="app.fluxer.canary"; fi
echo "Bundle id in zip: $BID (expected: $expected)"
test "$BID" = "$expected"
""",
"build_app_windows": "pnpm exec electron-builder --config electron-builder.config.cjs --win --${ELECTRON_ARCH}\n",
# Inspect the Squirrel .nupkg and fail if any archived path, joined onto the
# assumed install prefix under %LOCALAPPDATA%, would exceed the Windows
# path-length limit.  The check is a small Python script materialised into
# a temp file and executed with the nupkg path passed via NUPKG_PATH.
# NOTE(review): the "(total reserve 66,508, ...)" fragment in the printed
# summary line looks garbled -- verify the intended wording.
"analyse_squirrel_paths": pwsh_step(
r"""
$primaryDir = if ($env:ARCH -eq "arm64") { "dist-electron/squirrel-windows-arm64" } else { "dist-electron/squirrel-windows" }
$fallbackDir = if ($env:ARCH -eq "arm64") { "dist-electron/squirrel-windows" } else { "dist-electron/squirrel-windows-arm64" }
$dirs = @($primaryDir, $fallbackDir)
$nupkg = $null
foreach ($d in $dirs) {
if (Test-Path $d) {
$nupkg = Get-ChildItem -Path "$d/*.nupkg" -ErrorAction SilentlyContinue | Select-Object -First 1
if ($nupkg) { break }
}
}
if (-not $nupkg) {
throw "No Squirrel nupkg found in: $($dirs -join ', ')"
}
Write-Host "Analyzing Windows installer $($nupkg.FullName)"
$env:NUPKG_PATH = $nupkg.FullName
$lines = @(
'import os'
'import zipfile'
''
'path = os.environ["NUPKG_PATH"]'
'build_ver = os.environ["BUILD_VERSION"]'
'prefix = os.path.join(os.environ["LOCALAPPDATA"], "fluxer_app", f"app-{build_ver}", "resources", "app.asar.unpacked")'
'max_len = int(os.environ.get("MAX_WINDOWS_PATH_LEN", "260"))'
'headroom = int(os.environ.get("PATH_HEADROOM", "10"))'
'limit = max_len - headroom'
''
'with zipfile.ZipFile(path) as archive:'
' entries = []'
' for info in archive.infolist():'
' normalized = info.filename.lstrip("/\\\\")'
' total_len = len(os.path.join(prefix, normalized)) if normalized else len(prefix)'
' entries.append((total_len, info.filename))'
''
'if not entries:'
' raise SystemExit("nupkg archive contains no entries")'
''
'entries.sort(reverse=True)'
'print(f"Assumed install prefix: {prefix} ({len(prefix)} chars). Maximum allowed path length: {limit} (total reserve 66,508, headroom {headroom}).")'
'print("Top 20 longest archived paths (length includes prefix):")'
'for length, name in entries[:20]:'
' print(f"{length:4d} {name}")'
''
'longest_len, longest_name = entries[0]'
'if longest_len > limit:'
' raise SystemExit(f"Longest path {longest_len} for {longest_name} exceeds limit {limit}")'
'print(f"Longest archived path {longest_len} is within the limit of {limit}.")'
)
$scriptPath = Join-Path $env:TEMP "nupkg-long-path-check.py"
Set-Content -Path $scriptPath -Value $lines -Encoding utf8
python $scriptPath
"""
),
"build_app_linux": "pnpm exec electron-builder --config electron-builder.config.cjs --linux --${ELECTRON_ARCH}\n",
# Stage installer artifacts into upload_staging/ for the upload-artifact
# step; fails if no installer .exe was produced.
"prepare_artifacts_windows": pwsh_step(
r"""
New-Item -ItemType Directory -Force upload_staging | Out-Null
$dist = Join-Path $env:WORKDIR "fluxer_desktop/dist-electron"
$sqDirName = if ($env:ARCH -eq "arm64") { "squirrel-windows-arm64" } else { "squirrel-windows" }
$sqFallbackName = if ($sqDirName -eq "squirrel-windows") { "squirrel-windows-arm64" } else { "squirrel-windows" }
$sq = Join-Path $dist $sqDirName
$sqFallback = Join-Path $dist $sqFallbackName
$picked = $null
if (Test-Path $sq) { $picked = $sq }
elseif (Test-Path $sqFallback) { $picked = $sqFallback }
if ($picked) {
Copy-Item -Force -ErrorAction SilentlyContinue "$picked\*.exe" "upload_staging\"
Copy-Item -Force -ErrorAction SilentlyContinue "$picked\*.exe.blockmap" "upload_staging\"
Copy-Item -Force -ErrorAction SilentlyContinue "$picked\RELEASES*" "upload_staging\"
Copy-Item -Force -ErrorAction SilentlyContinue "$picked\*.nupkg" "upload_staging\"
Copy-Item -Force -ErrorAction SilentlyContinue "$picked\*.nupkg.blockmap" "upload_staging\"
}
if (Test-Path $dist) {
Copy-Item -Force -ErrorAction SilentlyContinue "$dist\*.yml" "upload_staging\"
Copy-Item -Force -ErrorAction SilentlyContinue "$dist\*.zip" "upload_staging\"
Copy-Item -Force -ErrorAction SilentlyContinue "$dist\*.zip.blockmap" "upload_staging\"
}
if (-not (Get-ChildItem upload_staging -Filter *.exe -ErrorAction SilentlyContinue)) {
throw "No installer .exe staged. Squirrel outputs were not copied."
}
Get-ChildItem -Force upload_staging | Format-Table -AutoSize
"""
),
# Best-effort copies (|| true): not every format exists on every platform.
"prepare_artifacts_unix": """
set -euo pipefail
mkdir -p upload_staging
DIST="${WORKDIR}/fluxer_desktop/dist-electron"
cp -f "$DIST"/*.dmg upload_staging/ 2>/dev/null || true
cp -f "$DIST"/*.zip upload_staging/ 2>/dev/null || true
cp -f "$DIST"/*.zip.blockmap upload_staging/ 2>/dev/null || true
cp -f "$DIST"/*.yml upload_staging/ 2>/dev/null || true
cp -f "$DIST"/*.AppImage upload_staging/ 2>/dev/null || true
cp -f "$DIST"/*.deb upload_staging/ 2>/dev/null || true
cp -f "$DIST"/*.rpm upload_staging/ 2>/dev/null || true
cp -f "$DIST"/*.tar.gz upload_staging/ 2>/dev/null || true
ls -la upload_staging/
""",
# On macOS, rename latest-mac.yml to latest-mac-arm64.yml when only the
# former exists (presumably runs in the arm64 job; confirm against caller).
"normalise_updater_yaml": """
set -euo pipefail
cd upload_staging
[[ "${PLATFORM}" == "macos" && -f latest-mac.yml && ! -f latest-mac-arm64.yml ]] && mv latest-mac.yml latest-mac-arm64.yml || true
""",
# Write a <file>.sha256 companion (hex digest only) next to each artifact.
"generate_checksums_unix": """
set -euo pipefail
cd upload_staging
for file in *.exe *.dmg *.zip *.AppImage *.deb *.rpm *.tar.gz; do
[ -f "$file" ] || continue
sha256sum "$file" | awk '{print $1}' > "${file}.sha256"
echo "Generated checksum for $file"
done
ls -la *.sha256 2>/dev/null || echo "No checksum files generated"
""",
"generate_checksums_windows": pwsh_step(
r"""
cd upload_staging
$extensions = @('.exe', '.nupkg')
Get-ChildItem -File | Where-Object { $extensions -contains $_.Extension } | ForEach-Object {
$hash = (Get-FileHash $_.FullName -Algorithm SHA256).Hash.ToLower()
Set-Content -Path "$($_.FullName).sha256" -Value $hash -NoNewline
Write-Host "Generated checksum for $($_.Name)"
}
Get-ChildItem -Filter "*.sha256" -ErrorAction SilentlyContinue | Format-Table -AutoSize
"""
),
# rclone install/config bodies are shared helpers from ci_steps.
"install_rclone": INSTALL_RCLONE_SCRIPT,
"configure_rclone": rclone_config_script(
endpoint="https://s3.us-east-va.io.cloud.ovh.us",
acl="private",
),
# Assemble the S3 payload tree from downloaded artifacts: copy files under
# desktop/<channel>/<plat>/<arch>/, emit a Squirrel.Mac RELEASES.json feed
# for darwin builds, and write a manifest.json per target describing the
# artifacts (with sha256 sums where a .sha256 companion exists).
"build_payload": """
set -euo pipefail
mkdir -p s3_payload
shopt -s nullglob
for dir in artifacts/fluxer-desktop-${CHANNEL}-*; do
[ -d "$dir" ] || continue
base="$(basename "$dir")"
if [[ "$base" =~ ^fluxer-desktop-[a-z]+-([a-z]+)-([a-z0-9]+)$ ]]; then
platform="${BASH_REMATCH[1]}"
arch="${BASH_REMATCH[2]}"
else
echo "Skipping unrecognised artifact dir: $base"
continue
fi
case "$platform" in
windows) plat="win32" ;;
macos) plat="darwin" ;;
linux) plat="linux" ;;
*)
echo "Unknown platform: $platform"
continue
;;
esac
dest="s3_payload/desktop/${CHANNEL}/${plat}/${arch}"
mkdir -p "$dest"
cp -av "$dir"/* "$dest/" || true
if [[ "$plat" == "darwin" ]]; then
zip_file=""
for z in "$dest"/*-"$arch".zip; do
zip_file="$z"
break
done
if [[ -z "$zip_file" ]]; then
for z in "$dest"/*.zip; do
zip_file="$z"
break
done
fi
if [[ -z "$zip_file" ]]; then
echo "No .zip found for macOS $arch in $dest (auto-update requires zip artifacts)."
else
zip_name="$(basename "$zip_file")"
url="${PUBLIC_DL_BASE}/desktop/${CHANNEL}/${plat}/${arch}/${zip_name}"
cat > "$dest/RELEASES.json" <<EOF
{
"currentRelease": "${VERSION}",
"releases": [
{
"version": "${VERSION}",
"updateTo": {
"version": "${VERSION}",
"pub_date": "${PUB_DATE}",
"notes": "",
"name": "${VERSION}",
"url": "${url}"
}
}
]
}
EOF
cp -f "$dest/RELEASES.json" "$dest/releases.json"
fi
fi
setup_file=""
dmg_file=""
zip_file2=""
appimage_file=""
deb_file=""
rpm_file=""
targz_file=""
if [[ "$plat" == "win32" ]]; then
setup_file="$(ls -1 "$dest"/*.exe 2>/dev/null | grep -i 'setup' | head -n1 || true)"
if [[ -z "$setup_file" ]]; then
setup_file="$(ls -1 "$dest"/*.exe 2>/dev/null | head -n1 || true)"
fi
fi
if [[ "$plat" == "darwin" ]]; then
dmg_file="$(ls -1 "$dest"/*-"$arch".dmg 2>/dev/null | head -n1 || true)"
if [[ -z "$dmg_file" ]]; then
dmg_file="$(ls -1 "$dest"/*.dmg 2>/dev/null | head -n1 || true)"
fi
zip_file2="$(ls -1 "$dest"/*-"$arch".zip 2>/dev/null | head -n1 || true)"
if [[ -z "$zip_file2" ]]; then
zip_file2="$(ls -1 "$dest"/*.zip 2>/dev/null | head -n1 || true)"
fi
fi
if [[ "$plat" == "linux" ]]; then
appimage_file="$(ls -1 "$dest"/*.AppImage 2>/dev/null | head -n1 || true)"
deb_file="$(ls -1 "$dest"/*.deb 2>/dev/null | head -n1 || true)"
rpm_file="$(ls -1 "$dest"/*.rpm 2>/dev/null | head -n1 || true)"
targz_file="$(ls -1 "$dest"/*.tar.gz 2>/dev/null | head -n1 || true)"
fi
read_sha256() {
local file="$1"
if [[ -n "$file" && -f "${file}.sha256" ]]; then
awk '{print $1}' "${file}.sha256"
else
echo ""
fi
}
setup_sha256="$(read_sha256 "$setup_file")"
dmg_sha256="$(read_sha256 "$dmg_file")"
zip_sha256="$(read_sha256 "$zip_file2")"
appimage_sha256="$(read_sha256 "$appimage_file")"
deb_sha256="$(read_sha256 "$deb_file")"
rpm_sha256="$(read_sha256 "$rpm_file")"
targz_sha256="$(read_sha256 "$targz_file")"
jq -n \
--arg channel "${CHANNEL}" \
--arg platform "${plat}" \
--arg arch "${arch}" \
--arg version "${VERSION}" \
--arg pub_date "${PUB_DATE}" \
--arg setup "$(basename "${setup_file:-}")" \
--arg setup_sha256 "${setup_sha256}" \
--arg dmg "$(basename "${dmg_file:-}")" \
--arg dmg_sha256 "${dmg_sha256}" \
--arg zip "$(basename "${zip_file2:-}")" \
--arg zip_sha256 "${zip_sha256}" \
--arg appimage "$(basename "${appimage_file:-}")" \
--arg appimage_sha256 "${appimage_sha256}" \
--arg deb "$(basename "${deb_file:-}")" \
--arg deb_sha256 "${deb_sha256}" \
--arg rpm "$(basename "${rpm_file:-}")" \
--arg rpm_sha256 "${rpm_sha256}" \
--arg tar_gz "$(basename "${targz_file:-}")" \
--arg tar_gz_sha256 "${targz_sha256}" \
'{
channel: $channel,
platform: $platform,
arch: $arch,
version: $version,
pub_date: $pub_date,
files: (
{}
| if ($setup | length) > 0 then
. + {setup: (if ($setup_sha256 | length) > 0 then {filename: $setup, sha256: $setup_sha256} else $setup end)}
else . end
| if ($dmg | length) > 0 then
. + {dmg: (if ($dmg_sha256 | length) > 0 then {filename: $dmg, sha256: $dmg_sha256} else $dmg end)}
else . end
| if ($zip | length) > 0 then
. + {zip: (if ($zip_sha256 | length) > 0 then {filename: $zip, sha256: $zip_sha256} else $zip end)}
else . end
| if ($appimage | length) > 0 then
. + {appimage: (if ($appimage_sha256 | length) > 0 then {filename: $appimage, sha256: $appimage_sha256} else $appimage end)}
else . end
| if ($deb | length) > 0 then
. + {deb: (if ($deb_sha256 | length) > 0 then {filename: $deb, sha256: $deb_sha256} else $deb end)}
else . end
| if ($rpm | length) > 0 then
. + {rpm: (if ($rpm_sha256 | length) > 0 then {filename: $rpm, sha256: $rpm_sha256} else $rpm end)}
else . end
| if ($tar_gz | length) > 0 then
. + {tar_gz: (if ($tar_gz_sha256 | length) > 0 then {filename: $tar_gz, sha256: $tar_gz_sha256} else $tar_gz end)}
else . end
)
}' > "$dest/manifest.json"
done
echo "Payload tree:"
find s3_payload -maxdepth 6 -type f | sort
""",
# Sync the payload tree to the OVH S3 bucket via rclone.
"upload_payload": """
set -euo pipefail
rclone copy s3_payload/desktop "ovh:${S3_BUCKET}/desktop" \
--transfers 32 \
--checkers 16 \
--fast-list \
--s3-upload-concurrency 8 \
--s3-chunk-size 16M \
-v
""",
# Append a human-readable summary to the workflow run page.
"build_summary": """
{
echo "## Desktop ${DISPLAY_CHANNEL^} Upload Complete"
echo ""
echo "**Version:** ${VERSION}"
echo ""
echo "**S3 prefix:** desktop/${CHANNEL}/"
echo ""
echo "**Redirect endpoint shape:** /dl/desktop/${CHANNEL}/{plat}/{arch}/{format}"
} >> "$GITHUB_STEP_SUMMARY"
""",
}
# Maps each matrix skip-flag key (as consumed by set_matrix_step) to the
# environment variable that carries its value.
SKIP_FLAG_ENV_MAP = {
"skip_windows": "SKIP_WINDOWS",
"skip_windows_x64": "SKIP_WINDOWS_X64",
"skip_windows_arm64": "SKIP_WINDOWS_ARM64",
"skip_macos": "SKIP_MACOS",
"skip_macos_x64": "SKIP_MACOS_X64",
"skip_macos_arm64": "SKIP_MACOS_ARM64",
"skip_linux": "SKIP_LINUX",
"skip_linux_x64": "SKIP_LINUX_X64",
"skip_linux_arm64": "SKIP_LINUX_ARM64",
}
# Command-line flags mirrored into environment variables before dispatch.
ENV_ARGS = [
EnvArg("--channel", "CHANNEL"),
EnvArg("--ref", "REF"),
EnvArg("--skip-windows", "SKIP_WINDOWS"),
EnvArg("--skip-windows-x64", "SKIP_WINDOWS_X64"),
EnvArg("--skip-windows-arm64", "SKIP_WINDOWS_ARM64"),
EnvArg("--skip-macos", "SKIP_MACOS"),
EnvArg("--skip-macos-x64", "SKIP_MACOS_X64"),
EnvArg("--skip-macos-arm64", "SKIP_MACOS_ARM64"),
EnvArg("--skip-linux", "SKIP_LINUX"),
EnvArg("--skip-linux-x64", "SKIP_LINUX_X64"),
EnvArg("--skip-linux-arm64", "SKIP_LINUX_ARM64"),
]
def main() -> int:
    """CLI entry point: run the requested workflow step.

    "set_metadata" and "set_matrix" are handled in Python; every other step
    name is looked up in STEPS and executed as a shell/PowerShell script.
    """
    import os

    args = parse_step_env_args(ENV_ARGS)
    step = args.step
    if step == "set_metadata":
        requested_channel = os.environ.get("CHANNEL", "") or "stable"
        set_metadata_step(requested_channel, os.environ.get("REF", ""))
        return 0
    if step == "set_matrix":
        skip_flags = {
            key: parse_bool(os.environ.get(env_name, "false"))
            for key, env_name in SKIP_FLAG_ENV_MAP.items()
        }
        set_matrix_step(skip_flags)
        return 0
    run_step(STEPS, step)
    return 0
# Exit with main()'s return code when executed as a script.
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env python3
import os
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import EnvArg, parse_env_args
from ci_utils import require_env, write_github_output
# Command-line flags mirrored into environment variables for main().
ENV_ARGS = [
EnvArg("--event-name", "EVENT_NAME"),
EnvArg("--ref-name", "REF_NAME"),
EnvArg("--dispatch-channel", "DISPATCH_CHANNEL"),
]
def determine_channel(
    *,
    event_name: str,
    ref_name: str,
    dispatch_channel: str,
) -> str:
    """Resolve the release channel for a workflow trigger.

    Push events follow the branch name; any other event follows the
    explicitly requested dispatch channel.  Anything that is not exactly
    "canary" resolves to "stable".
    """
    selector = ref_name if event_name == "push" else dispatch_channel
    return "canary" if selector == "canary" else "stable"
def main() -> int:
    """Resolve the release channel from the environment and publish outputs.

    Emits "channel", "is_canary" (string "true"/"false") and "stack_suffix"
    ("-canary" or empty) as GitHub Actions step outputs.
    """
    parse_env_args(ENV_ARGS)
    require_env(["EVENT_NAME"])
    channel = determine_channel(
        event_name=os.environ.get("EVENT_NAME", ""),
        ref_name=os.environ.get("REF_NAME", ""),
        dispatch_channel=os.environ.get("DISPATCH_CHANNEL", ""),
    )
    canary = channel == "canary"
    write_github_output(
        {
            "channel": channel,
            "is_canary": "true" if canary else "false",
            "stack_suffix": "-canary" if canary else "",
        }
    )
    return 0
# Exit with main()'s return code when executed as a script.
if __name__ == "__main__":
raise SystemExit(main())

54
scripts/ci/workflows/ci.py Executable file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import parse_step_env_args
from ci_utils import run_step
# Bash body for each CI step, keyed by step name and dispatched via run_step().
STEPS: dict[str, str] = {
# JS/TS workspace checks (pnpm).
"install_dependencies": """
set -euo pipefail
pnpm install --frozen-lockfile
""",
"typecheck": """
set -euo pipefail
pnpm typecheck
""",
"test": """
set -euo pipefail
pnpm test
""",
# Erlang gateway checks (rebar3).
"gateway_compile": """
set -euo pipefail
cd fluxer_gateway
rebar3 compile
""",
"gateway_dialyzer": """
set -euo pipefail
cd fluxer_gateway
rebar3 dialyzer
""",
"gateway_eunit": """
set -euo pipefail
cd fluxer_gateway
rebar3 eunit
""",
# Unused-code / unused-export detection.
"knip": """
set -euo pipefail
pnpm knip
""",
}
def main() -> int:
    """Dispatch the CI step named via CLI/environment to its bash body."""
    parsed = parse_step_env_args()
    run_step(STEPS, parsed.step)
    return 0
# Exit with main()'s return code when executed as a script.
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import parse_step_env_args
from ci_utils import run_step
# Steps for the CI-tooling test workflow: sync the uv environment under
# scripts/ci, then run its pytest suite.
STEPS: dict[str, str] = {
"sync": """
set -euo pipefail
cd scripts/ci
uv sync --dev
""",
"test": """
set -euo pipefail
cd scripts/ci
uv run pytest
""",
}
def main() -> int:
    """Look up the requested step in STEPS and execute it."""
    cli = parse_step_env_args()
    run_step(STEPS, cli.step)
    return 0
# Exit with main()'s return code when executed as a script.
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
# Bash run on the CI runner: push the image to the server ("docker pussh" is
# the SSH image-push plugin -- NOTE(review): spelling appears intentional,
# presumably docker-pussh; confirm a prior step installs it), then, on the
# remote host, pick the canary/stable config file, render a Docker Swarm
# compose file under /opt/<stack>/ and `docker stack deploy` it.  The
# COMPOSEEOF heredoc is unquoted so ${...} expands on the remote shell.
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" \
"IMAGE_TAG=${IMAGE_TAG} STACK=${STACK} CADDY_DOMAIN=${CADDY_DOMAIN} REPLICAS=${REPLICAS} RELEASE_CHANNEL=${RELEASE_CHANNEL} IS_CANARY=${IS_CANARY} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${IS_CANARY}" == "true" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${STACK}"
sudo chown -R "${USER}:${USER}" "/opt/${STACK}"
cd "/opt/${STACK}"
cat > compose.yaml << COMPOSEEOF
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
x-healthcheck: &healthcheck
test: ['CMD', 'curl', '-f', 'http://localhost:8080/']
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
services:
app:
image: ${IMAGE_TAG}
environment:
FLUXER_CONFIG: /etc/fluxer/config.json
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
<<: *deploy_base
replicas: ${REPLICAS}
labels:
- "caddy=${CADDY_DOMAIN}"
- 'caddy.reverse_proxy={{upstreams 8080}}'
- 'caddy.header.X-Robots-Tag="noindex, nofollow, nosnippet, noimageindex"'
- 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
- 'caddy.header.X-Xss-Protection="1; mode=block"'
- 'caddy.header.X-Content-Type-Options=nosniff'
- 'caddy.header.Referrer-Policy=strict-origin-when-cross-origin'
- 'caddy.header.X-Frame-Options=DENY'
networks: [fluxer-shared]
healthcheck: *healthcheck
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy \
--with-registry-auth \
--detach=false \
--resolve-image never \
-c compose.yaml \
"${STACK}"
REMOTE_EOF
"""
# Standard deploy step table with this service's push/deploy script injected.
STEPS = build_standard_deploy_steps(
push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
)
def main() -> int:
    """Entry point: delegate to the shared deploy workflow runner."""
    exit_code = run_deploy_workflow(STEPS)
    return exit_code
# Exit with main()'s return code when executed as a script.
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,167 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
# Bash run on the CI runner for the API deploy: push the app image (and the
# worker image only on canary) to the server over SSH, then on the remote
# host pick the canary/stable config, tighten ownership/permissions on the
# Bluesky key directory (root:65534, dirs 0750 / keys 0440), and render and
# `docker stack deploy` the API stack -- plus a separate worker stack when
# IS_CANARY is "true".  The COMPOSEEOF heredocs are unquoted so ${...}
# expands on the remote shell when the compose files are written.
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG_APP}" "${SERVER}"
if [[ "${IS_CANARY}" == "true" ]]; then
docker pussh "${IMAGE_TAG_WORKER}" "${SERVER}"
fi
ssh "${SERVER}" \
"IMAGE_TAG_APP=${IMAGE_TAG_APP} IMAGE_TAG_WORKER=${IMAGE_TAG_WORKER} STACK=${STACK} WORKER_STACK=${WORKER_STACK} CANARY_WORKER_REPLICAS=${CANARY_WORKER_REPLICAS} IS_CANARY=${IS_CANARY} CADDY_DOMAIN=${CADDY_DOMAIN} RELEASE_CHANNEL=${RELEASE_CHANNEL} SENTRY_RELEASE=${SENTRY_RELEASE} SENTRY_BUILD_SHA=${SENTRY_BUILD_SHA} SENTRY_BUILD_NUMBER=${SENTRY_BUILD_NUMBER} SENTRY_BUILD_TIMESTAMP=${SENTRY_BUILD_TIMESTAMP} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${IS_CANARY}" == "true" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
CANARY_WORKER_REPLICAS="${CANARY_WORKER_REPLICAS:-3}"
BLUESKY_KEYS_DIR="/etc/fluxer/keys"
sudo mkdir -p "${BLUESKY_KEYS_DIR}"
sudo chown root:65534 "${BLUESKY_KEYS_DIR}"
sudo chmod 0750 "${BLUESKY_KEYS_DIR}"
shopt -s nullglob
KEY_FILES=("${BLUESKY_KEYS_DIR}"/*.pem)
if [[ ${#KEY_FILES[@]} -gt 0 ]]; then
sudo chown root:65534 "${KEY_FILES[@]}"
sudo chmod 0440 "${KEY_FILES[@]}"
fi
shopt -u nullglob
deploy_api_stack() {
sudo mkdir -p "/opt/${STACK}"
sudo chown -R "${USER}:${USER}" "/opt/${STACK}"
cd "/opt/${STACK}"
cat > compose.yaml << COMPOSEEOF
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
x-healthcheck: &healthcheck
test: ['CMD', 'curl', '-f', 'http://localhost:8080/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
services:
app:
image: ${IMAGE_TAG_APP}
command: ['npm', 'run', 'start']
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
- ${BLUESKY_KEYS_DIR}:${BLUESKY_KEYS_DIR}:ro
- /opt/geoip/GeoLite2-City.mmdb:/data/GeoLite2-City.mmdb:ro
deploy:
<<: *deploy_base
replicas: 6
labels:
- "caddy=${CADDY_DOMAIN}"
- 'caddy.reverse_proxy={{upstreams 8080}}'
- 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
- 'caddy.header.X-Xss-Protection="1; mode=block"'
- 'caddy.header.X-Content-Type-Options=nosniff'
- 'caddy.header.Referrer-Policy=strict-origin-when-cross-origin'
- 'caddy.header.X-Frame-Options=DENY'
- 'caddy.header.Expect-Ct="max-age=86400, report-uri=\\"https://o4510149383094272.ingest.us.sentry.io/api/4510205804019712/security/?sentry_key=bb16e8b823b82d788db49a666b3b4b90\\""'
networks:
- fluxer-shared
healthcheck: *healthcheck
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy --with-registry-auth --detach=false --resolve-image never -c compose.yaml "${STACK}"
}
deploy_worker_stack() {
sudo mkdir -p "/opt/${WORKER_STACK}"
sudo chown -R "${USER}:${USER}" "/opt/${WORKER_STACK}"
cd "/opt/${WORKER_STACK}"
cat > compose.yaml << COMPOSEEOF
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
services:
worker:
image: ${IMAGE_TAG_WORKER}
command: ['npm', 'run', 'start:worker']
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
- SENTRY_RELEASE=${SENTRY_RELEASE}
- SENTRY_BUILD_SHA=${SENTRY_BUILD_SHA}
- SENTRY_BUILD_NUMBER=${SENTRY_BUILD_NUMBER}
- SENTRY_BUILD_TIMESTAMP=${SENTRY_BUILD_TIMESTAMP}
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
<<: *deploy_base
replicas: ${CANARY_WORKER_REPLICAS}
networks:
- fluxer-shared
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy --with-registry-auth --detach=false --resolve-image never -c compose.yaml "${WORKER_STACK}"
}
deploy_api_stack
if [[ "${IS_CANARY}" == "true" ]]; then
deploy_worker_stack
fi
REMOTE_EOF
"""
# Standard deploy step table with this service's push/deploy script injected;
# Sentry metadata steps enabled, build-timestamp step disabled.
STEPS = build_standard_deploy_steps(
push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
include_sentry=True,
include_build_timestamp=False,
)
def main() -> int:
    """Run the shared deploy workflow with this file's step table."""
    result = run_deploy_workflow(STEPS)
    return result
# Exit with main()'s return code when executed as a script.
if __name__ == "__main__":
raise SystemExit(main())

View File

@@ -0,0 +1,248 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_steps import (
ADD_KNOWN_HOSTS_SCRIPT,
INSTALL_DOCKER_PUSSH_SCRIPT,
INSTALL_RCLONE_SCRIPT,
record_deploy_commit_script,
rclone_config_script,
set_build_timestamp_script,
)
from ci_workflow import parse_step_env_args
from ci_utils import run_step
STEPS: dict[str, str] = {
"install_dependencies": """
set -euo pipefail
cd fluxer_app
pnpm install --frozen-lockfile
""",
"run_lingui": """
set -euo pipefail
cd fluxer_app
pnpm lingui:extract
pnpm lingui:compile --strict
""",
"record_deploy_commit": record_deploy_commit_script(
include_env=True,
include_sentry=False,
),
"install_wasm_pack": """
set -euo pipefail
if ! command -v wasm-pack >/dev/null 2>&1; then
cargo install wasm-pack --version 0.13.1
fi
""",
"generate_wasm": """
set -euo pipefail
cd fluxer_app
pnpm wasm:codegen
""",
"add_known_hosts": ADD_KNOWN_HOSTS_SCRIPT,
"fetch_deployment_config": """
set -euo pipefail
if [[ "${RELEASE_CHANNEL}" == "canary" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
ssh "${SERVER}" "cat ${CONFIG_PATH}" > fluxer_app/config.json
""",
"build_application": """
set -euo pipefail
cd fluxer_app
pnpm build
node -e "const fs = require('fs'); const {execSync} = require('child_process'); const cfg = JSON.parse(fs.readFileSync(process.env.FLUXER_CONFIG, 'utf8')); const app = cfg.app_public || {}; let sha = app.build_sha || ''; if (!sha) { try { sha = execSync('git rev-parse --short HEAD', {stdio:['ignore','pipe','ignore']}).toString().trim(); } catch {} } const timestamp = Number(app.build_timestamp ?? Math.floor(Date.now() / 1000)); const buildNumber = Number(app.build_number ?? 0); const env = app.project_env ?? cfg.sentry?.release_channel ?? cfg.env ?? ''; const payload = { sha, buildNumber, timestamp, env }; fs.writeFileSync('dist/version.json', JSON.stringify(payload, null, 2));"
""",
"install_rclone": INSTALL_RCLONE_SCRIPT,
"upload_assets": rclone_config_script(
endpoint="https://s3.us-east-va.io.cloud.ovh.us",
acl="public-read",
expand_vars=True,
)
+ """
rclone copy fluxer_app/dist/assets ovh:fluxer-static/assets \
--transfers 32 \
--checkers 16 \
--size-only \
--fast-list \
--s3-upload-concurrency 8 \
--s3-chunk-size 16M \
-v
""",
"set_build_timestamp": set_build_timestamp_script(),
"install_docker_pussh": INSTALL_DOCKER_PUSSH_SCRIPT,
"push_and_deploy": """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" \
"IMAGE_TAG=${IMAGE_TAG} SERVICE_NAME=${SERVICE_NAME} COMPOSE_STACK=${COMPOSE_STACK} RELEASE_CHANNEL=${RELEASE_CHANNEL} APP_REPLICAS=${APP_REPLICAS} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${RELEASE_CHANNEL}" == "canary" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
read -r CADDY_APP_DOMAIN SENTRY_CADDY_DOMAIN <<EOF
$(python3 - <<'PY' "${CONFIG_PATH}"
import sys, json
from urllib.parse import urlparse
path = sys.argv[1]
with open(path, 'r') as f:
cfg = json.load(f)
domain = cfg.get('domain', {})
overrides = cfg.get('endpoint_overrides', {})
def build_url(scheme, base_domain, port, path=''):
standard = (scheme == 'http' and port == 80) or (scheme == 'https' and port == 443) or (scheme == 'ws' and port == 80) or (scheme == 'wss' and port == 443)
port_part = f":{port}" if port and not standard else ""
return f"{scheme}://{base_domain}{port_part}{path}"
def derive_domain(key):
if key == 'cdn':
return domain.get('cdn_domain') or domain.get('base_domain')
if key == 'invite':
return domain.get('invite_domain') or domain.get('base_domain')
if key == 'gift':
return domain.get('gift_domain') or domain.get('base_domain')
return domain.get('base_domain')
public_scheme = domain.get('public_scheme', 'https')
public_port = domain.get('public_port', 443 if public_scheme == 'https' else 80)
derived_app = build_url(public_scheme, derive_domain('app'), public_port)
app_url = (overrides.get('app') or derived_app).strip()
parsed_app = urlparse(app_url)
app_host = parsed_app.netloc or parsed_app.path
sentry_host_raw = (cfg.get('services', {}).get('app_proxy', {}).get('sentry_report_host') or '').strip()
if sentry_host_raw and not sentry_host_raw.startswith('http'):
sentry_host_raw = f"https://{sentry_host_raw}"
sentry_host = urlparse(sentry_host_raw).netloc if sentry_host_raw else ''
print(f"{app_host} {sentry_host}")
PY
)
EOF
if [[ "${RELEASE_CHANNEL}" == "canary" ]]; then
API_TARGET="fluxer-api-canary_app"
else
API_TARGET="fluxer-api_app"
fi
SENTRY_REPORT_HOST="$(
python3 - <<'PY' "${CONFIG_PATH}"
import sys, json
path = sys.argv[1]
with open(path, 'r') as f:
cfg = json.load(f)
app_proxy = cfg.get('services', {}).get('app_proxy', {})
host = (app_proxy.get('sentry_report_host') or '').rstrip('/')
print(host)
PY
)"
sudo mkdir -p "/opt/${SERVICE_NAME}"
sudo chown -R "${USER}:${USER}" "/opt/${SERVICE_NAME}"
cd "/opt/${SERVICE_NAME}"
cat > compose.yaml << COMPOSEEOF
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
x-common-caddy-headers: &common_caddy_headers
caddy.header.Strict-Transport-Security: "max-age=31536000; includeSubDomains; preload"
caddy.header.X-Xss-Protection: "1; mode=block"
caddy.header.X-Content-Type-Options: "nosniff"
caddy.header.Referrer-Policy: "strict-origin-when-cross-origin"
caddy.header.X-Frame-Options: "DENY"
caddy.header.Expect-Ct: "max-age=86400, report-uri=\\"${SENTRY_REPORT_HOST}/api/4510205815291904/security/?sentry_key=59ced0e2666ab83dd1ddb056cdd22d1b\\""
caddy.header.Cache-Control: "no-store, no-cache, must-revalidate"
caddy.header.Pragma: "no-cache"
caddy.header.Expires: "0"
x-env-base: &env_base
FLUXER_CONFIG: /etc/fluxer/config.json
x-healthcheck: &healthcheck
test: ['CMD', 'curl', '-f', 'http://localhost:8080/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
services:
app:
image: ${IMAGE_TAG}
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
<<: *deploy_base
replicas: ${APP_REPLICAS}
labels:
<<: *common_caddy_headers
caddy: ${CADDY_APP_DOMAIN}
caddy.redir: "/.well-known/fluxer /api/.well-known/fluxer 301"
caddy.handle_path_0: /api*
caddy.handle_path_0.reverse_proxy: "http://${API_TARGET}:8080"
caddy.reverse_proxy: "{{upstreams 8080}}"
environment:
<<: *env_base
networks: [fluxer-shared]
healthcheck: *healthcheck
sentry:
image: ${IMAGE_TAG}
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
<<: *deploy_base
replicas: 1
labels:
<<: *common_caddy_headers
caddy: ${SENTRY_CADDY_DOMAIN}
caddy.reverse_proxy: "{{upstreams 8080}}"
environment:
<<: *env_base
networks: [fluxer-shared]
healthcheck: *healthcheck
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy \
--with-registry-auth \
--detach=false \
--resolve-image never \
-c compose.yaml \
"${COMPOSE_STACK}"
REMOTE_EOF
""",
}
def main() -> int:
    """CLI entry point: execute the step named on the command line."""
    run_step(STEPS, parse_step_env_args(include_server_ip=True).step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,156 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_steps import ADD_KNOWN_HOSTS_SCRIPT, record_deploy_commit_script
from ci_workflow import parse_step_env_args
from ci_utils import run_step
# Step name -> bash snippet run by run_step(). Shell/heredoc bodies are kept
# flush-left because the heredoc terminators must appear at column 0.
STEPS: dict[str, str] = {
    # Build the gateway's prod release profile locally.
    "compile": """
set -euo pipefail
cd fluxer_gateway
rebar3 as prod compile
""",
    "add_known_hosts": ADD_KNOWN_HOSTS_SCRIPT,
    "record_deploy_commit": record_deploy_commit_script(
        include_env=False,
        include_sentry=False,
    ),
    # Hot-reload changed BEAM modules into the running gateway container:
    # compare local vs remote module MD5s, then POST the changed modules to
    # the gateway's /_admin/reload endpoint (self-reloading modules first).
    "deploy": """
set -euo pipefail
CONTAINER_ID="$(ssh "${SERVER}" "docker ps -q --filter label=com.docker.swarm.service.name=fluxer-gateway_app | head -1")"
if [ -z "${CONTAINER_ID}" ]; then
echo "::error::No running container found for service fluxer-gateway_app"
ssh "${SERVER}" "docker ps --filter 'name=fluxer-gateway_app' --format '{{.ID}} {{.Names}} {{.Status}}'" || true
exit 1
fi
echo "Container: ${CONTAINER_ID}"
GATEWAY_HTTP_PORT="8080"
echo "Gateway HTTP port: ${GATEWAY_HTTP_PORT}"
if ! ssh "${SERVER}" "docker exec ${CONTAINER_ID} curl -fsS --max-time 3 http://localhost:${GATEWAY_HTTP_PORT}/_health >/dev/null"; then
echo "::error::Gateway HTTP listener is not reachable on port ${GATEWAY_HTTP_PORT}"
exit 1
fi
LOCAL_MD5_LINES="$(
erl -noshell -eval '
Files = filelib:wildcard("fluxer_gateway/_build/prod/lib/fluxer_gateway/ebin/*.beam"),
lists:foreach(
fun(F) ->
{ok, {M, Md5}} = beam_lib:md5(F),
Hex = binary:encode_hex(Md5, lowercase),
io:format("~s ~s ~s~n", [atom_to_list(M), binary_to_list(Hex), F])
end,
Files
),
halt().'
)"
REMOTE_MD5_LINES="$(
ssh "${SERVER}" "docker exec ${CONTAINER_ID} /opt/fluxer_gateway/bin/fluxer_gateway eval '
Mods = hot_reload:get_loaded_modules(),
lists:foreach(
fun(M) ->
case hot_reload:get_module_info(M) of
{ok, Info} ->
V = maps:get(loaded_md5, Info),
S = case V of
null -> \"null\";
B when is_binary(B) -> binary_to_list(B)
end,
io:format(\"~s ~s~n\", [atom_to_list(M), S]);
_ ->
ok
end
end,
Mods
),
ok.
' " | tr -d '\r'
)"
LOCAL_MD5_FILE="$(mktemp)"
REMOTE_MD5_FILE="$(mktemp)"
CHANGED_FILE_LIST="$(mktemp)"
CHANGED_MAIN_LIST="$(mktemp)"
CHANGED_SELF_LIST="$(mktemp)"
RELOAD_RESULT_MAIN="$(mktemp)"
RELOAD_RESULT_SELF="$(mktemp)"
trap 'rm -f "${LOCAL_MD5_FILE}" "${REMOTE_MD5_FILE}" "${CHANGED_FILE_LIST}" "${CHANGED_MAIN_LIST}" "${CHANGED_SELF_LIST}" "${RELOAD_RESULT_MAIN}" "${RELOAD_RESULT_SELF}"' EXIT
printf '%s' "${LOCAL_MD5_LINES}" > "${LOCAL_MD5_FILE}"
printf '%s' "${REMOTE_MD5_LINES}" > "${REMOTE_MD5_FILE}"
python3 scripts/ci/erlang_hot_reload.py diff-md5 \
"${LOCAL_MD5_FILE}" \
"${REMOTE_MD5_FILE}" \
"${CHANGED_FILE_LIST}"
mapfile -t CHANGED_FILES < "${CHANGED_FILE_LIST}"
if [ "${#CHANGED_FILES[@]}" -eq 0 ]; then
echo "No BEAM changes detected, nothing to hot-reload."
exit 0
fi
echo "Changed modules count: ${#CHANGED_FILES[@]}"
while IFS= read -r p; do
[ -n "${p}" ] || continue
m="$(basename "${p}")"
m="${m%.beam}"
if [ "${m}" = "hot_reload" ] || [ "${m}" = "hot_reload_handler" ]; then
printf '%s\n' "${p}" >> "${CHANGED_SELF_LIST}"
else
printf '%s\n' "${p}" >> "${CHANGED_MAIN_LIST}"
fi
done < "${CHANGED_FILE_LIST}"
build_json() {
python3 scripts/ci/erlang_hot_reload.py build-json "$1"
}
strict_verify() {
python3 scripts/ci/erlang_hot_reload.py verify --mode strict
}
self_verify() {
python3 scripts/ci/erlang_hot_reload.py verify --mode self
}
if [ -s "${CHANGED_SELF_LIST}" ]; then
if ! build_json "${CHANGED_SELF_LIST}" | ssh "${SERVER}" "docker exec -i ${CONTAINER_ID} curl -sS -X POST -H 'Authorization: Bearer ${GATEWAY_ADMIN_SECRET}' -H 'Content-Type: application/json' --data @- http://localhost:${GATEWAY_HTTP_PORT}/_admin/reload" | tee "${RELOAD_RESULT_SELF}" | self_verify; then
echo "::group::Hot reload response (self)"
cat "${RELOAD_RESULT_SELF}" || true
echo "::endgroup::"
exit 1
fi
fi
if [ -s "${CHANGED_MAIN_LIST}" ]; then
if ! build_json "${CHANGED_MAIN_LIST}" | ssh "${SERVER}" "docker exec -i ${CONTAINER_ID} curl -sS -X POST -H 'Authorization: Bearer ${GATEWAY_ADMIN_SECRET}' -H 'Content-Type: application/json' --data @- http://localhost:${GATEWAY_HTTP_PORT}/_admin/reload" | tee "${RELOAD_RESULT_MAIN}" | strict_verify; then
echo "::group::Hot reload response (main)"
cat "${RELOAD_RESULT_MAIN}" || true
echo "::endgroup::"
exit 1
fi
fi
""",
}
def main() -> int:
    """CLI entry point: run the requested hot-reload deploy step."""
    run_step(STEPS, parse_step_env_args(include_server_ip=True).step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" "IMAGE_TAG=${IMAGE_TAG} STACK=${STACK} IS_CANARY=${IS_CANARY} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${IS_CANARY}" == "true" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${STACK}"
sudo chown -R "${USER}:${USER}" "/opt/${STACK}"
cd "/opt/${STACK}"
cat > compose.yaml << COMPOSEEOF
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
x-healthcheck: &healthcheck
test: ['CMD', 'curl', '-f', 'http://localhost:6380/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 5s
services:
app:
image: ${IMAGE_TAG}
deploy:
<<: *deploy_base
replicas: 1
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
networks: [fluxer-shared]
healthcheck: *healthcheck
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy \
--with-registry-auth \
--detach=false \
--resolve-image never \
-c compose.yaml \
"${STACK}"
REMOTE_EOF
"""
# Standard deploy workflow with this service's push-and-deploy script injected.
STEPS = build_standard_deploy_steps(
    push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
)


def main() -> int:
    """Entry point: run the deploy workflow step selected on the CLI."""
    return run_deploy_workflow(STEPS)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,132 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" \
"IMAGE_TAG=${IMAGE_TAG} STACK=${STACK} IS_CANARY=${IS_CANARY} CADDY_DOMAIN=${CADDY_DOMAIN} RELEASE_CHANNEL=${RELEASE_CHANNEL} APP_REPLICAS=${APP_REPLICAS} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${IS_CANARY}" == "true" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${STACK}"
sudo chown -R "${USER}:${USER}" "/opt/${STACK}"
cd "/opt/${STACK}"
cat > compose.yaml << COMPOSEEOF
services:
app:
image: ${IMAGE_TAG}
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
replicas: ${APP_REPLICAS}
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
labels:
caddy: "${CADDY_DOMAIN}"
caddy.reverse_proxy: "{{upstreams 8080}}"
caddy.header.Strict-Transport-Security: "max-age=31536000; includeSubDomains; preload"
caddy.header.X-Xss-Protection: "1; mode=block"
caddy.header.X-Content-Type-Options: "nosniff"
caddy.header.Referrer-Policy: "strict-origin-when-cross-origin"
caddy.header.X-Frame-Options: "DENY"
COMPOSEEOF
if [[ "${IS_CANARY}" == "true" ]]; then
cat >> compose.yaml << 'COMPOSEEOF'
caddy.header.X-Robots-Tag: "noindex, nofollow, nosnippet, noimageindex"
caddy.@channels.path: "/channels /channels/*"
caddy.redir: "@channels https://web.canary.fluxer.app{uri}"
caddy.redir_0: "/.well-known/fluxer https://api.canary.fluxer.app/.well-known/fluxer 301"
COMPOSEEOF
else
cat >> compose.yaml << 'COMPOSEEOF'
caddy.redir_0: "/channels/* https://web.fluxer.app{uri}"
caddy.redir_1: "/channels https://web.fluxer.app{uri}"
caddy.redir_2: "/delete-my-account /help/delete-account 302"
caddy.redir_3: "/delete-my-data /help/data-deletion 302"
caddy.redir_4: "/export-my-data /help/data-export 302"
caddy.redir_5: "/bugs /help/report-bug 302"
caddy_1: "www.fluxer.app"
caddy_1.redir: "https://fluxer.app{uri}"
caddy_3: "fluxer.gg"
caddy_3.@fluxer_gg_root.path: "/"
caddy_3.redir_0: "@fluxer_gg_root https://fluxer.app"
caddy_3.redir_1: "https://web.fluxer.app/invite{uri}"
caddy_4: "fluxer.gift"
caddy_4.@fluxer_gift_root.path: "/"
caddy_4.redir_0: "@fluxer_gift_root https://fluxer.app"
caddy_4.redir_1: "https://web.fluxer.app/gift{uri}"
caddy_5: "fluxerapp.com"
caddy_5.redir: "https://fluxer.app{uri}"
caddy_6: "www.fluxerapp.com"
caddy_6.redir: "https://fluxer.app{uri}"
caddy_7: "fluxer.dev"
caddy_7.redir: "https://docs.fluxer.app{uri}"
caddy_8: "www.fluxer.dev"
caddy_8.redir: "https://docs.fluxer.app{uri}"
caddy.redir_9: "/.well-known/fluxer https://api.fluxer.app/.well-known/fluxer 301"
COMPOSEEOF
fi
cat >> compose.yaml << 'COMPOSEEOF'
networks:
- fluxer-shared
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8080/']
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy \
--with-registry-auth \
--detach=false \
--resolve-image never \
-c compose.yaml \
"${STACK}"
REMOTE_EOF
"""
# Standard deploy workflow with this service's push-and-deploy script injected.
STEPS = build_standard_deploy_steps(
    push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
)


def main() -> int:
    """Entry point: run the deploy workflow step selected on the CLI."""
    return run_deploy_workflow(STEPS)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" "IMAGE_TAG=${IMAGE_TAG} SERVICE_NAME=${SERVICE_NAME} COMPOSE_STACK=${COMPOSE_STACK} RELEASE_CHANNEL=${RELEASE_CHANNEL} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${RELEASE_CHANNEL}" == "canary" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${SERVICE_NAME}"
sudo chown -R "${USER}:${USER}" "/opt/${SERVICE_NAME}"
cd "/opt/${SERVICE_NAME}"
cat > compose.yaml << COMPOSEEOF
services:
app:
image: ${IMAGE_TAG}
command: ['pnpm', 'start']
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
replicas: 2
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
labels:
- 'caddy=http://fluxerusercontent.com'
- 'caddy.reverse_proxy={{upstreams 8080}}'
- 'caddy.header.X-Robots-Tag="noindex, nofollow, nosnippet, noimageindex"'
- 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
- 'caddy.header.X-Xss-Protection="1; mode=block"'
- 'caddy.header.X-Content-Type-Options=nosniff'
- 'caddy.header.Referrer-Policy=strict-origin-when-cross-origin'
- 'caddy.header.X-Frame-Options=DENY'
- 'caddy.header.Expect-Ct="max-age=86400, report-uri=\"https://o4510149383094272.ingest.us.sentry.io/api/4510205811556352/security/?sentry_key=2670068cd12b6a62f3a30a7f0055f0f1\""'
networks:
- fluxer-shared
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8080/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy --with-registry-auth --detach=false --resolve-image never -c compose.yaml "${COMPOSE_STACK}"
REMOTE_EOF
"""
# Standard deploy workflow with this service's push-and-deploy script injected.
STEPS = build_standard_deploy_steps(
    push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
)


def main() -> int:
    """Entry point: run the deploy workflow step selected on the CLI."""
    return run_deploy_workflow(STEPS)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" "IMAGE_TAG=${IMAGE_TAG} STACK=${STACK} RELEASE_CHANNEL=${RELEASE_CHANNEL} IS_CANARY=${IS_CANARY} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${IS_CANARY}" == "true" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${STACK}"
sudo chown -R "${USER}:${USER}" "/opt/${STACK}"
cd "/opt/${STACK}"
cat > compose.yaml << COMPOSEEOF
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
x-healthcheck: &healthcheck
test: ['CMD', 'curl', '-f', 'http://localhost:8080/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 5s
services:
queue:
image: ${IMAGE_TAG}
deploy:
<<: *deploy_base
replicas: 1
volumes:
- queue_data:/data
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
networks: [fluxer-shared]
healthcheck: *healthcheck
volumes:
queue_data:
driver: local
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy \
--with-registry-auth \
--detach=false \
--resolve-image never \
-c compose.yaml \
"${STACK}"
REMOTE_EOF
"""
# Standard deploy workflow with this service's push-and-deploy script injected.
STEPS = build_standard_deploy_steps(
    push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
)


def main() -> int:
    """Entry point: run the deploy workflow step selected on the CLI."""
    return run_deploy_workflow(STEPS)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,149 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_steps import ADD_KNOWN_HOSTS_SCRIPT, record_deploy_commit_script
from ci_workflow import parse_step_env_args
from ci_utils import run_step
# Step name -> bash snippet run by run_step(). Shell/heredoc bodies are kept
# flush-left because the heredoc terminators must appear at column 0.
STEPS: dict[str, str] = {
    # Build the relay's prod release profile locally.
    "compile": """
set -euo pipefail
cd fluxer_relay
rebar3 as prod compile
""",
    "add_known_hosts": ADD_KNOWN_HOSTS_SCRIPT,
    "record_deploy_commit": record_deploy_commit_script(
        include_env=False,
        include_sentry=False,
    ),
    # Hot-reload changed BEAM modules into the running relay container:
    # compare local vs remote module MD5s, then POST changed modules to the
    # relay's /_admin/reload endpoint (self-reloading modules first).
    "deploy": """
set -euo pipefail
CONTAINER_ID="$(ssh "${SERVER}" "docker ps -q --filter label=com.docker.swarm.service.name=fluxer_relay_app | head -1")"
if [ -z "${CONTAINER_ID}" ]; then
echo "::error::No running container found for service fluxer_relay_app"
ssh "${SERVER}" "docker ps --filter 'name=fluxer_relay_app' --format '{{.ID}} {{.Names}} {{.Status}}'" || true
exit 1
fi
echo "Container: ${CONTAINER_ID}"
LOCAL_MD5_LINES="$(
erl -noshell -eval '
Files = filelib:wildcard("fluxer_relay/_build/prod/lib/fluxer_relay/ebin/*.beam"),
lists:foreach(
fun(F) ->
{ok, {M, Md5}} = beam_lib:md5(F),
Hex = binary:encode_hex(Md5, lowercase),
io:format("~s ~s ~s~n", [atom_to_list(M), binary_to_list(Hex), F])
end,
Files
),
halt().'
)"
REMOTE_MD5_LINES="$(
ssh "${SERVER}" "docker exec ${CONTAINER_ID} /opt/fluxer_relay/bin/fluxer_relay eval '
Mods = hot_reload:get_loaded_modules(),
lists:foreach(
fun(M) ->
case hot_reload:get_module_info(M) of
{ok, Info} ->
V = maps:get(loaded_md5, Info),
S = case V of
null -> \"null\";
B when is_binary(B) -> binary_to_list(B)
end,
io:format(\"~s ~s~n\", [atom_to_list(M), S]);
_ ->
ok
end
end,
Mods
),
ok.
' " | tr -d '\r'
)"
LOCAL_MD5_FILE="$(mktemp)"
REMOTE_MD5_FILE="$(mktemp)"
CHANGED_FILE_LIST="$(mktemp)"
CHANGED_MAIN_LIST="$(mktemp)"
CHANGED_SELF_LIST="$(mktemp)"
RELOAD_RESULT_MAIN="$(mktemp)"
RELOAD_RESULT_SELF="$(mktemp)"
trap 'rm -f "${LOCAL_MD5_FILE}" "${REMOTE_MD5_FILE}" "${CHANGED_FILE_LIST}" "${CHANGED_MAIN_LIST}" "${CHANGED_SELF_LIST}" "${RELOAD_RESULT_MAIN}" "${RELOAD_RESULT_SELF}"' EXIT
printf '%s' "${LOCAL_MD5_LINES}" > "${LOCAL_MD5_FILE}"
printf '%s' "${REMOTE_MD5_LINES}" > "${REMOTE_MD5_FILE}"
python3 scripts/ci/erlang_hot_reload.py diff-md5 \
"${LOCAL_MD5_FILE}" \
"${REMOTE_MD5_FILE}" \
"${CHANGED_FILE_LIST}"
mapfile -t CHANGED_FILES < "${CHANGED_FILE_LIST}"
if [ "${#CHANGED_FILES[@]}" -eq 0 ]; then
echo "No BEAM changes detected, nothing to hot-reload."
exit 0
fi
echo "Changed modules count: ${#CHANGED_FILES[@]}"
while IFS= read -r p; do
[ -n "${p}" ] || continue
m="$(basename "${p}")"
m="${m%.beam}"
if [ "${m}" = "hot_reload" ] || [ "${m}" = "hot_reload_handler" ]; then
printf '%s\n' "${p}" >> "${CHANGED_SELF_LIST}"
else
printf '%s\n' "${p}" >> "${CHANGED_MAIN_LIST}"
fi
done < "${CHANGED_FILE_LIST}"
build_json() {
python3 scripts/ci/erlang_hot_reload.py build-json "$1"
}
strict_verify() {
python3 scripts/ci/erlang_hot_reload.py verify --mode strict
}
self_verify() {
python3 scripts/ci/erlang_hot_reload.py verify --mode self
}
if [ -s "${CHANGED_SELF_LIST}" ]; then
if ! build_json "${CHANGED_SELF_LIST}" | ssh "${SERVER}" "docker exec -i ${CONTAINER_ID} curl -sS -X POST -H 'Authorization: Bearer ${RELAY_ADMIN_SECRET}' -H 'Content-Type: application/json' --data @- http://localhost:8081/_admin/reload" | tee "${RELOAD_RESULT_SELF}" | self_verify; then
echo "::group::Hot reload response (self)"
cat "${RELOAD_RESULT_SELF}" || true
echo "::endgroup::"
exit 1
fi
fi
if [ -s "${CHANGED_MAIN_LIST}" ]; then
if ! build_json "${CHANGED_MAIN_LIST}" | ssh "${SERVER}" "docker exec -i ${CONTAINER_ID} curl -sS -X POST -H 'Authorization: Bearer ${RELAY_ADMIN_SECRET}' -H 'Content-Type: application/json' --data @- http://localhost:8081/_admin/reload" | tee "${RELOAD_RESULT_MAIN}" | strict_verify; then
echo "::group::Hot reload response (main)"
cat "${RELOAD_RESULT_MAIN}" || true
echo "::endgroup::"
exit 1
fi
fi
""",
}
def main() -> int:
    """CLI entry point: run the requested relay hot-reload step."""
    run_step(STEPS, parse_step_env_args(include_server_ip=True).step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" "IMAGE_TAG=${IMAGE_TAG} STACK=${STACK} IS_CANARY=${IS_CANARY} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${IS_CANARY}" == "true" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${STACK}"
sudo chown -R "${USER}:${USER}" "/opt/${STACK}"
cd "/opt/${STACK}"
cat > compose.yaml << COMPOSEEOF
x-deploy-base: &deploy_base
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
x-healthcheck: &healthcheck
test: ['CMD', 'curl', '-f', 'http://localhost:8080/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 5s
services:
app:
image: ${IMAGE_TAG}
deploy:
<<: *deploy_base
replicas: 1
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
networks: [fluxer-shared]
healthcheck: *healthcheck
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy \
--with-registry-auth \
--detach=false \
--resolve-image never \
-c compose.yaml \
"${STACK}"
REMOTE_EOF
"""
# Standard deploy workflow with this service's push-and-deploy script injected.
STEPS = build_standard_deploy_steps(
    push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
)


def main() -> int:
    """Entry point: run the deploy workflow step selected on the CLI."""
    return run_deploy_workflow(STEPS)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,89 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" "IMAGE_TAG=${IMAGE_TAG} SERVICE_NAME=${SERVICE_NAME} COMPOSE_STACK=${COMPOSE_STACK} RELEASE_CHANNEL=${RELEASE_CHANNEL} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${RELEASE_CHANNEL}" == "canary" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${SERVICE_NAME}"
sudo chown -R "${USER}:${USER}" "/opt/${SERVICE_NAME}"
cd "/opt/${SERVICE_NAME}"
cat > compose.yaml << COMPOSEEOF
services:
app:
image: ${IMAGE_TAG}
command: ['pnpm', 'start']
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
- FLUXER_CONFIG__SERVICES__MEDIA_PROXY__STATIC_MODE=true
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
replicas: 2
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
labels:
- 'caddy=http://fluxerstatic.com'
- 'caddy.reverse_proxy={{upstreams 8080}}'
- 'caddy.header.X-Robots-Tag="noindex, nofollow, nosnippet, noimageindex"'
- 'caddy.header.Strict-Transport-Security="max-age=31536000; includeSubDomains; preload"'
- 'caddy.header.X-Xss-Protection="1; mode=block"'
- 'caddy.header.X-Content-Type-Options=nosniff'
- 'caddy.header.Referrer-Policy=strict-origin-when-cross-origin'
- 'caddy.header.X-Frame-Options=DENY'
- 'caddy.header.Expect-Ct="max-age=86400, report-uri=\"https://o4510149383094272.ingest.us.sentry.io/api/4510205811556352/security/?sentry_key=2670068cd12b6a62f3a30a7f0055f0f1\""'
networks:
- fluxer-shared
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8080/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy --with-registry-auth --detach=false --resolve-image never -c compose.yaml "${COMPOSE_STACK}"
REMOTE_EOF
"""
# Standard deploy workflow with this service's push-and-deploy script injected.
STEPS = build_standard_deploy_steps(
    push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
)


def main() -> int:
    """Entry point: run the deploy workflow step selected on the CLI."""
    return run_deploy_workflow(STEPS)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,106 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_steps import ADD_KNOWN_HOSTS_SCRIPT
from ci_workflow import EnvArg, parse_step_env_args
from ci_utils import run_step
# Cassandra migration steps: validate migrations, open an SSH tunnel to the
# remote Cassandra (port 9042), run migrations through it, then tear down.
STEPS: dict[str, str] = {
    "install_dependencies": """
set -euo pipefail
cd fluxer_api
pnpm install --frozen-lockfile
""",
    "validate_migrations": """
set -euo pipefail
cd fluxer_api
pnpm tsx scripts/CassandraMigrate.tsx check
""",
    "add_known_hosts": ADD_KNOWN_HOSTS_SCRIPT,
    # Background SSH tunnel; PID is recorded in a file and in GITHUB_ENV so
    # close_tunnel can kill it even from a later step.
    "setup_tunnel": """
set -euo pipefail
TUNNEL_PID_FILE=/tmp/ssh-tunnel.pid
rm -f "$TUNNEL_PID_FILE"
nohup ssh -N -o ConnectTimeout=30 -o ServerAliveInterval=10 -o ServerAliveCountMax=30 -o ExitOnForwardFailure=yes -L 9042:localhost:9042 ${SERVER_USER}@${SERVER_IP} > /tmp/ssh-tunnel.log 2>&1 &
SSH_TUNNEL_PID=$!
printf '%s\n' "$SSH_TUNNEL_PID" > "$TUNNEL_PID_FILE"
printf 'SSH_TUNNEL_PID=%s\n' "$SSH_TUNNEL_PID" >> "$GITHUB_ENV"
for i in {1..30}; do
if timeout 1 bash -c "echo > /dev/tcp/localhost/9042" 2>/dev/null; then
echo "SSH tunnel established"
break
elif command -v ss >/dev/null 2>&1 && ss -tln | grep -q ":9042 "; then
echo "SSH tunnel established"
break
elif command -v netstat >/dev/null 2>&1 && netstat -tln | grep -q ":9042 "; then
echo "SSH tunnel established"
break
fi
if [ $i -eq 30 ]; then
cat /tmp/ssh-tunnel.log || true
exit 1
fi
sleep 1
done
ps -p "$SSH_TUNNEL_PID" > /dev/null || exit 1
""",
    "test_connection": """
set -euo pipefail
cd fluxer_api
pnpm tsx scripts/CassandraMigrate.tsx \
--host localhost \
--port 9042 \
--username "${CASSANDRA_USERNAME}" \
--password "${CASSANDRA_PASSWORD}" \
test
""",
    "run_migrations": """
set -euo pipefail
cd fluxer_api
pnpm tsx scripts/CassandraMigrate.tsx \
--host localhost \
--port 9042 \
--username "${CASSANDRA_USERNAME}" \
--password "${CASSANDRA_PASSWORD}" \
up
""",
    # Best-effort cleanup: kill by env PID and by the PID file, then remove both.
    "close_tunnel": """
set -euo pipefail
TUNNEL_PID_FILE=/tmp/ssh-tunnel.pid
if [ -n "${SSH_TUNNEL_PID:-}" ]; then
kill "$SSH_TUNNEL_PID" 2>/dev/null || true
fi
if [ -f "$TUNNEL_PID_FILE" ]; then
read -r TUNNEL_PID < "$TUNNEL_PID_FILE" || true
if [ -n "${TUNNEL_PID:-}" ]; then
kill "$TUNNEL_PID" 2>/dev/null || true
fi
fi
rm -f "$TUNNEL_PID_FILE" /tmp/ssh-tunnel.log || true
""",
}
def main() -> int:
    """Entry point: parse env-backed CLI flags and run the requested step."""
    extra_flags = [EnvArg("--server-user", "SERVER_USER")]
    parsed = parse_step_env_args(extra_flags, include_server_ip=True)
    run_step(STEPS, parsed.step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,73 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import EnvArg, parse_step_env_args
from ci_utils import run_step
# Fast-forward branch promotion steps (src -> dst) run via run_step().
STEPS: dict[str, str] = {
    # Verify the promotion is a pure fast-forward, emit the commit count as a
    # step output, and write a summary table to GITHUB_STEP_SUMMARY.
    "verify": """
set -euo pipefail
src="${SRC}"
dst="${DST}"
git fetch origin "${dst}" "${src}" --prune
# Ensure HEAD is exactly origin/src
git reset --hard "origin/${src}"
# FF-only requirement: dst must be an ancestor of src
if ! git merge-base --is-ancestor "origin/${dst}" "origin/${src}"; then
echo "::error::Cannot fast-forward: origin/${dst} is not an ancestor of origin/${src} (branches diverged)."
exit 1
fi
ahead="$(git rev-list --count "origin/${dst}..origin/${src}")"
echo "ahead=$ahead" >> "$GITHUB_OUTPUT"
{
echo "## Promote \`${src}\` → \`${dst}\` (ff-only)"
echo ""
echo "- \`${dst}\`: \`$(git rev-parse "origin/${dst}")\`"
echo "- \`${src}\`: \`$(git rev-parse "origin/${src}")\`"
echo "- Commits to promote: **${ahead}**"
echo ""
echo "### Commits"
if [ "$ahead" -eq 0 ]; then
echo "_Nothing to promote._"
else
git log --oneline --decorate "origin/${dst}..origin/${src}"
fi
} >> "$GITHUB_STEP_SUMMARY"
""",
    "push": """
set -euo pipefail
dst="${DST}"
# Push src HEAD to dst (no merge commit, same SHAs)
git push origin "HEAD:refs/heads/${dst}"
""",
    "dry_run": """
echo "No push performed (dry_run=${DRY_RUN}, ahead=${AHEAD})."
""",
}
def main() -> int:
    """Entry point: bind env vars from CLI flags, then run the chosen step."""
    env_flags = [
        EnvArg("--src", "SRC"),
        EnvArg("--dst", "DST"),
        EnvArg("--dry-run", "DRY_RUN"),
        EnvArg("--ahead", "AHEAD"),
    ]
    parsed = parse_step_env_args(env_flags)
    run_step(STEPS, parsed.step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,122 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from cli_release import (
determine_cli_version,
generate_checksums,
prepare_release_assets,
write_cli_version_outputs,
)
from ci_workflow import EnvArg, apply_env_args, build_step_parser
from ci_utils import require_env, run_step
# Released binaries are named "livekitctl-<goos>-<goarch>".
BINARY_PREFIX = "livekitctl"
# Release tags look like "livekitctl-v1.2.3".
TAG_PREFIX = "livekitctl-v"
# Go module directory containing the CLI source.
PROJECT_DIR = pathlib.Path("fluxer_devops/livekitctl")
def determine_version_step() -> None:
    """Resolve the release version from the triggering event and publish it
    as GitHub Actions outputs."""
    import os

    require_env(["EVENT_NAME"])
    environ = os.environ
    version_info = determine_cli_version(
        event_name=environ["EVENT_NAME"],
        input_version=environ.get("INPUT_VERSION", ""),
        ref_name=environ.get("REF_NAME", ""),
        tag_prefix=TAG_PREFIX,
    )
    write_cli_version_outputs(version_info)
def build_binary_step() -> None:
    """Cross-compile the CLI for the GOOS/GOARCH pair exported by the matrix job.

    NOTE: ${{GOOS}} / ${{GOARCH}} are doubled to survive the f-string; the
    shell expands the environment variables at run time.
    """
    from ci_utils import run_bash

    run_bash(
        f"""
set -euo pipefail
cd {PROJECT_DIR}
go build -ldflags=\"-s -w\" -o {BINARY_PREFIX}-${{GOOS}}-${{GOARCH}} .
"""
    )
def prepare_release_assets_step(artifacts_dir: pathlib.Path, release_dir: pathlib.Path) -> None:
    """Stage downloaded build artifacts into the release directory."""
    prepare_release_assets(
        binary_prefix=BINARY_PREFIX,
        artifacts_dir=artifacts_dir,
        release_dir=release_dir,
    )
def generate_checksums_step(release_dir: pathlib.Path) -> None:
    """Write checksums.txt covering every release binary in *release_dir*.

    The glob result is sorted because Path.glob yields entries in an
    unspecified order; sorting makes checksums.txt deterministic across runs.
    """
    files = sorted(release_dir.glob(f"{BINARY_PREFIX}-*"))
    generate_checksums(files, release_dir / "checksums.txt")
def create_tag_step() -> None:
    """Create and push the annotated release tag.

    Requires TAG and VERSION in the environment; fails fast otherwise.
    """
    # run_bash is imported lazily like the other step helpers; require_env is
    # already imported at module level, so the former duplicate local import
    # was removed.
    from ci_utils import run_bash

    require_env(["TAG", "VERSION"])
    run_bash(
        """
set -euo pipefail
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git tag -a "${TAG}" -m "Release livekitctl v${VERSION}"
git push origin "${TAG}"
"""
    )
# Step name -> callable. Steps taking path arguments are dispatched
# specially in main() before falling back to run_step().
STEPS = {
    "determine_version": determine_version_step,
    "build_binary": build_binary_step,
    "prepare_release_assets": prepare_release_assets_step,
    "generate_checksums": generate_checksums_step,
    "create_tag": create_tag_step,
}

# CLI flags mirroring the environment variables consumed by the steps.
ENV_ARGS = [
    EnvArg("--event-name", "EVENT_NAME"),
    EnvArg("--input-version", "INPUT_VERSION"),
    EnvArg("--ref-name", "REF_NAME"),
    EnvArg("--version", "VERSION"),
    EnvArg("--tag", "TAG"),
]
def parse_args():
    """Build the shared step parser and add the path options this script needs."""
    parser = build_step_parser(ENV_ARGS)
    parser.add_argument("--artifacts-dir", default="artifacts")
    parser.add_argument("--release-dir", default="release")
    return parser.parse_args()
def main() -> int:
    """Entry point: route path-taking steps explicitly, else defer to run_step()."""
    parsed = parse_args()
    apply_env_args(parsed, ENV_ARGS)
    artifacts = pathlib.Path(parsed.artifacts_dir)
    release = pathlib.Path(parsed.release_dir)
    if parsed.step == "prepare_release_assets":
        prepare_release_assets_step(artifacts, release)
    elif parsed.step == "generate_checksums":
        generate_checksums_step(release)
    else:
        run_step(STEPS, parsed.step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,26 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from release_workflow import build_release_steps, run_release_workflow
# Shared release steps for the Fluxer Relay image.
STEPS = build_release_steps(
    title="Fluxer Relay release",
    image_name_env="IMAGE_NAME",
)


def main() -> int:
    """Entry point: run the shared release workflow for Fluxer Relay."""
    return run_release_workflow(
        title="Fluxer Relay release",
        image_name_arg="--image-name",
        image_name_env="IMAGE_NAME",
    )


if __name__ == "__main__":
    raise SystemExit(main())
View File

@@ -0,0 +1,26 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from release_workflow import build_release_steps, run_release_workflow
STEPS = build_release_steps(
title="Fluxer Relay Directory release",
image_name_env="IMAGE_NAME",
)
def main() -> int:
    """Delegate to the shared release workflow runner for the directory image."""
    workflow_kwargs = {
        "title": "Fluxer Relay Directory release",
        "image_name_arg": "--image-name",
        "image_name_env": "IMAGE_NAME",
    }
    return run_release_workflow(**workflow_kwargs)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import EnvArg
from ci_utils import require_env, write_github_output
from release_workflow import build_release_steps, run_release_workflow
def determine_build_targets_step() -> None:
    """Emit a 'server' step output that controls whether the server image builds.

    On a manual workflow_dispatch the operator's BUILD_SERVER_INPUT (defaulting
    to the empty string when unset) decides; every other trigger builds
    unconditionally. Only EVENT_NAME is required to be present.
    """
    import os

    require_env(["EVENT_NAME"])
    if os.environ["EVENT_NAME"] == "workflow_dispatch":
        server_flag = os.environ.get("BUILD_SERVER_INPUT", "")
    else:
        server_flag = "true"
    write_github_output({"server": server_flag})
# Extra env-var-backed CLI flags consumed by determine_build_targets_step.
EXTRA_ENV_ARGS = [
    EnvArg("--event-name", "EVENT_NAME"),
    EnvArg("--build-server-input", "BUILD_SERVER_INPUT"),
]
# NOTE(review): main() passes extra_steps to run_release_workflow() directly
# and never references STEPS; confirm this module-level table is consumed by
# an importer rather than being redundant.
STEPS = build_release_steps(
    title="Fluxer Server release",
    image_name_env="IMAGE_NAME_SERVER",
    extra_steps={"determine_build_targets": determine_build_targets_step},
)
def main() -> int:
    """Delegate to the shared release workflow runner for the server image."""
    return run_release_workflow(
        extra_env_args=EXTRA_ENV_ARGS,
        extra_steps={"determine_build_targets": determine_build_targets_step},
        image_name_arg="--image-name-server",
        image_name_env="IMAGE_NAME_SERVER",
        title="Fluxer Server release",
    )


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,103 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import EnvArg
from deploy_workflow import build_standard_deploy_steps, run_deploy_workflow
VALIDATE_CONFIRMATION_STEP = """
set -euo pipefail
if [ "${CONFIRMATION}" != "RESTART" ]; then
echo "::error::Confirmation failed. You must type 'RESTART' to proceed with a full restart."
echo "::error::For regular updates, use deploy-gateway.yaml instead."
exit 1
fi
"""
PUSH_AND_DEPLOY_SCRIPT = """
set -euo pipefail
docker pussh "${IMAGE_TAG}" "${SERVER}"
ssh "${SERVER}" "IMAGE_TAG=${IMAGE_TAG} SERVICE_NAME=${SERVICE_NAME} COMPOSE_STACK=${COMPOSE_STACK} RELEASE_CHANNEL=${RELEASE_CHANNEL} bash" << 'REMOTE_EOF'
set -euo pipefail
if [[ "${RELEASE_CHANNEL}" == "canary" ]]; then
CONFIG_PATH="/etc/fluxer/config.canary.json"
else
CONFIG_PATH="/etc/fluxer/config.stable.json"
fi
sudo mkdir -p "/opt/${SERVICE_NAME}"
sudo chown -R "${USER}:${USER}" "/opt/${SERVICE_NAME}"
cd "/opt/${SERVICE_NAME}"
cat > compose.yaml << COMPOSEEOF
services:
app:
image: ${IMAGE_TAG}
hostname: "{{.Node.Hostname}}-{{.Task.Slot}}"
environment:
- FLUXER_CONFIG=/etc/fluxer/config.json
- FLUXER_GATEWAY_NODE_FLAG=-sname
- FLUXER_GATEWAY_NODE_NAME=fluxer_gateway_{{.Node.ID}}_{{.Task.Slot}}
volumes:
- ${CONFIG_PATH}:/etc/fluxer/config.json:ro
deploy:
replicas: 1
endpoint_mode: dnsrr
restart_policy:
condition: on-failure
delay: 5s
max_attempts: 3
update_config:
parallelism: 1
delay: 10s
order: start-first
rollback_config:
parallelism: 1
delay: 10s
labels:
- 'caddy_gw=gateway.fluxer.app'
- 'caddy_gw.reverse_proxy={{upstreams 8080}}'
networks:
- fluxer-shared
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:8080/_health']
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
networks:
fluxer-shared:
external: true
COMPOSEEOF
docker stack deploy --with-registry-auth --detach=false --resolve-image never -c compose.yaml "${COMPOSE_STACK}"
REMOTE_EOF
"""
STEPS = {
"validate_confirmation": VALIDATE_CONFIRMATION_STEP,
**build_standard_deploy_steps(
push_and_deploy_script=PUSH_AND_DEPLOY_SCRIPT,
include_build_timestamp=False,
),
}
def main() -> int:
    """Run the deploy workflow with the restart-confirmation flag wired in."""
    confirmation_args = [EnvArg("--confirmation", "CONFIRMATION")]
    return run_deploy_workflow(STEPS, env_args=confirmation_args)


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,103 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_steps import bot_user_id_script
from ci_workflow import EnvArg, parse_step_env_args
from ci_utils import run_step
# Step-name -> bash script table for mirroring the desktop app sources out
# to the standalone fluxerapp/fluxer_desktop repository. The scripts assume
# this repo is checked out at ./source and clone the mirror into ./target.
STEPS: dict[str, str] = {
    # Resolve the bot account's numeric user id (shared script).
    "get_user_id": bot_user_id_script(),
    # Use the explicitly requested branch when given, else mirror the
    # current ref; exposed as the "name" step output.
    "determine_branch": """
set -euo pipefail
if [[ -n "${INPUT_BRANCH}" ]]; then
echo "name=${INPUT_BRANCH}" >> "$GITHUB_OUTPUT"
else
echo "name=${REF_NAME}" >> "$GITHUB_OUTPUT"
fi
""",
    # Shallow-clone the target repo; if the clone fails (presumably an
    # empty repository — confirm), fall back to init + remote add.
    "clone_target": """
set -euo pipefail
git clone --depth 1 "https://x-access-token:${TOKEN}@github.com/fluxerapp/fluxer_desktop.git" target || {
mkdir target
cd target
git init
git remote add origin "https://x-access-token:${TOKEN}@github.com/fluxerapp/fluxer_desktop.git"
}
""",
    # Commit as the GitHub App bot identity.
    "configure_git": """
set -euo pipefail
cd target
git config user.name "${APP_SLUG}[bot]"
git config user.email "${USER_ID}+${APP_SLUG}[bot]@users.noreply.github.com"
""",
    # Check out the branch if it exists on the remote; otherwise start an
    # empty orphan branch so the first sync commit has no parent history.
    "checkout_or_create_branch": """
set -euo pipefail
cd target
BRANCH="${BRANCH_NAME}"
if git ls-remote --exit-code --heads origin "$BRANCH" >/dev/null 2>&1; then
git fetch origin "$BRANCH"
git checkout "$BRANCH"
else
git checkout --orphan "$BRANCH"
git rm -rf . 2>/dev/null || true
fi
""",
    # Replace the target working tree (everything except .git) with the
    # contents of source/fluxer_desktop.
    "sync_files": """
set -euo pipefail
find target -mindepth 1 -maxdepth 1 ! -name '.git' -exec rm -rf {} +
cp -a source/fluxer_desktop/. target/
""",
    # Commit referencing the source commit SHA; exit cleanly (not an
    # error) when the sync produced no changes.
    "commit_and_push": """
set -euo pipefail
cd target
BRANCH="${BRANCH_NAME}"
SOURCE_SHA="$(git -C ../source rev-parse --short HEAD)"
git add -A
if git diff --cached --quiet; then
echo "No changes to commit"
exit 0
fi
git commit -m "Sync from fluxerapp/fluxer @ ${SOURCE_SHA}"
git push origin "HEAD:refs/heads/${BRANCH}"
echo "Synced to fluxerapp/fluxer_desktop:${BRANCH}"
""",
    # Append a Markdown summary to the workflow run page.
    "summary": """
set -euo pipefail
{
echo "## Desktop Sync Complete"
echo ""
echo "- **Source:** \`fluxerapp/fluxer:${BRANCH_NAME}\`"
echo "- **Destination:** \`fluxerapp/fluxer_desktop:${BRANCH_NAME}\`"
echo "- **Commit:** \`$(git -C source rev-parse --short HEAD)\`"
} >> "$GITHUB_STEP_SUMMARY"
""",
}
def main() -> int:
    """Parse the env-backed flags and run the selected desktop-sync step."""
    env_args = [
        EnvArg("--app-slug", "APP_SLUG"),
        EnvArg("--token", "TOKEN"),
        EnvArg("--user-id", "USER_ID"),
        EnvArg("--input-branch", "INPUT_BRANCH"),
        EnvArg("--ref-name", "REF_NAME"),
        EnvArg("--branch-name", "BRANCH_NAME"),
    ]
    parsed = parse_step_env_args(env_args)
    run_step(STEPS, parsed.step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_steps import INSTALL_RCLONE_SCRIPT, rclone_config_script
from ci_workflow import parse_step_env_args
from ci_utils import run_step
# Step-name -> bash script table for pushing site content to object storage.
STEPS: dict[str, str] = {
    # Install rclone on the runner (shared script).
    "install_rclone": INSTALL_RCLONE_SCRIPT,
    # Configure the S3 remote, then mirror the local source into the bucket;
    # "sync" also deletes remote files missing locally, and "assets/**" is
    # excluded from the mirror.
    # NOTE(review): mkdir targets $RCLONE_SOURCE_DIR while the sync source
    # is $RCLONE_SOURCE — confirm the workflow sets both consistently.
    "push": rclone_config_script(
        endpoint="$RCLONE_ENDPOINT",
        acl="private",
        expand_vars=True,
    )
    + """
mkdir -p "$RCLONE_SOURCE_DIR"
rclone sync "$RCLONE_SOURCE" "$RCLONE_REMOTE:$RCLONE_BUCKET" --create-empty-src-dirs --exclude "assets/**"
""",
}
def main() -> int:
    """Run the step selected on the command line."""
    parsed = parse_step_env_args()
    run_step(STEPS, parsed.step)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,281 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import parse_step_env_args
from ci_utils import run_step
# Step-name -> bash script table for the scheduled "restore the latest
# Cassandra backup and prove it works" job. Container/volume names, the
# Cassandra image, heap sizes and the B2/age secrets all arrive via
# environment variables supplied by the workflow.
STEPS = {
    # Derive a per-run scratch directory and publish it via GITHUB_ENV.
    "set_temp_paths": """
set -euo pipefail
: "${RUNNER_TEMP:?RUNNER_TEMP is not set}"
echo "WORKDIR=$RUNNER_TEMP/cassandra-restore-test" >> "$GITHUB_ENV"
""",
    # Best-effort removal of leftovers from a previous run.
    "pre_clean": """
set -euo pipefail
docker rm -f "${CASS_CONTAINER}" "${UTIL_CONTAINER}" 2>/dev/null || true
docker volume rm "${CASS_VOLUME}" 2>/dev/null || true
docker volume rm "${BACKUP_VOLUME}" 2>/dev/null || true
rm -rf "${WORKDIR}" 2>/dev/null || true
""",
    # Install rclone (download) and age (decrypt) on the runner.
    "install_tools": """
set -euo pipefail
sudo apt-get update -y
sudo apt-get install -y --no-install-recommends rclone age ca-certificates
""",
    # Find the newest cassandra-backup-*.tar.age in the bucket (filenames
    # embed a UTC timestamp, so reverse lexical sort yields the latest),
    # fail if it is missing or 3+ hours old, download and age-decrypt it,
    # then unpack it into BACKUP_VOLUME inside a throwaway container.
    # Handles both archive layouts (wrapped in a cassandra-backup-* top
    # directory, or flat) and requires schema.cql to be present.
    "fetch_backup": """
set -euo pipefail
rm -rf "$WORKDIR"
mkdir -p "$WORKDIR"
export RCLONE_CONFIG_B2S3_TYPE=s3
export RCLONE_CONFIG_B2S3_PROVIDER=Other
export RCLONE_CONFIG_B2S3_ACCESS_KEY_ID="${B2_KEY_ID}"
export RCLONE_CONFIG_B2S3_SECRET_ACCESS_KEY="${B2_APPLICATION_KEY}"
export RCLONE_CONFIG_B2S3_ENDPOINT="https://s3.eu-central-003.backblazeb2.com"
export RCLONE_CONFIG_B2S3_REGION="eu-central-003"
export RCLONE_CONFIG_B2S3_FORCE_PATH_STYLE=true
LATEST_BACKUP="$(
rclone lsf "B2S3:fluxer" --recursive --files-only --fast-list \
| grep -E '(^|/)cassandra-backup-[0-9]{8}-[0-9]{6}\.tar\.age$' \
| sort -r \
| head -n 1
)"
if [ -z "${LATEST_BACKUP}" ]; then
echo "Error: No backup found in bucket"
exit 1
fi
echo "LATEST_BACKUP=${LATEST_BACKUP}" >> "$GITHUB_ENV"
base="$(basename "${LATEST_BACKUP}")"
ts="${base#cassandra-backup-}"
ts="${ts%.tar.age}"
if ! [[ "$ts" =~ ^[0-9]{8}-[0-9]{6}$ ]]; then
echo "Error: Could not extract timestamp from backup filename: ${base}"
exit 1
fi
BACKUP_EPOCH="$(date -u -d "${ts:0:8} ${ts:9:2}:${ts:11:2}:${ts:13:2}" +%s)"
CURRENT_EPOCH="$(date -u +%s)"
AGE_HOURS=$(( (CURRENT_EPOCH - BACKUP_EPOCH) / 3600 ))
echo "Backup age: ${AGE_HOURS} hours"
if [ "${AGE_HOURS}" -ge 3 ]; then
echo "Error: Latest backup is ${AGE_HOURS} hours old (threshold: 3 hours)"
exit 1
fi
rclone copyto "B2S3:fluxer/${LATEST_BACKUP}" "${WORKDIR}/backup.tar.age" --fast-list
umask 077
printf '%s' "${AGE_PRIVATE_KEY}" > "${WORKDIR}/age.key"
docker volume create "${BACKUP_VOLUME}"
age -d -i "${WORKDIR}/age.key" "${WORKDIR}/backup.tar.age" \
| docker run --rm -i \
-v "${BACKUP_VOLUME}:/backup" \
--entrypoint bash \
"${CASSANDRA_IMAGE}" -lc '
set -euo pipefail
rm -rf /backup/*
mkdir -p /backup/_tmp
tar -C /backup/_tmp -xf -
top="$(find /backup/_tmp -maxdepth 1 -mindepth 1 -type d -name "cassandra-backup-*" | head -n 1 || true)"
if [ -n "$top" ] && [ -f "$top/schema.cql" ]; then
cp -a "$top"/. /backup/
elif [ -f /backup/_tmp/schema.cql ]; then
cp -a /backup/_tmp/. /backup/
else
echo "Error: schema.cql not found after extraction"
find /backup/_tmp -maxdepth 3 -type f -print | sed -n "1,80p" || true
exit 1
fi
rm -rf /backup/_tmp
'
docker run --rm \
-v "${BACKUP_VOLUME}:/backup:ro" \
--entrypoint bash \
"${CASSANDRA_IMAGE}" -lc '
set -euo pipefail
test -f /backup/schema.cql
echo "Extracted backup layout (top 3 levels):"
find /backup -maxdepth 3 -type d -print | sed -n "1,200p" || true
echo "Sample SSTables (*Data.db):"
find /backup -type f -name "*Data.db" | sed -n "1,30p" || true
'
""",
    # Fresh Docker volume that will become the Cassandra data directory.
    "create_data_volume": """
set -euo pipefail
docker volume create "${CASS_VOLUME}"
""",
    # Copy every non-system keyspace (plus system_schema) from the backup
    # into the data volume, then promote SSTables out of each table's most
    # recent snapshot directory into the live table directory. Fails when
    # nothing was restored or no *Data.db files were promoted.
    "restore_keyspaces": """
set -euo pipefail
docker run --rm \
--name "${UTIL_CONTAINER}" \
-v "${CASS_VOLUME}:/var/lib/cassandra" \
-v "${BACKUP_VOLUME}:/backup:ro" \
--entrypoint bash \
"${CASSANDRA_IMAGE}" -lc '
set -euo pipefail
shopt -s nullglob
BASE=/var/lib/cassandra
DATA_DIR="$BASE/data"
mkdir -p "$DATA_DIR" "$BASE/commitlog" "$BASE/hints" "$BASE/saved_caches"
ROOT=/backup
if [ -d "$ROOT/cassandra_data" ]; then ROOT="$ROOT/cassandra_data"; fi
if [ -d "$ROOT/data" ]; then ROOT="$ROOT/data"; fi
echo "Using backup ROOT=$ROOT"
echo "Restoring into DATA_DIR=$DATA_DIR"
restored=0
for keyspace_dir in "$ROOT"/*/; do
[ -d "$keyspace_dir" ] || continue
ks="$(basename "$keyspace_dir")"
if [ "$ks" = "system_schema" ] || ! [[ "$ks" =~ ^system ]]; then
echo "Restoring keyspace: $ks"
rm -rf "$DATA_DIR/$ks"
cp -a "$keyspace_dir" "$DATA_DIR/"
restored=$((restored + 1))
fi
done
if [ "$restored" -le 0 ]; then
echo "Error: No keyspaces restored from backup root: $ROOT"
echo "Debug: listing $ROOT:"
ls -la "$ROOT" || true
find "$ROOT" -maxdepth 2 -type d -print | sed -n "1,100p" || true
exit 1
fi
promoted=0
for ks_dir in "$DATA_DIR"/*/; do
[ -d "$ks_dir" ] || continue
ks="$(basename "$ks_dir")"
if [ "$ks" != "system_schema" ] && [[ "$ks" =~ ^system ]]; then
continue
fi
for table_dir in "$ks_dir"*/; do
[ -d "$table_dir" ] || continue
snap_root="$table_dir/snapshots"
[ -d "$snap_root" ] || continue
latest_snap="$(ls -1d "$snap_root"/*/ 2>/dev/null | sort -r | head -n 1 || true)"
[ -n "$latest_snap" ] || continue
files=( "$latest_snap"* )
if [ "${#files[@]}" -gt 0 ]; then
cp -av "${files[@]}" "$table_dir"
promoted=$((promoted + $(ls -1 "$latest_snap"/*Data.db 2>/dev/null | wc -l || true)))
fi
done
done
chown -R cassandra:cassandra "$BASE"
echo "Promoted Data.db files: $promoted"
if [ "$promoted" -le 0 ]; then
echo "Error: No *Data.db files were promoted out of snapshots"
echo "Debug: first snapshot dirs found:"
find "$DATA_DIR" -type d -path "*/snapshots/*" | sed -n "1,50p" || true
exit 1
fi
'
""",
    # Boot Cassandra on the restored volume, then poll (150 x 2s, so up to
    # ~5 minutes) until cqlsh answers; dump container state and logs and
    # fail fast if the container stops running.
    "start_cassandra": """
set -euo pipefail
docker run -d \
--name "${CASS_CONTAINER}" \
-v "${CASS_VOLUME}:/var/lib/cassandra" \
-e MAX_HEAP_SIZE="${MAX_HEAP_SIZE}" \
-e HEAP_NEWSIZE="${HEAP_NEWSIZE}" \
-e JVM_OPTS="-Dcassandra.disable_mlock=true" \
"${CASSANDRA_IMAGE}"
for i in $(seq 1 150); do
status="$(docker inspect -f '{{.State.Status}}' "${CASS_CONTAINER}" 2>/dev/null || true)"
if [ "${status}" != "running" ]; then
docker inspect "${CASS_CONTAINER}" --format 'ExitCode={{.State.ExitCode}} OOMKilled={{.State.OOMKilled}} Error={{.State.Error}}' || true
docker logs --tail 300 "${CASS_CONTAINER}" || true
exit 1
fi
if docker exec "${CASS_CONTAINER}" cqlsh -e "SELECT now() FROM system.local;" >/dev/null 2>&1; then
break
fi
sleep 2
done
docker exec "${CASS_CONTAINER}" cqlsh -e "SELECT now() FROM system.local;" >/dev/null 2>&1
""",
    # Prove user data survived the restore: retry COUNT(*) on fluxer.users
    # and require a positive count, else dump logs and fail.
    "verify_data": """
set -euo pipefail
USER_COUNT=""
for i in $(seq 1 20); do
USER_COUNT="$(
docker exec "${CASS_CONTAINER}" cqlsh -e "SELECT COUNT(*) FROM fluxer.users;" 2>/dev/null \
| awk "/^[[:space:]]*[0-9]+[[:space:]]*$/ {print \$1; exit}" || true
)"
if [ -n "${USER_COUNT}" ]; then
break
fi
sleep 2
done
if [ -n "${USER_COUNT}" ] && [ "${USER_COUNT}" -gt 0 ] 2>/dev/null; then
echo "Backup restore verification passed"
else
echo "Backup restore verification failed"
docker logs --tail 300 "${CASS_CONTAINER}" || true
exit 1
fi
""",
    # Tear down the container, volumes and scratch directory.
    "cleanup": """
set -euo pipefail
docker rm -f "${CASS_CONTAINER}" 2>/dev/null || true
docker volume rm "${CASS_VOLUME}" 2>/dev/null || true
docker volume rm "${BACKUP_VOLUME}" 2>/dev/null || true
rm -rf "${WORKDIR}" 2>/dev/null || true
""",
    # Final log line summarising the outcome (JOB_STATUS comes from the
    # workflow's job context; LATEST_BACKUP from the fetch step).
    "report_status": """
set -euo pipefail
LATEST_BACKUP_NAME="${LATEST_BACKUP:-unknown}"
if [ "${JOB_STATUS}" = "success" ]; then
echo "Backup ${LATEST_BACKUP_NAME} is valid and restorable"
else
echo "Backup ${LATEST_BACKUP_NAME} test failed"
fi
""",
}
def main() -> int:
    """Execute the restore-test step named on the command line."""
    step_name = parse_step_env_args().step
    run_step(STEPS, step_name)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python3
import pathlib
import sys
sys.path.append(str(pathlib.Path(__file__).resolve().parents[1]))
from ci_workflow import parse_step_env_args
from ci_utils import run_step
# Step-name -> bash script table for refreshing the Tailscale word lists
# vendored under fluxer_api/src/words/.
STEPS: dict[str, str] = {
    # Fetch the upstream word lists into /tmp.
    "download": """
set -euo pipefail
curl -fsSL https://raw.githubusercontent.com/tailscale/tailscale/refs/heads/main/words/scales.txt -o /tmp/scales.txt
curl -fsSL https://raw.githubusercontent.com/tailscale/tailscale/refs/heads/main/words/tails.txt -o /tmp/tails.txt
""",
    # Diff the downloads against the vendored copies and expose the result
    # as a "changes_detected" step output (true/false).
    "check_changes": """
set -euo pipefail
if ! diff -q /tmp/scales.txt fluxer_api/src/words/scales.txt > /dev/null 2>&1 || \
! diff -q /tmp/tails.txt fluxer_api/src/words/tails.txt > /dev/null 2>&1; then
printf 'changes_detected=true\n' >> "$GITHUB_OUTPUT"
echo "Changes detected in word lists"
else
printf 'changes_detected=false\n' >> "$GITHUB_OUTPUT"
echo "No changes detected in word lists"
fi
""",
    # Overwrite the vendored lists with the fresh downloads.
    "update": """
set -euo pipefail
cp /tmp/scales.txt fluxer_api/src/words/scales.txt
cp /tmp/tails.txt fluxer_api/src/words/tails.txt
""",
    # No-op branch taken when check_changes reported no differences.
    "no_changes": """
echo "Word lists are already up to date."
""",
}
def main() -> int:
    """Execute the word-list step named on the command line."""
    step_name = parse_step_env_args().step
    run_step(STEPS, step_name)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())