Mirror of https://github.com/versia-pub/server.git (synced 2025-12-06 08:28:19 +01:00)

feat: ✨ Split off queue workers into a separate worker process

Parent: 0b3e74107e
Commit: 1b98381242
.editorconfig (new file, 9 lines)
@@ -0,0 +1,9 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_style = space
+insert_final_newline = true
+tab_width = 4
+trim_trailing_whitespace = true
.github/workflows/check.yml (vendored, 44 changed lines; old and new render identically in this view, so the content is listed once)
@@ -1,31 +1,31 @@
name: Check Types

on:
    push:
        branches: ["*"]
    pull_request:
        # The branches below must be a subset of the branches above
        branches: ["main"]

jobs:
    tests:
        runs-on: ubuntu-latest
        permissions:
            contents: read

        steps:
            - name: Checkout code
              uses: actions/checkout@v4
              with:
                  submodules: recursive

            - name: Setup Bun
              uses: oven-sh/setup-bun@v2

            - name: Install NPM packages
              run: |
                  bun install

            - name: Run typechecks
              run: |
                  bun run check
.github/workflows/docker-publish.yml (vendored, deleted, 74 lines)
@@ -1,74 +0,0 @@
-name: Docker Build
-
-on:
-    push:
-        branches: [ "main" ]
-        # Publish semver tags as releases.
-        tags: [ 'v*.*.*' ]
-    pull_request:
-        branches: [ "main" ]
-
-env:
-    # Use docker.io for Docker Hub if empty
-    REGISTRY: ghcr.io
-    # github.repository as <account>/<repo>
-    IMAGE_NAME: ${{ github.repository }}
-
-
-jobs:
-    build:
-
-        runs-on: ubuntu-latest
-        permissions:
-            contents: read
-            packages: write
-            # This is used to complete the identity challenge
-            # with sigstore/fulcio when running outside of PRs.
-            id-token: write
-
-        steps:
-            - name: Checkout repository
-              uses: actions/checkout@v4
-              with:
-                  submodules: recursive
-
-            - name: Setup QEMU
-              uses: docker/setup-qemu-action@v3
-              with:
-                  platforms: all
-
-            - name: Set up Docker Buildx
-              uses: docker/setup-buildx-action@v3 # v3.0.0
-
-            - name: Log into registry ${{ env.REGISTRY }}
-              if: github.event_name != 'pull_request'
-              uses: docker/login-action@v3 # v3.0.0
-              with:
-                  registry: ${{ env.REGISTRY }}
-                  username: ${{ github.actor }}
-                  password: ${{ secrets.GITHUB_TOKEN }}
-
-            - name: Extract Docker metadata
-              id: meta
-              uses: docker/metadata-action@v5 # v5.0.0
-              with:
-                  images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
-
-            - name: Get the commit hash
-              run: echo "GIT_COMMIT=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_ENV
-
-            - name: Build and push Docker image
-              id: build-and-push
-              uses: docker/build-push-action@v5 # v5.0.0
-              with:
-                  context: .
-                  push: ${{ github.event_name != 'pull_request' }}
-                  tags: ${{ steps.meta.outputs.tags }}
-                  labels: ${{ steps.meta.outputs.labels }}
-                  build-args: |
-                      GIT_COMMIT=${{ env.GIT_COMMIT }}
-                  provenance: mode=max
-                  sbom: true
-                  platforms: linux/amd64,linux/arm64
-                  cache-from: type=gha
-                  cache-to: type=gha,mode=max
.github/workflows/docker-server.yml (vendored, new file, 72 lines)
@@ -0,0 +1,72 @@
+name: Build Server Docker Image
+
+on:
+    push:
+        branches: ["main"]
+        # Publish semver tags as releases.
+        tags: ["v*.*.*"]
+    pull_request:
+        branches: ["main"]
+
+env:
+    # Use docker.io for Docker Hub if empty
+    REGISTRY: ghcr.io
+    # github.repository as <account>/<repo>
+    IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+    build:
+        runs-on: ubuntu-latest
+        permissions:
+            contents: read
+            packages: write
+            # This is used to complete the identity challenge
+            # with sigstore/fulcio when running outside of PRs.
+            id-token: write
+
+        steps:
+            - name: Checkout repository
+              uses: actions/checkout@v4
+              with:
+                  submodules: recursive
+
+            - name: Setup QEMU
+              uses: docker/setup-qemu-action@v3
+              with:
+                  platforms: all
+
+            - name: Set up Docker Buildx
+              uses: docker/setup-buildx-action@v3 # v3.0.0
+
+            - name: Log into registry ${{ env.REGISTRY }}
+              if: github.event_name != 'pull_request'
+              uses: docker/login-action@v3 # v3.0.0
+              with:
+                  registry: ${{ env.REGISTRY }}
+                  username: ${{ github.actor }}
+                  password: ${{ secrets.GITHUB_TOKEN }}
+
+            - name: Extract Docker metadata
+              id: meta
+              uses: docker/metadata-action@v5 # v5.0.0
+              with:
+                  images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+            - name: Get the commit hash
+              run: echo "GIT_COMMIT=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_ENV
+
+            - name: Build and push Docker image
+              id: build-and-push
+              uses: docker/build-push-action@v5 # v5.0.0
+              with:
+                  context: .
+                  push: ${{ github.event_name != 'pull_request' }}
+                  tags: ${{ steps.meta.outputs.tags }}
+                  labels: ${{ steps.meta.outputs.labels }}
+                  build-args: |
+                      GIT_COMMIT=${{ env.GIT_COMMIT }}
+                  provenance: mode=max
+                  sbom: true
+                  platforms: linux/amd64,linux/arm64
+                  cache-from: type=gha
+                  cache-to: type=gha,mode=max
.github/workflows/docker-worker.yml (vendored, new file, 73 lines)
@@ -0,0 +1,73 @@
+name: Build Worker Docker Image
+
+on:
+    push:
+        branches: ["main"]
+        # Publish semver tags as releases.
+        tags: ["v*.*.*"]
+    pull_request:
+        branches: ["main"]
+
+env:
+    # Use docker.io for Docker Hub if empty
+    REGISTRY: ghcr.io
+    # github.repository as <account>/<repo>
+    IMAGE_NAME: ${{ github.repository }}
+
+jobs:
+    build:
+        runs-on: ubuntu-latest
+        permissions:
+            contents: read
+            packages: write
+            # This is used to complete the identity challenge
+            # with sigstore/fulcio when running outside of PRs.
+            id-token: write
+
+        steps:
+            - name: Checkout repository
+              uses: actions/checkout@v4
+              with:
+                  submodules: recursive
+
+            - name: Setup QEMU
+              uses: docker/setup-qemu-action@v3
+              with:
+                  platforms: all
+
+            - name: Set up Docker Buildx
+              uses: docker/setup-buildx-action@v3 # v3.0.0
+
+            - name: Log into registry ${{ env.REGISTRY }}
+              if: github.event_name != 'pull_request'
+              uses: docker/login-action@v3 # v3.0.0
+              with:
+                  registry: ${{ env.REGISTRY }}
+                  username: ${{ github.actor }}
+                  password: ${{ secrets.GITHUB_TOKEN }}
+
+            - name: Extract Docker metadata
+              id: meta
+              uses: docker/metadata-action@v5 # v5.0.0
+              with:
+                  images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+            - name: Get the commit hash
+              run: echo "GIT_COMMIT=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_ENV
+
+            - name: Build and push Docker image
+              id: build-and-push
+              uses: docker/build-push-action@v5 # v5.0.0
+              with:
+                  context: .
+                  push: ${{ github.event_name != 'pull_request' }}
+                  tags: ${{ steps.meta.outputs.tags }}
+                  labels: ${{ steps.meta.outputs.labels }}
+                  build-args: |
+                      GIT_COMMIT=${{ env.GIT_COMMIT }}
+                  file: ./Worker.Dockerfile
+                  provenance: mode=max
+                  sbom: true
+                  platforms: linux/amd64,linux/arm64
+                  cache-from: type=gha
+                  cache-to: type=gha,mode=max
.github/workflows/docs.yml (vendored, 76 changed lines; content listed once)
@@ -1,56 +1,56 @@
name: Deploy Docs to GitHub Pages

on:
    push:
        branches: [main]

    workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
    contents: read
    pages: write
    id-token: write

# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
    group: pages
    cancel-in-progress: false

jobs:
    build:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout
              uses: actions/checkout@v4
              with:
                  fetch-depth: 0

            - uses: oven-sh/setup-bun@v2
            - name: Setup Pages

              uses: actions/configure-pages@v4
            - name: Install dependencies
              run: bun install

            - name: Build with VitePress
              run: bun run docs:build

            - name: Upload artifact
              uses: actions/upload-pages-artifact@v3
              with:
                  path: docs/.vitepress/dist

    # Deployment job
    deploy:
        environment:
            name: github-pages
            url: ${{ steps.deployment.outputs.page_url }}
        needs: build
        runs-on: ubuntu-latest
        name: Deploy
        steps:
            - name: Deploy to GitHub Pages
              id: deployment
              uses: actions/deploy-pages@v4
.github/workflows/lint.yml (vendored, 44 changed lines; content listed once)
@@ -1,31 +1,31 @@
name: Lint & Format

on:
    push:
        branches: ["*"]
    pull_request:
        # The branches below must be a subset of the branches above
        branches: ["main"]

jobs:
    tests:
        runs-on: ubuntu-latest
        permissions:
            contents: read

        steps:
            - name: Checkout code
              uses: actions/checkout@v4
              with:
                  submodules: recursive

            - name: Setup Bun
              uses: oven-sh/setup-bun@v2

            - name: Install NPM packages
              run: |
                  bun install

            - name: Run linting
              run: |
                  bunx @biomejs/biome ci .
.github/workflows/mirror.yml (vendored, 8 changed lines; content listed once)
@@ -2,7 +2,7 @@ name: Mirror to Codeberg
on: [push]

jobs:
    mirror:
        name: Mirror
        uses: versia-pub/.github/.github/workflows/mirror.yml@main
        secrets: inherit
.github/workflows/nix-flake.yml (vendored, 40 changed lines; content listed once)
@@ -1,25 +1,25 @@
name: Nix Build

on:
    pull_request:
    push:
        branches: ["*"]
    workflow_dispatch:

jobs:
    check:
        runs-on: ubuntu-latest
        permissions:
            id-token: "write"
            contents: "read"
        steps:
            - uses: actions/checkout@v4
            - uses: DeterminateSystems/nix-installer-action@main
              with:
                  extra-conf: accept-flake-config = true
            - uses: DeterminateSystems/magic-nix-cache-action@main
            - uses: DeterminateSystems/flake-checker-action@main
            - name: Build default package
              run: nix build .
            - name: Check flakes
              run: nix flake check --allow-import-from-derivation
.github/workflows/staging.yml (vendored, 78 changed lines; content listed once)
@@ -1,50 +1,50 @@
name: Staging build bundle

on:
    push:
        branches: ["staging"]

jobs:
    tests:
        runs-on: ubuntu-latest
        permissions:
            contents: read

        steps:
            - name: Checkout code
              uses: actions/checkout@v4
              with:
                  submodules: recursive

            - name: Setup Bun
              uses: oven-sh/setup-bun@v2

            - name: Install NPM packages
              run: |
                  bun install

            - name: Build dist
              run: |
                  bun run build

            - name: Bundle
              run: |
                  mkdir bundle
                  cp -r dist bundle/
                  cp -r config bundle/
                  cp -r docs bundle/
                  cp -r CODE_OF_CONDUCT.md bundle/
                  cp -r CONTRIBUTING.md bundle/
                  cp -r README.md bundle/
                  cp -r flake.nix bundle/
                  cp -r shell.nix bundle/
                  cp -r flake.lock bundle/
                  cp -r LICENSE bundle/
                  cp -r SECURITY.md bundle/
                  tar cfJ archive.tar.xz bundle/

            - name: Upload
              uses: actions/upload-artifact@v4
              with:
                  name: staging-dist
                  path: archive.tar.xz
.github/workflows/tests.yml (vendored, 100 changed lines; content listed once)
@@ -1,59 +1,59 @@
name: Tests

on:
    push:
        branches: ["*"]
    pull_request:
        # The branches below must be a subset of the branches above
        branches: ["main"]

jobs:
    tests:
        runs-on: ubuntu-latest
        services:
            postgres:
                image: ghcr.io/versia-pub/postgres:main
                ports:
                    - 5432:5432
                env:
                    POSTGRES_DB: versia
                    POSTGRES_USER: versia
                    POSTGRES_PASSWORD: versia
                volumes:
                    - versia-data:/var/lib/postgresql/data
                options: --health-cmd pg_isready
                    --health-interval 10s
                    --health-timeout 5s
                    --health-retries 5
            redis:
                image: redis:latest
                ports:
                    - 6379:6379
                options: --health-cmd "redis-cli ping"
                    --health-interval 10s
                    --health-timeout 5s
                    --health-retries 5
        permissions:
            contents: read
            security-events: write
            actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
        steps:
            - name: Checkout code
              uses: actions/checkout@v4
              with:
                  submodules: recursive

            - name: Setup Bun
              uses: oven-sh/setup-bun@v2

            - name: Install NPM packages
              run: |
                  bun install

            - name: Move workflow config to config folder
              run: |
                  mv .github/config.workflow.toml config/config.toml

            - name: Run tests
              run: |
                  bun run test
Worker.Dockerfile (new file, 53 lines)
@@ -0,0 +1,53 @@
+# Node is required for building the project
+FROM imbios/bun-node:1-20-alpine AS base
+
+RUN apk add --no-cache libstdc++
+
+# Install dependencies into temp directory
+# This will cache them and speed up future builds
+FROM base AS install
+
+RUN mkdir -p /temp
+COPY . /temp
+WORKDIR /temp
+RUN bun install --production
+
+FROM base AS build
+
+# Copy the project
+RUN mkdir -p /temp
+COPY . /temp
+# Copy dependencies
+COPY --from=install /temp/node_modules /temp/node_modules
+
+# Build the project
+WORKDIR /temp
+RUN bun run build:worker
+WORKDIR /temp/dist
+
+# Copy production dependencies and source code into final image
+FROM oven/bun:1.1.36-alpine
+
+# Install libstdc++ for Bun and create app directory
+RUN apk add --no-cache libstdc++ && \
+    mkdir -p /app
+
+COPY --from=build /temp/dist /app/dist
+COPY entrypoint.sh /app
+
+LABEL org.opencontainers.image.authors="Gaspard Wierzbinski (https://cpluspatch.dev)"
+LABEL org.opencontainers.image.source="https://github.com/versia-pub/server"
+LABEL org.opencontainers.image.vendor="Versia Pub"
+LABEL org.opencontainers.image.licenses="AGPL-3.0-or-later"
+LABEL org.opencontainers.image.title="Versia Server Worker"
+LABEL org.opencontainers.image.description="Versia Server Worker Docker image"
+
+# Set current Git commit hash as an environment variable
+ARG GIT_COMMIT
+ENV GIT_COMMIT=$GIT_COMMIT
+
+# CD to app
+WORKDIR /app/dist
+ENV NODE_ENV=production
+# Run migrations and start the server
+CMD [ "bun", "run", "index.js" ]
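Reading the stages together: dependencies are installed once in a throwaway `install` stage, the `build` stage bundles the worker entrypoint via `bun run build:worker`, and only the resulting `dist/` output plus `entrypoint.sh` are copied into the final `oven/bun` image, keeping the runtime image small.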
@@ -2,7 +2,7 @@ import { apiRoute, applyConfig } from "@/api";
 import { createRoute } from "@hono/zod-openapi";
 import type { Entity } from "@versia/federation/types";
 import { z } from "zod";
-import { InboxJobType, inboxQueue } from "~/worker";
+import { InboxJobType, inboxQueue } from "~/classes/queues/inbox";

 export const meta = applyConfig({
     auth: {
@@ -2,8 +2,8 @@ import { apiRoute, applyConfig } from "@/api";
 import { createRoute } from "@hono/zod-openapi";
 import type { Entity } from "@versia/federation/types";
 import { z } from "zod";
+import { InboxJobType, inboxQueue } from "~/classes/queues/inbox";
 import { ErrorSchema } from "~/types/api";
-import { InboxJobType, inboxQueue } from "~/worker";

 export const meta = applyConfig({
     auth: {
build-worker.ts (new file, 26 lines)
@@ -0,0 +1,26 @@
+import { $ } from "bun";
+import ora from "ora";
+
+const buildSpinner = ora("Building").start();
+
+await $`rm -rf dist && mkdir dist`;
+
+await Bun.build({
+    entrypoints: ["entrypoints/worker/index.ts"],
+    outdir: "dist",
+    target: "bun",
+    splitting: true,
+    minify: false,
+}).then((output) => {
+    if (!output.success) {
+        console.error(output.logs);
+        throw new Error("Build failed");
+    }
+});
+
+buildSpinner.text = "Transforming";
+
+// Copy Drizzle migrations to dist
+await $`cp -r drizzle dist/drizzle`;
+
+buildSpinner.stop();
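With the matching `build:worker` script registered in package.json (see the hunk near the end of this diff), the same bundle can be produced locally with `bun run build:worker`; Worker.Dockerfile above invokes that script in its build stage.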
@@ -41,7 +41,7 @@ import {
     parseTextMentions,
 } from "~/classes/functions/status";
 import { config } from "~/packages/config-manager";
-import { DeliveryJobType, deliveryQueue } from "~/worker.ts";
+import { DeliveryJobType, deliveryQueue } from "../queues/delivery.ts";
 import { Application } from "./application.ts";
 import { Attachment } from "./attachment.ts";
 import { BaseInterface } from "./base.ts";
@@ -53,7 +53,7 @@ import { findManyUsers } from "~/classes/functions/user";
 import { searchManager } from "~/classes/search/search-manager";
 import { type Config, config } from "~/packages/config-manager";
 import type { KnownEntity } from "~/types/api.ts";
-import { DeliveryJobType, deliveryQueue } from "~/worker.ts";
+import { DeliveryJobType, deliveryQueue } from "../queues/delivery.ts";
 import { BaseInterface } from "./base.ts";
 import { Emoji } from "./emoji.ts";
 import { Instance } from "./instance.ts";
classes/queues/delivery.ts (new file, 20 lines)
@@ -0,0 +1,20 @@
+import { Queue } from "bullmq";
+import type { KnownEntity } from "~/types/api";
+import { connection } from "~/utils/redis.ts";
+
+export enum DeliveryJobType {
+    FederateEntity = "federateEntity",
+}
+
+export type DeliveryJobData = {
+    entity: KnownEntity;
+    recipientId: string;
+    senderId: string;
+};
+
+export const deliveryQueue = new Queue<DeliveryJobData, void, DeliveryJobType>(
+    "delivery",
+    {
+        connection,
+    },
+);
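To illustrate the producer side, here is a minimal sketch of enqueueing a delivery job with BullMQ's standard `Queue.add` (the `declare`d values are hypothetical stand-ins; this snippet is not part of the commit):

    import {
        type DeliveryJobData,
        DeliveryJobType,
        deliveryQueue,
    } from "~/classes/queues/delivery";
    import type { KnownEntity } from "~/types/api";

    // Hypothetical inputs: a federated entity and two stored user IDs.
    declare const entity: KnownEntity;
    declare const senderId: string;
    declare const recipientId: string;

    // The job is persisted in Redis; the separate worker process consumes it.
    await deliveryQueue.add(DeliveryJobType.FederateEntity, {
        entity,
        recipientId,
        senderId,
    } satisfies DeliveryJobData);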
classes/queues/fetch.ts (new file, 17 lines)
@@ -0,0 +1,17 @@
+import { Queue } from "bullmq";
+import { connection } from "~/utils/redis.ts";
+
+export enum FetchJobType {
+    Instance = "instance",
+    User = "user",
+    Note = "user",
+}
+
+export type FetchJobData = {
+    uri: string;
+    refetcher?: string;
+};
+
+export const fetchQueue = new Queue<FetchJobData, void, FetchJobType>("fetch", {
+    connection,
+});
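Note that `Note = "user"` reuses the string value of `User` (carried over verbatim from the deleted worker.ts at the bottom of this diff), so the two job names are indistinguishable at runtime; `"note"` was presumably intended. For reference, a hypothetical enqueue using the standard BullMQ `add` call (not part of the commit):

    import { FetchJobType, fetchQueue } from "~/classes/queues/fetch";

    // Ask the worker process to (re)fetch a remote instance's metadata.
    await fetchQueue.add(FetchJobType.Instance, {
        uri: "https://remote.example", // hypothetical remote instance URI
    });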
classes/queues/inbox.ts (new file, 31 lines)
@@ -0,0 +1,31 @@
+import type { Entity } from "@versia/federation/types";
+import { Queue } from "bullmq";
+import type { SocketAddress } from "bun";
+import { connection } from "~/utils/redis.ts";
+
+export enum InboxJobType {
+    ProcessEntity = "processEntity",
+}
+
+export type InboxJobData = {
+    data: Entity;
+    headers: {
+        "x-signature"?: string;
+        "x-nonce"?: string;
+        "x-signed-by"?: string;
+        authorization?: string;
+    };
+    request: {
+        url: string;
+        method: string;
+        body: string;
+    };
+    ip: SocketAddress | null;
+};
+
+export const inboxQueue = new Queue<InboxJobData, Response, InboxJobType>(
+    "inbox",
+    {
+        connection,
+    },
+);
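This queue is typed to return a `Response`, which implies the HTTP inbox route waits for the worker's verdict. A sketch of how a producer could await that result using BullMQ's `QueueEvents` and `Job.waitUntilFinished` (an assumed usage pattern, not code from this commit; note that job results are serialized through Redis, so a full `Response` object may not survive the round trip unmodified):

    import { QueueEvents } from "bullmq";
    import {
        type InboxJobData,
        InboxJobType,
        inboxQueue,
    } from "~/classes/queues/inbox";
    import { connection } from "~/utils/redis.ts";

    // Hypothetical payload built from an incoming federation request.
    declare const jobData: InboxJobData;

    const inboxEvents = new QueueEvents(inboxQueue.name, { connection });

    const job = await inboxQueue.add(InboxJobType.ProcessEntity, jobData);
    // Blocks until the worker process completes (or fails) the job.
    const result = await job.waitUntilFinished(inboxEvents);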
classes/workers/delivery.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
+import { getLogger } from "@logtape/logtape";
+import { User } from "@versia/kit/db";
+import { Worker } from "bullmq";
+import chalk from "chalk";
+import { config } from "~/packages/config-manager";
+import { connection } from "~/utils/redis.ts";
+import {
+    type DeliveryJobData,
+    DeliveryJobType,
+    deliveryQueue,
+} from "../queues/delivery.ts";
+
+export const getDeliveryWorker = (): Worker<
+    DeliveryJobData,
+    void,
+    DeliveryJobType
+> =>
+    new Worker<DeliveryJobData, void, DeliveryJobType>(
+        deliveryQueue.name,
+        async (job) => {
+            switch (job.name) {
+                case DeliveryJobType.FederateEntity: {
+                    const { entity, recipientId, senderId } = job.data;
+
+                    const logger = getLogger(["federation", "delivery"]);
+
+                    const sender = await User.fromId(senderId);
+
+                    if (!sender) {
+                        throw new Error(
+                            `Could not resolve sender ID ${chalk.gray(senderId)}`,
+                        );
+                    }
+
+                    const recipient = await User.fromId(recipientId);
+
+                    if (!recipient) {
+                        throw new Error(
+                            `Could not resolve recipient ID ${chalk.gray(recipientId)}`,
+                        );
+                    }
+
+                    logger.debug`Federating entity ${chalk.gray(
+                        entity.id,
+                    )} from ${chalk.gray(`@${sender.getAcct()}`)} to ${chalk.gray(
+                        recipient.getAcct(),
+                    )}`;
+
+                    await sender.federateToUser(entity, recipient);
+
+                    logger.debug`${chalk.green(
+                        "✔",
+                    )} Finished federating entity ${chalk.gray(entity.id)}`;
+                }
+            }
+        },
+        {
+            connection,
+            removeOnComplete: {
+                age: config.queues.delivery.remove_on_complete,
+            },
+            removeOnFail: {
+                age: config.queues.delivery.remove_on_failure,
+            },
+        },
+    );
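Because `getDeliveryWorker` simply returns the BullMQ `Worker`, callers can attach the standard lifecycle events for observability; a minimal sketch (not part of the commit):

    import { getDeliveryWorker } from "~/classes/workers/delivery";

    const worker = getDeliveryWorker();

    worker.on("completed", (job) => {
        console.log(`delivery job ${job.id} completed`);
    });
    worker.on("failed", (job, error) => {
        // job may be undefined if the failure was not tied to a single job
        console.error(`delivery job ${job?.id} failed:`, error);
    });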
classes/workers/fetch.ts (new file, 64 lines)
@@ -0,0 +1,64 @@
+import { Instance } from "@versia/kit/db";
+import { Instances } from "@versia/kit/tables";
+import { Worker } from "bullmq";
+import chalk from "chalk";
+import { eq } from "drizzle-orm";
+import { config } from "~/packages/config-manager";
+import { connection } from "~/utils/redis.ts";
+import {
+    type FetchJobData,
+    FetchJobType,
+    fetchQueue,
+} from "../queues/fetch.ts";
+
+export const getFetchWorker = (): Worker<FetchJobData, void, FetchJobType> =>
+    new Worker<FetchJobData, void, FetchJobType>(
+        fetchQueue.name,
+        async (job) => {
+            switch (job.name) {
+                case FetchJobType.Instance: {
+                    const { uri } = job.data;
+
+                    await job.log(`Fetching instance metadata from [${uri}]`);
+
+                    // Check if exists
+                    const host = new URL(uri).host;
+
+                    const existingInstance = await Instance.fromSql(
+                        eq(Instances.baseUrl, host),
+                    );
+
+                    if (existingInstance) {
+                        await job.log(
+                            "Instance is known, refetching remote data.",
+                        );
+
+                        await existingInstance.updateFromRemote();
+
+                        await job.log(
+                            `Instance [${uri}] successfully refetched`,
+                        );
+
+                        return;
+                    }
+
+                    await Instance.resolve(uri);
+
+                    await job.log(
+                        `${chalk.green(
+                            "✔",
+                        )} Finished fetching instance metadata from [${uri}]`,
+                    );
+                }
+            }
+        },
+        {
+            connection,
+            removeOnComplete: {
+                age: config.queues.fetch.remove_on_complete,
+            },
+            removeOnFail: {
+                age: config.queues.fetch.remove_on_failure,
+            },
+        },
+    );
classes/workers/inbox.ts (new file, 167 lines)
@@ -0,0 +1,167 @@
+import { getLogger } from "@logtape/logtape";
+import { Instance, User } from "@versia/kit/db";
+import { Worker } from "bullmq";
+import chalk from "chalk";
+import { config } from "~/packages/config-manager/index.ts";
+import { connection } from "~/utils/redis.ts";
+import { InboxProcessor } from "../inbox/processor.ts";
+import {
+    type InboxJobData,
+    InboxJobType,
+    inboxQueue,
+} from "../queues/inbox.ts";
+
+export const getInboxWorker = (): Worker<
+    InboxJobData,
+    Response,
+    InboxJobType
+> =>
+    new Worker<InboxJobData, Response, InboxJobType>(
+        inboxQueue.name,
+        async (job) => {
+            switch (job.name) {
+                case InboxJobType.ProcessEntity: {
+                    const {
+                        data,
+                        headers: {
+                            "x-signature": signature,
+                            "x-nonce": nonce,
+                            "x-signed-by": signedBy,
+                            authorization,
+                        },
+                        request,
+                        ip,
+                    } = job.data;
+
+                    const logger = getLogger(["federation", "inbox"]);
+
+                    logger.debug`Processing entity ${chalk.gray(
+                        data.id,
+                    )} from ${chalk.gray(signedBy)}`;
+
+                    if (authorization) {
+                        const processor = new InboxProcessor(
+                            request,
+                            data,
+                            null,
+                            {
+                                signature,
+                                nonce,
+                                authorization,
+                            },
+                            logger,
+                            ip,
+                        );
+
+                        logger.debug`Entity ${chalk.gray(
+                            data.id,
+                        )} is potentially from a bridge`;
+
+                        return await processor.process();
+                    }
+
+                    // If not potentially from bridge, check for required headers
+                    if (!(signature && nonce && signedBy)) {
+                        return Response.json(
+                            {
+                                error: "Missing required headers: x-signature, x-nonce, or x-signed-by",
+                            },
+                            {
+                                status: 400,
+                            },
+                        );
+                    }
+
+                    const sender = await User.resolve(signedBy);
+
+                    if (!(sender || signedBy.startsWith("instance "))) {
+                        return Response.json(
+                            {
+                                error: `Couldn't resolve sender URI ${signedBy}`,
+                            },
+                            {
+                                status: 404,
+                            },
+                        );
+                    }
+
+                    if (sender?.isLocal()) {
+                        return Response.json(
+                            {
+                                error: "Cannot process federation requests from local users",
+                            },
+                            {
+                                status: 400,
+                            },
+                        );
+                    }
+
+                    const remoteInstance = sender
+                        ? await Instance.fromUser(sender)
+                        : await Instance.resolveFromHost(
+                              signedBy.split(" ")[1],
+                          );
+
+                    if (!remoteInstance) {
+                        return Response.json(
+                            { error: "Could not resolve the remote instance." },
+                            {
+                                status: 500,
+                            },
+                        );
+                    }
+
+                    logger.debug`Entity ${chalk.gray(
+                        data.id,
+                    )} is from remote instance ${chalk.gray(
+                        remoteInstance.data.baseUrl,
+                    )}`;
+
+                    if (!remoteInstance.data.publicKey?.key) {
+                        throw new Error(
+                            `Instance ${remoteInstance.data.baseUrl} has no public key stored in database`,
+                        );
+                    }
+
+                    const processor = new InboxProcessor(
+                        request,
+                        data,
+                        {
+                            instance: remoteInstance,
+                            key:
+                                sender?.data.publicKey ??
+                                remoteInstance.data.publicKey.key,
+                        },
+                        {
+                            signature,
+                            nonce,
+                            authorization,
+                        },
+                        logger,
+                        ip,
+                    );
+
+                    const output = await processor.process();
+
+                    logger.debug`${chalk.green(
+                        "✔",
+                    )} Finished processing entity ${chalk.gray(data.id)}`;
+
+                    return output;
+                }
+
+                default: {
+                    throw new Error(`Unknown job type: ${job.name}`);
+                }
+            }
+        },
+        {
+            connection,
+            removeOnComplete: {
+                age: config.queues.inbox.remove_on_complete,
+            },
+            removeOnFail: {
+                age: config.queues.inbox.remove_on_failure,
+            },
+        },
+    );
@@ -3,8 +3,8 @@ import { Instance } from "@versia/kit/db";
 import { Instances } from "@versia/kit/tables";
 import { eq } from "drizzle-orm";
 import ora from "ora";
+import { FetchJobType, fetchQueue } from "~/classes/queues/fetch";
 import { BaseCommand } from "~/cli/base";
-import { FetchJobType, fetchQueue } from "~/worker";

 export default class FederationInstanceRefetch extends BaseCommand<
     typeof FederationInstanceRefetch
@@ -1,61 +1,75 @@
 services:
     versia:
         image: ghcr.io/versia-pub/server:latest
         volumes:
             - ./logs:/app/dist/logs
             - ./config:/app/dist/config
             - ./uploads:/app/dist/uploads
         restart: unless-stopped
         container_name: versia
         tty: true
         networks:
             - versia-net
         depends_on:
             - db
             - redis
             - sonic
             - fe

+    worker:
+        image: ghcr.io/versia-pub/worker:latest
+        volumes:
+            - ./logs:/app/dist/logs
+            - ./config:/app/dist/config
+        restart: unless-stopped
+        container_name: versia-worker
+        tty: true
+        networks:
+            - versia-net
+        depends_on:
+            - db
+            - redis
+
     fe:
         image: ghcr.io/versia-pub/frontend:main
         container_name: versia-fe
         restart: unless-stopped
         networks:
             - versia-net
         environment:
             NUXT_PUBLIC_API_HOST: https://yourserver.com

     db:
         image: ghcr.io/versia-pub/postgres:main
         container_name: versia-db
         restart: unless-stopped
         environment:
             POSTGRES_DB: versia
             POSTGRES_USER: versia
             POSTGRES_PASSWORD: _______________
         networks:
             - versia-net
         volumes:
             - ./db-data:/var/lib/postgresql/data

     redis:
         image: redis:alpine
         container_name: versia-redis
         volumes:
             - ./redis-data:/data
         restart: unless-stopped
         networks:
             - versia-net

     sonic:
         volumes:
             - ./config.cfg:/etc/sonic.cfg
             - ./store/:/var/lib/sonic/store/
         image: valeriansaliou/sonic:v1.4.9
         container_name: versia-sonic
         restart: unless-stopped
         networks:
             - versia-net

 networks:
     versia-net:
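The new `worker` service mounts the same `logs` and `config` directories as `versia` but drops the `uploads` mount and the `sonic`/`fe` dependencies, since it only needs Postgres and Redis to process queue jobs. One thing to verify when deploying: the compose file pulls `ghcr.io/versia-pub/worker:latest`, while docker-worker.yml above tags its images under `${{ github.repository }}` (i.e. `versia-pub/server`), so the published name may not match unless the metadata step is adjusted.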
entrypoints/api/index.ts (new file, 20 lines)
@@ -0,0 +1,20 @@
+import cluster from "node:cluster";
+import { sentry } from "@/sentry";
+import { createServer } from "@/server";
+import { appFactory } from "~/app";
+import { config } from "~/packages/config-manager/index.ts";
+
+process.on("SIGINT", () => {
+    process.exit();
+});
+
+if (cluster.isPrimary) {
+    for (let i = 0; i < Number(process.env.NUM_CPUS ?? 1); i++) {
+        cluster.fork();
+    }
+
+    await import("~/entrypoints/api/setup.ts");
+    sentry?.captureMessage("Server started", "info");
+} else {
+    createServer(config, await appFactory());
+}
@@ -5,7 +5,7 @@ import { Note } from "@versia/kit/db";
 import IORedis from "ioredis";
 import { setupDatabase } from "~/drizzle/db";
 import { config } from "~/packages/config-manager/index.ts";
-import { searchManager } from "./classes/search/search-manager.ts";
+import { searchManager } from "../../classes/search/search-manager.ts";

 const timeAtStart = performance.now();

entrypoints/worker/index.ts (new file, 29 lines)
@@ -0,0 +1,29 @@
+import { sentry } from "@/sentry";
+import { getLogger } from "@logtape/logtape";
+import chalk from "chalk";
+import { getDeliveryWorker } from "~/classes/workers/delivery";
+import { getFetchWorker } from "~/classes/workers/fetch";
+import { getInboxWorker } from "~/classes/workers/inbox";
+
+process.on("SIGINT", () => {
+    process.exit();
+});
+
+await import("~/entrypoints/worker/setup.ts");
+sentry?.captureMessage("Server started", "info");
+
+const serverLogger = getLogger("server");
+
+serverLogger.info`Starting Fetch Worker...`;
+getFetchWorker();
+serverLogger.info`${chalk.green("✔")} Fetch Worker started`;
+
+serverLogger.info`Starting Delivery Worker...`;
+getDeliveryWorker();
+serverLogger.info`${chalk.green("✔")} Delivery Worker started`;
+
+serverLogger.info`Starting Inbox Worker...`;
+getInboxWorker();
+serverLogger.info`${chalk.green("✔")} Inbox Worker started`;
+
+serverLogger.info`${chalk.green("✔✔✔")} All workers started`;
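As committed, SIGINT exits immediately, which can abandon in-flight jobs. If graceful shutdown is wanted, the worker handles returned by the three factories could be closed first; a sketch of that variant (an assumption, not what this commit does):

    import { getDeliveryWorker } from "~/classes/workers/delivery";
    import { getFetchWorker } from "~/classes/workers/fetch";
    import { getInboxWorker } from "~/classes/workers/inbox";

    const workers = [getFetchWorker(), getDeliveryWorker(), getInboxWorker()];

    process.on("SIGINT", async () => {
        // Worker.close() waits for currently running jobs to settle.
        await Promise.all(workers.map((worker) => worker.close()));
        process.exit();
    });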
entrypoints/worker/setup.ts (new file, 55 lines)
@@ -0,0 +1,55 @@
+import { checkConfig } from "@/init";
+import { configureLoggers } from "@/loggers";
+import { getLogger } from "@logtape/logtape";
+import { Note } from "@versia/kit/db";
+import chalk from "chalk";
+import IORedis from "ioredis";
+import { setupDatabase } from "~/drizzle/db";
+import { config } from "~/packages/config-manager/index.ts";
+import { searchManager } from "../../classes/search/search-manager.ts";
+
+const timeAtStart = performance.now();
+
+await configureLoggers();
+
+const serverLogger = getLogger("server");
+
+console.info(`
+██╗   ██╗███████╗██████╗ ███████╗██╗ █████╗
+██║   ██║██╔════╝██╔══██╗██╔════╝██║██╔══██╗
+██║   ██║█████╗  ██████╔╝███████╗██║███████║
+╚██╗ ██╔╝██╔══╝  ██╔══██╗╚════██║██║██╔══██║
+ ╚████╔╝ ███████╗██║  ██║███████║██║██║  ██║
+  ╚═══╝  ╚══════╝╚═╝  ╚═╝╚══════╝╚═╝╚═╝  ╚═╝
+ ${chalk.redBright.bold("** WORKER MODE **")}
+`);
+
+serverLogger.info`Starting Versia Server Worker...`;
+
+await setupDatabase();
+
+if (config.sonic.enabled) {
+    await searchManager.connect();
+}
+
+// Check if database is reachable
+const postCount = await Note.getCount();
+
+await checkConfig(config);
+
+serverLogger.info`Versia Server Worker started at ${config.http.bind}:${config.http.bind_port} in ${(performance.now() - timeAtStart).toFixed(0)}ms`;
+
+serverLogger.info`Database is online, containing ${postCount} posts`;
+
+// Check if Redis is reachable
+const connection = new IORedis({
+    host: config.redis.queue.host,
+    port: config.redis.queue.port,
+    password: config.redis.queue.password,
+    db: config.redis.queue.database,
+    maxRetriesPerRequest: null,
+});
+
+await connection.ping();
+
+serverLogger.info`Redis is online`;
index.ts (20 changed lines)
@@ -1,19 +1 @@
-import cluster from "node:cluster";
-import { sentry } from "@/sentry";
-import { createServer } from "@/server";
-import { appFactory } from "~/app";
-import { config } from "~/packages/config-manager/index.ts";
-
-process.on("SIGINT", () => {
-    process.exit();
-});
-
-if (cluster.isPrimary) {
-    for (let i = 0; i < Number(process.env.NUM_CPUS ?? 1); i++) {
-        cluster.fork();
-    }
-    await import("./setup.ts");
-    sentry?.captureMessage("Server started", "info");
-} else {
-    createServer(config, await appFactory());
-}
+await import("~/entrypoints/api/index.ts");
@@ -33,6 +33,7 @@
         "start": "NODE_ENV=production bun run dist/index.js --prod",
         "lint": "bunx @biomejs/biome check .",
         "build": "bun run build.ts",
+        "build:worker": "bun run build-worker.ts",
         "cloc": "cloc . --exclude-dir node_modules,dist,.output,.nuxt,meta,logs --exclude-ext sql,log,pem",
         "wc": "find server database *.ts docs packages types utils drizzle tests -type f -print0 | wc -m --files0-from=-",
         "cli": "bun run cli/index.ts",
@@ -3,9 +3,11 @@ import { BullMQAdapter } from "@bull-board/api/bullMQAdapter";
 import { HonoAdapter } from "@bull-board/hono";
 import { serveStatic } from "@hono/hono/bun";
 import type { OpenAPIHono } from "@hono/zod-openapi";
+import { deliveryQueue } from "~/classes/queues/delivery";
+import { fetchQueue } from "~/classes/queues/fetch";
+import { inboxQueue } from "~/classes/queues/inbox";
 import { config } from "~/packages/config-manager";
 import type { HonoEnv } from "~/types/api";
-import { deliveryQueue, fetchQueue, inboxQueue } from "~/worker";

 export const applyToHono = (app: OpenAPIHono<HonoEnv>): void => {
     const serverAdapter = new HonoAdapter(serveStatic);
utils/redis.ts (new file, 10 lines)
@@ -0,0 +1,10 @@
+import IORedis from "ioredis";
+import { config } from "~/packages/config-manager/index.ts";
+
+export const connection = new IORedis({
+    host: config.redis.queue.host,
+    port: config.redis.queue.port,
+    password: config.redis.queue.password,
+    db: config.redis.queue.database,
+    maxRetriesPerRequest: null,
+});
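`maxRetriesPerRequest: null` is the setting BullMQ requires on ioredis connections used for blocking operations, so centralizing the connection here keeps every queue and worker consistent. Any future queue can simply reuse it; a sketch with a hypothetical queue name:

    import { Queue } from "bullmq";
    import { connection } from "~/utils/redis.ts";

    // "media" is a hypothetical queue name, not part of this commit.
    export const mediaQueue = new Queue("media", { connection });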
worker.ts (deleted, 326 lines)
@@ -1,326 +0,0 @@
-import { getLogger } from "@logtape/logtape";
-import type { Entity } from "@versia/federation/types";
-import { Instance, User } from "@versia/kit/db";
-import { Queue, Worker } from "bullmq";
-import type { SocketAddress } from "bun";
-import chalk from "chalk";
-import { eq } from "drizzle-orm";
-import IORedis from "ioredis";
-import { InboxProcessor } from "./classes/inbox/processor.ts";
-import { Instances } from "./drizzle/schema.ts";
-import { config } from "./packages/config-manager/index.ts";
-import type { KnownEntity } from "./types/api.ts";
-
-const connection = new IORedis({
-    host: config.redis.queue.host,
-    port: config.redis.queue.port,
-    password: config.redis.queue.password,
-    db: config.redis.queue.database,
-    maxRetriesPerRequest: null,
-});
-
-export enum DeliveryJobType {
-    FederateEntity = "federateEntity",
-}
-
-export enum InboxJobType {
-    ProcessEntity = "processEntity",
-}
-
-export enum FetchJobType {
-    Instance = "instance",
-    User = "user",
-    Note = "user",
-}
-
-export type InboxJobData = {
-    data: Entity;
-    headers: {
-        "x-signature"?: string;
-        "x-nonce"?: string;
-        "x-signed-by"?: string;
-        authorization?: string;
-    };
-    request: {
-        url: string;
-        method: string;
-        body: string;
-    };
-    ip: SocketAddress | null;
-};
-
-export type DeliveryJobData = {
-    entity: KnownEntity;
-    recipientId: string;
-    senderId: string;
-};
-
-export type FetchJobData = {
-    uri: string;
-    refetcher?: string;
-};
-
-export const deliveryQueue = new Queue<DeliveryJobData, void, DeliveryJobType>(
-    "delivery",
-    {
-        connection,
-    },
-);
-
-export const inboxQueue = new Queue<InboxJobData, Response, InboxJobType>(
-    "inbox",
-    {
-        connection,
-    },
-);
-
-export const fetchQueue = new Queue<FetchJobData, void, FetchJobType>("fetch", {
-    connection,
-});
-
-export const deliveryWorker = new Worker<
-    DeliveryJobData,
-    void,
-    DeliveryJobType
->(
-    deliveryQueue.name,
-    async (job) => {
-        switch (job.name) {
-            case DeliveryJobType.FederateEntity: {
-                const { entity, recipientId, senderId } = job.data;
-
-                const logger = getLogger(["federation", "delivery"]);
-
-                const sender = await User.fromId(senderId);
-
-                if (!sender) {
-                    throw new Error(
-                        `Could not resolve sender ID ${chalk.gray(senderId)}`,
-                    );
-                }
-
-                const recipient = await User.fromId(recipientId);
-
-                if (!recipient) {
-                    throw new Error(
-                        `Could not resolve recipient ID ${chalk.gray(recipientId)}`,
-                    );
-                }
-
-                logger.debug`Federating entity ${chalk.gray(
-                    entity.id,
-                )} from ${chalk.gray(`@${sender.getAcct()}`)} to ${chalk.gray(
-                    recipient.getAcct(),
-                )}`;
-
-                await sender.federateToUser(entity, recipient);
-
-                logger.debug`${chalk.green(
-                    "✔",
-                )} Finished federating entity ${chalk.gray(entity.id)}`;
-            }
-        }
-    },
-    {
-        connection,
-        removeOnComplete: {
-            age: config.queues.delivery.remove_on_complete,
-        },
-        removeOnFail: {
-            age: config.queues.delivery.remove_on_failure,
-        },
-    },
-);
-
-export const inboxWorker = new Worker<InboxJobData, Response, InboxJobType>(
-    inboxQueue.name,
-    async (job) => {
-        switch (job.name) {
-            case InboxJobType.ProcessEntity: {
-                const {
-                    data,
-                    headers: {
-                        "x-signature": signature,
-                        "x-nonce": nonce,
-                        "x-signed-by": signedBy,
-                        authorization,
-                    },
-                    request,
-                    ip,
-                } = job.data;
-
-                const logger = getLogger(["federation", "inbox"]);
-
-                logger.debug`Processing entity ${chalk.gray(
-                    data.id,
-                )} from ${chalk.gray(signedBy)}`;
-
-                if (authorization) {
-                    const processor = new InboxProcessor(
-                        request,
-                        data,
-                        null,
-                        {
-                            signature,
-                            nonce,
-                            authorization,
-                        },
-                        logger,
-                        ip,
-                    );
-
-                    logger.debug`Entity ${chalk.gray(
-                        data.id,
-                    )} is potentially from a bridge`;
-
-                    return await processor.process();
-                }
-
-                // If not potentially from bridge, check for required headers
-                if (!(signature && nonce && signedBy)) {
-                    return Response.json(
-                        {
-                            error: "Missing required headers: x-signature, x-nonce, or x-signed-by",
-                        },
-                        {
-                            status: 400,
-                        },
-                    );
-                }
-
-                const sender = await User.resolve(signedBy);
-
-                if (!(sender || signedBy.startsWith("instance "))) {
-                    return Response.json(
-                        { error: `Couldn't resolve sender URI ${signedBy}` },
-                        {
-                            status: 404,
-                        },
-                    );
-                }
-
-                if (sender?.isLocal()) {
-                    return Response.json(
-                        {
-                            error: "Cannot process federation requests from local users",
-                        },
-                        {
-                            status: 400,
-                        },
-                    );
-                }
-
-                const remoteInstance = sender
-                    ? await Instance.fromUser(sender)
-                    : await Instance.resolveFromHost(signedBy.split(" ")[1]);
-
-                if (!remoteInstance) {
-                    return Response.json(
-                        { error: "Could not resolve the remote instance." },
-                        {
-                            status: 500,
-                        },
-                    );
-                }
-
-                logger.debug`Entity ${chalk.gray(
-                    data.id,
-                )} is from remote instance ${chalk.gray(
-                    remoteInstance.data.baseUrl,
-                )}`;
-
-                if (!remoteInstance.data.publicKey?.key) {
-                    throw new Error(
-                        `Instance ${remoteInstance.data.baseUrl} has no public key stored in database`,
-                    );
-                }
-
-                const processor = new InboxProcessor(
-                    request,
-                    data,
-                    {
-                        instance: remoteInstance,
-                        key:
-                            sender?.data.publicKey ??
-                            remoteInstance.data.publicKey.key,
-                    },
-                    {
-                        signature,
-                        nonce,
-                        authorization,
-                    },
-                    logger,
-                    ip,
-                );
-
-                const output = await processor.process();
-
-                logger.debug`${chalk.green(
-                    "✔",
-                )} Finished processing entity ${chalk.gray(data.id)}`;
-
-                return output;
-            }
-
-            default: {
-                throw new Error(`Unknown job type: ${job.name}`);
-            }
-        }
-    },
-    {
-        connection,
-        removeOnComplete: {
-            age: config.queues.inbox.remove_on_complete,
-        },
-        removeOnFail: {
-            age: config.queues.inbox.remove_on_failure,
-        },
-    },
-);
-
-export const fetchWorker = new Worker<FetchJobData, void, FetchJobType>(
-    fetchQueue.name,
-    async (job) => {
-        switch (job.name) {
-            case FetchJobType.Instance: {
-                const { uri } = job.data;
-
-                await job.log(`Fetching instance metadata from [${uri}]`);
-
-                // Check if exists
-                const host = new URL(uri).host;
-
-                const existingInstance = await Instance.fromSql(
-                    eq(Instances.baseUrl, host),
-                );
-
-                if (existingInstance) {
-                    await job.log("Instance is known, refetching remote data.");
-
-                    await existingInstance.updateFromRemote();
-
-                    await job.log(`Instance [${uri}] successfully refetched`);
-
-                    return;
-                }
-
-                await Instance.resolve(uri);
-
-                await job.log(
-                    `${chalk.green(
-                        "✔",
-                    )} Finished fetching instance metadata from [${uri}]`,
-                );
-            }
-        }
-    },
-    {
-        connection,
-        removeOnComplete: {
-            age: config.queues.fetch.remove_on_complete,
-        },
-        removeOnFail: {
-            age: config.queues.fetch.remove_on_failure,
-        },
-    },
-);