diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..b0008542 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,9 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true +tab_width = 4 +trim_trailing_whitespace = true \ No newline at end of file diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 4322d55a..92f7b990 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -1,31 +1,31 @@ - name: Check Types +name: Check Types - on: +on: push: - branches: ["*"] + branches: ["*"] pull_request: - # The branches below must be a subset of the branches above - branches: ["main"] + # The branches below must be a subset of the branches above + branches: ["main"] - jobs: +jobs: tests: - runs-on: ubuntu-latest - permissions: - contents: read + runs-on: ubuntu-latest + permissions: + contents: read - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: recursive + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive - - name: Setup Bun - uses: oven-sh/setup-bun@v2 + - name: Setup Bun + uses: oven-sh/setup-bun@v2 - - name: Install NPM packages - run: | - bun install + - name: Install NPM packages + run: | + bun install - - name: Run typechecks - run: | - bun run check + - name: Run typechecks + run: | + bun run check diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml deleted file mode 100644 index 0c92283d..00000000 --- a/.github/workflows/docker-publish.yml +++ /dev/null @@ -1,74 +0,0 @@ -name: Docker Build - -on: - push: - branches: [ "main" ] - # Publish semver tags as releases. - tags: [ 'v*.*.*' ] - pull_request: - branches: [ "main" ] - -env: - # Use docker.io for Docker Hub if empty - REGISTRY: ghcr.io - # github.repository as <account>/<repo> - IMAGE_NAME: ${{ github.repository }} - - -jobs: - build: - - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - # This is used to complete the identity challenge - # with sigstore/fulcio when running outside of PRs. - id-token: write - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - submodules: recursive - - - name: Setup QEMU - uses: docker/setup-qemu-action@v3 - with: - platforms: all - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 # v3.0.0 - - - name: Log into registry ${{ env.REGISTRY }} - if: github.event_name != 'pull_request' - uses: docker/login-action@v3 # v3.0.0 - with: - registry: ${{ env.REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract Docker metadata - id: meta - uses: docker/metadata-action@v5 # v5.0.0 - with: - images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - - - name: Get the commit hash - run: echo "GIT_COMMIT=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_ENV - - - name: Build and push Docker image - id: build-and-push - uses: docker/build-push-action@v5 # v5.0.0 - with: - context: . 
- push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - GIT_COMMIT=${{ env.GIT_COMMIT }} - provenance: mode=max - sbom: true - platforms: linux/amd64,linux/arm64 - cache-from: type=gha - cache-to: type=gha,mode=max diff --git a/.github/workflows/docker-server.yml b/.github/workflows/docker-server.yml new file mode 100644 index 00000000..2a85ac04 --- /dev/null +++ b/.github/workflows/docker-server.yml @@ -0,0 +1,72 @@ +name: Build Server Docker Image + +on: + push: + branches: ["main"] + # Publish semver tags as releases. + tags: ["v*.*.*"] + pull_request: + branches: ["main"] + +env: + # Use docker.io for Docker Hub if empty + REGISTRY: ghcr.io + # github.repository as <account>/<repo> + IMAGE_NAME: ${{ github.repository }} + +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + # This is used to complete the identity challenge + # with sigstore/fulcio when running outside of PRs. + id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Setup QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: all + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # v3.0.0 + + - name: Log into registry ${{ env.REGISTRY }} + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 # v3.0.0 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@v5 # v5.0.0 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Get the commit hash + run: echo "GIT_COMMIT=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_ENV + + - name: Build and push Docker image + id: build-and-push + uses: docker/build-push-action@v5 # v5.0.0 + with: + context: . + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + GIT_COMMIT=${{ env.GIT_COMMIT }} + provenance: mode=max + sbom: true + platforms: linux/amd64,linux/arm64 + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/docker-worker.yml b/.github/workflows/docker-worker.yml new file mode 100644 index 00000000..917ebc89 --- /dev/null +++ b/.github/workflows/docker-worker.yml @@ -0,0 +1,73 @@ +name: Build Worker Docker Image + +on: + push: + branches: ["main"] + # Publish semver tags as releases. + tags: ["v*.*.*"] + pull_request: + branches: ["main"] + +env: + # Use docker.io for Docker Hub if empty + REGISTRY: ghcr.io + # github.repository as <account>/<repo> + IMAGE_NAME: ${{ github.repository }} + +jobs: + build: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + # This is used to complete the identity challenge + # with sigstore/fulcio when running outside of PRs. 
+ id-token: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Setup QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: all + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 # v3.0.0 + + - name: Log into registry ${{ env.REGISTRY }} + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 # v3.0.0 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@v5 # v5.0.0 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Get the commit hash + run: echo "GIT_COMMIT=$(git rev-parse --short ${{ github.sha }})" >> $GITHUB_ENV + + - name: Build and push Docker image + id: build-and-push + uses: docker/build-push-action@v5 # v5.0.0 + with: + context: . + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + GIT_COMMIT=${{ env.GIT_COMMIT }} + file: ./Worker.Dockerfile + provenance: mode=max + sbom: true + platforms: linux/amd64,linux/arm64 + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index e6e69952..f9f889a8 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -1,56 +1,56 @@ name: Deploy Docs to GitHub Pages on: - push: - branches: [main] + push: + branches: [main] - workflow_dispatch: + workflow_dispatch: # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: - contents: read - pages: write - id-token: write + contents: read + pages: write + id-token: write # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
concurrency: - group: pages - cancel-in-progress: false + group: pages + cancel-in-progress: false jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 - - uses: oven-sh/setup-bun@v2 - - name: Setup Pages + - uses: oven-sh/setup-bun@v2 + - name: Setup Pages - uses: actions/configure-pages@v4 - - name: Install dependencies - run: bun install + uses: actions/configure-pages@v4 + - name: Install dependencies + run: bun install - - name: Build with VitePress - run: bun run docs:build + - name: Build with VitePress + run: bun run docs:build - - name: Upload artifact - uses: actions/upload-pages-artifact@v3 - with: - path: docs/.vitepress/dist + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + with: + path: docs/.vitepress/dist - # Deployment job - deploy: - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - needs: build - runs-on: ubuntu-latest - name: Deploy - steps: - - name: Deploy to GitHub Pages - id: deployment - uses: actions/deploy-pages@v4 + # Deployment job + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + needs: build + runs-on: ubuntu-latest + name: Deploy + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d3fc054c..62511425 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,31 +1,31 @@ name: Lint & Format on: - push: - branches: ["*"] - pull_request: - # The branches below must be a subset of the branches above - branches: ["main"] + push: + branches: ["*"] + pull_request: + # The branches below must be a subset of the branches above + branches: ["main"] jobs: - tests: - runs-on: ubuntu-latest - permissions: - contents: read + tests: + runs-on: ubuntu-latest + permissions: + contents: read - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: recursive + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive - - name: Setup Bun - uses: oven-sh/setup-bun@v2 + - name: Setup Bun + uses: oven-sh/setup-bun@v2 - - name: Install NPM packages - run: | - bun install + - name: Install NPM packages + run: | + bun install - - name: Run linting - run: | - bunx @biomejs/biome ci . + - name: Run linting + run: | + bunx @biomejs/biome ci . 
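Note: the two Docker workflows above pass the short commit hash into the image build as a GIT_COMMIT build argument, and the Dockerfiles re-export it as a runtime environment variable via ARG GIT_COMMIT / ENV GIT_COMMIT=$GIT_COMMIT. A minimal sketch of how running code could surface that value (this helper is illustrative, not part of the diff): // GIT_COMMIT is baked into the published images; outside Docker it is simply unset. export const buildVersion = (): string => process.env.GIT_COMMIT ?? "unknown";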
diff --git a/.github/workflows/mirror.yml b/.github/workflows/mirror.yml index 1dba9541..780b6bbe 100644 --- a/.github/workflows/mirror.yml +++ b/.github/workflows/mirror.yml @@ -2,7 +2,7 @@ name: Mirror to Codeberg on: [push] jobs: - mirror: - name: Mirror - uses: versia-pub/.github/.github/workflows/mirror.yml@main - secrets: inherit + mirror: + name: Mirror + uses: versia-pub/.github/.github/workflows/mirror.yml@main + secrets: inherit diff --git a/.github/workflows/nix-flake.yml b/.github/workflows/nix-flake.yml index 293e7a97..2726535c 100644 --- a/.github/workflows/nix-flake.yml +++ b/.github/workflows/nix-flake.yml @@ -1,25 +1,25 @@ -name: Nix Build +name: Nix Build on: - pull_request: - push: - branches: ["*"] - workflow_dispatch: + pull_request: + push: + branches: ["*"] + workflow_dispatch: jobs: - check: - runs-on: ubuntu-latest - permissions: - id-token: "write" - contents: "read" - steps: - - uses: actions/checkout@v4 - - uses: DeterminateSystems/nix-installer-action@main - with: - extra-conf: accept-flake-config = true - - uses: DeterminateSystems/magic-nix-cache-action@main - - uses: DeterminateSystems/flake-checker-action@main - - name: Build default package - run: nix build . - - name: Check flakes - run: nix flake check --allow-import-from-derivation + check: + runs-on: ubuntu-latest + permissions: + id-token: "write" + contents: "read" + steps: + - uses: actions/checkout@v4 + - uses: DeterminateSystems/nix-installer-action@main + with: + extra-conf: accept-flake-config = true + - uses: DeterminateSystems/magic-nix-cache-action@main + - uses: DeterminateSystems/flake-checker-action@main + - name: Build default package + run: nix build . + - name: Check flakes + run: nix flake check --allow-import-from-derivation diff --git a/.github/workflows/staging.yml b/.github/workflows/staging.yml index 22a9c928..0484bcab 100644 --- a/.github/workflows/staging.yml +++ b/.github/workflows/staging.yml @@ -1,50 +1,50 @@ name: Staging build bundle on: - push: - branches: ["staging"] + push: + branches: ["staging"] jobs: - tests: - runs-on: ubuntu-latest - permissions: - contents: read + tests: + runs-on: ubuntu-latest + permissions: + contents: read - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: recursive + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive - - name: Setup Bun - uses: oven-sh/setup-bun@v2 + - name: Setup Bun + uses: oven-sh/setup-bun@v2 - - name: Install NPM packages - run: | - bun install + - name: Install NPM packages + run: | + bun install - - name: Build dist - run: | - bun run build + - name: Build dist + run: | + bun run build - - name: Bundle - run: | - mkdir bundle - cp -r dist bundle/ - cp -r config bundle/ - cp -r docs bundle/ - cp -r CODE_OF_CONDUCT.md bundle/ - cp -r CONTRIBUTING.md bundle/ - cp -r README.md bundle/ - cp -r flake.nix bundle/ - cp -r shell.nix bundle/ - cp -r flake.lock bundle/ - cp -r LICENSE bundle/ - cp -r SECURITY.md bundle/ - tar cfJ archive.tar.xz bundle/ + - name: Bundle + run: | + mkdir bundle + cp -r dist bundle/ + cp -r config bundle/ + cp -r docs bundle/ + cp -r CODE_OF_CONDUCT.md bundle/ + cp -r CONTRIBUTING.md bundle/ + cp -r README.md bundle/ + cp -r flake.nix bundle/ + cp -r shell.nix bundle/ + cp -r flake.lock bundle/ + cp -r LICENSE bundle/ + cp -r SECURITY.md bundle/ + tar cfJ archive.tar.xz bundle/ - - name: Upload - uses: actions/upload-artifact@v4 - with: - name: staging-dist - path: archive.tar.xz + - name: Upload + uses: 
actions/upload-artifact@v4 + with: + name: staging-dist + path: archive.tar.xz diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 8ff07472..f6104e2e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -1,59 +1,59 @@ name: Tests on: - push: - branches: ["*"] - pull_request: - # The branches below must be a subset of the branches above - branches: ["main"] + push: + branches: ["*"] + pull_request: + # The branches below must be a subset of the branches above + branches: ["main"] jobs: - tests: - runs-on: ubuntu-latest - services: - postgres: - image: ghcr.io/versia-pub/postgres:main - ports: - - 5432:5432 - env: - POSTGRES_DB: versia - POSTGRES_USER: versia - POSTGRES_PASSWORD: versia - volumes: - - versia-data:/var/lib/postgresql/data - options: --health-cmd pg_isready - --health-interval 10s - --health-timeout 5s - --health-retries 5 - redis: - image: redis:latest - ports: - - 6379:6379 - options: --health-cmd "redis-cli ping" - --health-interval 10s - --health-timeout 5s - --health-retries 5 - permissions: - contents: read - security-events: write - actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - submodules: recursive + tests: + runs-on: ubuntu-latest + services: + postgres: + image: ghcr.io/versia-pub/postgres:main + ports: + - 5432:5432 + env: + POSTGRES_DB: versia + POSTGRES_USER: versia + POSTGRES_PASSWORD: versia + volumes: + - versia-data:/var/lib/postgresql/data + options: --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + redis: + image: redis:latest + ports: + - 6379:6379 + options: --health-cmd "redis-cli ping" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + permissions: + contents: read + security-events: write + actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive - - name: Setup Bun - uses: oven-sh/setup-bun@v2 + - name: Setup Bun + uses: oven-sh/setup-bun@v2 - - name: Install NPM packages - run: | - bun install + - name: Install NPM packages + run: | + bun install - - name: Move workflow config to config folder - run: | - mv .github/config.workflow.toml config/config.toml + - name: Move workflow config to config folder + run: | + mv .github/config.workflow.toml config/config.toml - - name: Run tests - run: | - bun run test + - name: Run tests + run: | + bun run test diff --git a/Worker.Dockerfile b/Worker.Dockerfile new file mode 100644 index 00000000..2226dcb8 --- /dev/null +++ b/Worker.Dockerfile @@ -0,0 +1,53 @@ +# Node is required for building the project +FROM imbios/bun-node:1-20-alpine AS base + +RUN apk add --no-cache libstdc++ + +# Install dependencies into temp directory +# This will cache them and speed up future builds +FROM base AS install + +RUN mkdir -p /temp +COPY . /temp +WORKDIR /temp +RUN bun install --production + +FROM base AS build + +# Copy the project +RUN mkdir -p /temp +COPY . 
/temp +# Copy dependencies +COPY --from=install /temp/node_modules /temp/node_modules + +# Build the project +WORKDIR /temp +RUN bun run build:worker +WORKDIR /temp/dist + +# Copy production dependencies and source code into final image +FROM oven/bun:1.1.36-alpine + +# Install libstdc++ for Bun and create app directory +RUN apk add --no-cache libstdc++ && \ + mkdir -p /app + +COPY --from=build /temp/dist /app/dist +COPY entrypoint.sh /app + +LABEL org.opencontainers.image.authors="Gaspard Wierzbinski (https://cpluspatch.dev)" +LABEL org.opencontainers.image.source="https://github.com/versia-pub/server" +LABEL org.opencontainers.image.vendor="Versia Pub" +LABEL org.opencontainers.image.licenses="AGPL-3.0-or-later" +LABEL org.opencontainers.image.title="Versia Server Worker" +LABEL org.opencontainers.image.description="Versia Server Worker Docker image" + +# Set current Git commit hash as an environment variable +ARG GIT_COMMIT +ENV GIT_COMMIT=$GIT_COMMIT + +# CD to app +WORKDIR /app/dist +ENV NODE_ENV=production +# Run migrations and start the worker +CMD [ "bun", "run", "index.js" ] diff --git a/api/inbox/index.ts b/api/inbox/index.ts index 954c1b35..7abbd16a 100644 --- a/api/inbox/index.ts +++ b/api/inbox/index.ts @@ -2,7 +2,7 @@ import { apiRoute, applyConfig } from "@/api"; import { createRoute } from "@hono/zod-openapi"; import type { Entity } from "@versia/federation/types"; import { z } from "zod"; -import { InboxJobType, inboxQueue } from "~/worker"; +import { InboxJobType, inboxQueue } from "~/classes/queues/inbox"; export const meta = applyConfig({ auth: { diff --git a/api/users/:uuid/inbox/index.ts b/api/users/:uuid/inbox/index.ts index 4d9eeeb8..0a591130 100644 --- a/api/users/:uuid/inbox/index.ts +++ b/api/users/:uuid/inbox/index.ts @@ -2,8 +2,8 @@ import { apiRoute, applyConfig } from "@/api"; import { createRoute } from "@hono/zod-openapi"; import type { Entity } from "@versia/federation/types"; import { z } from "zod"; +import { InboxJobType, inboxQueue } from "~/classes/queues/inbox"; import { ErrorSchema } from "~/types/api"; -import { InboxJobType, inboxQueue } from "~/worker"; export const meta = applyConfig({ auth: { diff --git a/build-worker.ts b/build-worker.ts new file mode 100644 index 00000000..d0600559 --- /dev/null +++ b/build-worker.ts @@ -0,0 +1,26 @@ +import { $ } from "bun"; +import ora from "ora"; + +const buildSpinner = ora("Building").start(); + +await $`rm -rf dist && mkdir dist`; + +await Bun.build({ + entrypoints: ["entrypoints/worker/index.ts"], + outdir: "dist", + target: "bun", + splitting: true, + minify: false, +}).then((output) => { + if (!output.success) { + console.error(output.logs); + throw new Error("Build failed"); + } +}); + +buildSpinner.text = "Transforming"; + +// Copy Drizzle migrations to dist +await $`cp -r drizzle dist/drizzle`; + +buildSpinner.stop(); diff --git a/classes/database/note.ts b/classes/database/note.ts index fdc835a8..c9e14145 100644 --- a/classes/database/note.ts +++ b/classes/database/note.ts @@ -41,7 +41,7 @@ import { parseTextMentions, } from "~/classes/functions/status"; import { config } from "~/packages/config-manager"; -import { DeliveryJobType, deliveryQueue } from "~/worker.ts"; +import { DeliveryJobType, deliveryQueue } from "../queues/delivery.ts"; import { Application } from "./application.ts"; import { Attachment } from "./attachment.ts"; import { BaseInterface } from "./base.ts"; diff --git a/classes/database/user.ts b/classes/database/user.ts index abd5c5b9..b6239605 100644 --- 
a/classes/database/user.ts +++ b/classes/database/user.ts @@ -53,7 +53,7 @@ import { findManyUsers } from "~/classes/functions/user"; import { searchManager } from "~/classes/search/search-manager"; import { type Config, config } from "~/packages/config-manager"; import type { KnownEntity } from "~/types/api.ts"; -import { DeliveryJobType, deliveryQueue } from "~/worker.ts"; +import { DeliveryJobType, deliveryQueue } from "../queues/delivery.ts"; import { BaseInterface } from "./base.ts"; import { Emoji } from "./emoji.ts"; import { Instance } from "./instance.ts"; diff --git a/classes/queues/delivery.ts b/classes/queues/delivery.ts new file mode 100644 index 00000000..85aebafc --- /dev/null +++ b/classes/queues/delivery.ts @@ -0,0 +1,20 @@ +import { Queue } from "bullmq"; +import type { KnownEntity } from "~/types/api"; +import { connection } from "~/utils/redis.ts"; + +export enum DeliveryJobType { + FederateEntity = "federateEntity", +} + +export type DeliveryJobData = { + entity: KnownEntity; + recipientId: string; + senderId: string; +}; + +export const deliveryQueue = new Queue<DeliveryJobData, void, DeliveryJobType>( + "delivery", + { + connection, + }, +); diff --git a/classes/queues/fetch.ts b/classes/queues/fetch.ts new file mode 100644 index 00000000..41e98ab3 --- /dev/null +++ b/classes/queues/fetch.ts @@ -0,0 +1,17 @@ +import { Queue } from "bullmq"; +import { connection } from "~/utils/redis.ts"; + +export enum FetchJobType { + Instance = "instance", + User = "user", + Note = "note", +} + +export type FetchJobData = { + uri: string; + refetcher?: string; +}; + +export const fetchQueue = new Queue<FetchJobData, void, FetchJobType>("fetch", { + connection, +}); diff --git a/classes/queues/inbox.ts b/classes/queues/inbox.ts new file mode 100644 index 00000000..b6010f76 --- /dev/null +++ b/classes/queues/inbox.ts @@ -0,0 +1,31 @@ +import type { Entity } from "@versia/federation/types"; +import { Queue } from "bullmq"; +import type { SocketAddress } from "bun"; +import { connection } from "~/utils/redis.ts"; + +export enum InboxJobType { + ProcessEntity = "processEntity", +} + +export type InboxJobData = { + data: Entity; + headers: { + "x-signature"?: string; + "x-nonce"?: string; + "x-signed-by"?: string; + authorization?: string; + }; + request: { + url: string; + method: string; + body: string; + }; + ip: SocketAddress | null; +}; + +export const inboxQueue = new Queue<InboxJobData, Response, InboxJobType>( + "inbox", + { + connection, + }, +); diff --git a/classes/workers/delivery.ts b/classes/workers/delivery.ts new file mode 100644 index 00000000..3fcf453b --- /dev/null +++ b/classes/workers/delivery.ts @@ -0,0 +1,66 @@ +import { getLogger } from "@logtape/logtape"; +import { User } from "@versia/kit/db"; +import { Worker } from "bullmq"; +import chalk from "chalk"; +import { config } from "~/packages/config-manager"; +import { connection } from "~/utils/redis.ts"; +import { + type DeliveryJobData, + DeliveryJobType, + deliveryQueue, +} from "../queues/delivery.ts"; + +export const getDeliveryWorker = (): Worker< + DeliveryJobData, + void, + DeliveryJobType +> => + new Worker( + deliveryQueue.name, + async (job) => { + switch (job.name) { + case DeliveryJobType.FederateEntity: { + const { entity, recipientId, senderId } = job.data; + + const logger = getLogger(["federation", "delivery"]); + + const sender = await User.fromId(senderId); + + if (!sender) { + throw new Error( + `Could not resolve sender ID ${chalk.gray(senderId)}`, + ); + } + + const recipient = await User.fromId(recipientId); + + if (!recipient) { + throw new Error( + `Could not resolve recipient ID 
${chalk.gray(recipientId)}`, + ); + } + + logger.debug`Federating entity ${chalk.gray( + entity.id, + )} from ${chalk.gray(`@${sender.getAcct()}`)} to ${chalk.gray( + recipient.getAcct(), + )}`; + + await sender.federateToUser(entity, recipient); + + logger.debug`${chalk.green( + "✔", + )} Finished federating entity ${chalk.gray(entity.id)}`; + } + } + }, + { + connection, + removeOnComplete: { + age: config.queues.delivery.remove_on_complete, + }, + removeOnFail: { + age: config.queues.delivery.remove_on_failure, + }, + }, + ); diff --git a/classes/workers/fetch.ts b/classes/workers/fetch.ts new file mode 100644 index 00000000..81dc0f9e --- /dev/null +++ b/classes/workers/fetch.ts @@ -0,0 +1,64 @@ +import { Instance } from "@versia/kit/db"; +import { Instances } from "@versia/kit/tables"; +import { Worker } from "bullmq"; +import chalk from "chalk"; +import { eq } from "drizzle-orm"; +import { config } from "~/packages/config-manager"; +import { connection } from "~/utils/redis.ts"; +import { + type FetchJobData, + FetchJobType, + fetchQueue, +} from "../queues/fetch.ts"; + +export const getFetchWorker = (): Worker<FetchJobData, void, FetchJobType> => + new Worker( + fetchQueue.name, + async (job) => { + switch (job.name) { + case FetchJobType.Instance: { + const { uri } = job.data; + + await job.log(`Fetching instance metadata from [${uri}]`); + + // Check if exists + const host = new URL(uri).host; + + const existingInstance = await Instance.fromSql( + eq(Instances.baseUrl, host), + ); + + if (existingInstance) { + await job.log( + "Instance is known, refetching remote data.", + ); + + await existingInstance.updateFromRemote(); + + await job.log( + `Instance [${uri}] successfully refetched`, + ); + + return; + } + + await Instance.resolve(uri); + + await job.log( + `${chalk.green( + "✔", + )} Finished fetching instance metadata from [${uri}]`, + ); + } + } + }, + { + connection, + removeOnComplete: { + age: config.queues.fetch.remove_on_complete, + }, + removeOnFail: { + age: config.queues.fetch.remove_on_failure, + }, + }, + ); diff --git a/classes/workers/inbox.ts b/classes/workers/inbox.ts new file mode 100644 index 00000000..3ce3af56 --- /dev/null +++ b/classes/workers/inbox.ts @@ -0,0 +1,167 @@ +import { getLogger } from "@logtape/logtape"; +import { Instance, User } from "@versia/kit/db"; +import { Worker } from "bullmq"; +import chalk from "chalk"; +import { config } from "~/packages/config-manager/index.ts"; +import { connection } from "~/utils/redis.ts"; +import { InboxProcessor } from "../inbox/processor.ts"; +import { + type InboxJobData, + InboxJobType, + inboxQueue, +} from "../queues/inbox.ts"; + +export const getInboxWorker = (): Worker< + InboxJobData, + Response, + InboxJobType +> => + new Worker( + inboxQueue.name, + async (job) => { + switch (job.name) { + case InboxJobType.ProcessEntity: { + const { + data, + headers: { + "x-signature": signature, + "x-nonce": nonce, + "x-signed-by": signedBy, + authorization, + }, + request, + ip, + } = job.data; + + const logger = getLogger(["federation", "inbox"]); + + logger.debug`Processing entity ${chalk.gray( + data.id, + )} from ${chalk.gray(signedBy)}`; + + if (authorization) { + const processor = new InboxProcessor( + request, + data, + null, + { + signature, + nonce, + authorization, + }, + logger, + ip, + ); + + logger.debug`Entity ${chalk.gray( + data.id, + )} is potentially from a bridge`; + + return await processor.process(); + } + + // If not potentially from bridge, check for required headers + if (!(signature && nonce && signedBy)) { + return 
Response.json( + { + error: "Missing required headers: x-signature, x-nonce, or x-signed-by", + }, + { + status: 400, + }, + ); + } + + const sender = await User.resolve(signedBy); + + if (!(sender || signedBy.startsWith("instance "))) { + return Response.json( + { + error: `Couldn't resolve sender URI ${signedBy}`, + }, + { + status: 404, + }, + ); + } + + if (sender?.isLocal()) { + return Response.json( + { + error: "Cannot process federation requests from local users", + }, + { + status: 400, + }, + ); + } + + const remoteInstance = sender + ? await Instance.fromUser(sender) + : await Instance.resolveFromHost( + signedBy.split(" ")[1], + ); + + if (!remoteInstance) { + return Response.json( + { error: "Could not resolve the remote instance." }, + { + status: 500, + }, + ); + } + + logger.debug`Entity ${chalk.gray( + data.id, + )} is from remote instance ${chalk.gray( + remoteInstance.data.baseUrl, + )}`; + + if (!remoteInstance.data.publicKey?.key) { + throw new Error( + `Instance ${remoteInstance.data.baseUrl} has no public key stored in database`, + ); + } + + const processor = new InboxProcessor( + request, + data, + { + instance: remoteInstance, + key: + sender?.data.publicKey ?? + remoteInstance.data.publicKey.key, + }, + { + signature, + nonce, + authorization, + }, + logger, + ip, + ); + + const output = await processor.process(); + + logger.debug`${chalk.green( + "✔", + )} Finished processing entity ${chalk.gray(data.id)}`; + + return output; + } + + default: { + throw new Error(`Unknown job type: ${job.name}`); + } + } + }, + { + connection, + removeOnComplete: { + age: config.queues.inbox.remove_on_complete, + }, + removeOnFail: { + age: config.queues.inbox.remove_on_failure, + }, + }, + ); diff --git a/cli/commands/federation/instance/refetch.ts b/cli/commands/federation/instance/refetch.ts index 22dd8b9d..21e31e98 100644 --- a/cli/commands/federation/instance/refetch.ts +++ b/cli/commands/federation/instance/refetch.ts @@ -3,8 +3,8 @@ import { Instance } from "@versia/kit/db"; import { Instances } from "@versia/kit/tables"; import { eq } from "drizzle-orm"; import ora from "ora"; +import { FetchJobType, fetchQueue } from "~/classes/queues/fetch"; import { BaseCommand } from "~/cli/base"; -import { FetchJobType, fetchQueue } from "~/worker"; export default class FederationInstanceRefetch extends BaseCommand< typeof FederationInstanceRefetch diff --git a/docker-compose.yml b/docker-compose.yml index 2c2303de..184d7a4b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,61 +1,75 @@ services: - versia: - image: ghcr.io/versia-pub/server:latest - volumes: - - ./logs:/app/dist/logs - - ./config:/app/dist/config - - ./uploads:/app/dist/uploads - restart: unless-stopped - container_name: versia - tty: true - networks: - - versia-net - depends_on: - - db - - redis - - sonic - - fe + versia: + image: ghcr.io/versia-pub/server:latest + volumes: + - ./logs:/app/dist/logs + - ./config:/app/dist/config + - ./uploads:/app/dist/uploads + restart: unless-stopped + container_name: versia + tty: true + networks: + - versia-net + depends_on: + - db + - redis + - sonic + - fe - fe: - image: ghcr.io/versia-pub/frontend:main - container_name: versia-fe - restart: unless-stopped - networks: - - versia-net - environment: - NUXT_PUBLIC_API_HOST: https://yourserver.com + worker: + image: ghcr.io/versia-pub/worker:latest + volumes: + - ./logs:/app/dist/logs + - ./config:/app/dist/config + restart: unless-stopped + container_name: versia-worker + tty: true + networks: + - versia-net + 
depends_on: + - db + - redis - db: - image: ghcr.io/versia-pub/postgres:main - container_name: versia-db - restart: unless-stopped - environment: - POSTGRES_DB: versia - POSTGRES_USER: versia - POSTGRES_PASSWORD: _______________ - networks: - - versia-net - volumes: - - ./db-data:/var/lib/postgresql/data + fe: + image: ghcr.io/versia-pub/frontend:main + container_name: versia-fe + restart: unless-stopped + networks: + - versia-net + environment: + NUXT_PUBLIC_API_HOST: https://yourserver.com - redis: - image: redis:alpine - container_name: versia-redis - volumes: - - ./redis-data:/data - restart: unless-stopped - networks: - - versia-net + db: + image: ghcr.io/versia-pub/postgres:main + container_name: versia-db + restart: unless-stopped + environment: + POSTGRES_DB: versia + POSTGRES_USER: versia + POSTGRES_PASSWORD: _______________ + networks: + - versia-net + volumes: + - ./db-data:/var/lib/postgresql/data - sonic: - volumes: - - ./config.cfg:/etc/sonic.cfg - - ./store/:/var/lib/sonic/store/ - image: valeriansaliou/sonic:v1.4.9 - container_name: versia-sonic - restart: unless-stopped - networks: - - versia-net + redis: + image: redis:alpine + container_name: versia-redis + volumes: + - ./redis-data:/data + restart: unless-stopped + networks: + - versia-net + + sonic: + volumes: + - ./config.cfg:/etc/sonic.cfg + - ./store/:/var/lib/sonic/store/ + image: valeriansaliou/sonic:v1.4.9 + container_name: versia-sonic + restart: unless-stopped + networks: + - versia-net networks: - versia-net: + versia-net: diff --git a/entrypoints/api/index.ts b/entrypoints/api/index.ts new file mode 100644 index 00000000..2020642e --- /dev/null +++ b/entrypoints/api/index.ts @@ -0,0 +1,20 @@ +import cluster from "node:cluster"; +import { sentry } from "@/sentry"; +import { createServer } from "@/server"; +import { appFactory } from "~/app"; +import { config } from "~/packages/config-manager/index.ts"; + +process.on("SIGINT", () => { + process.exit(); +}); + +if (cluster.isPrimary) { + for (let i = 0; i < Number(process.env.NUM_CPUS ?? 
1); i++) { + cluster.fork(); + } + + await import("~/entrypoints/api/setup.ts"); + sentry?.captureMessage("Server started", "info"); +} else { + createServer(config, await appFactory()); +} diff --git a/setup.ts b/entrypoints/api/setup.ts similarity index 97% rename from setup.ts rename to entrypoints/api/setup.ts index 5dab0b0f..7e584849 100644 --- a/setup.ts +++ b/entrypoints/api/setup.ts @@ -5,7 +5,7 @@ import { Note } from "@versia/kit/db"; import IORedis from "ioredis"; import { setupDatabase } from "~/drizzle/db"; import { config } from "~/packages/config-manager/index.ts"; -import { searchManager } from "./classes/search/search-manager.ts"; +import { searchManager } from "../../classes/search/search-manager.ts"; const timeAtStart = performance.now(); diff --git a/entrypoints/worker/index.ts b/entrypoints/worker/index.ts new file mode 100644 index 00000000..64b5b15a --- /dev/null +++ b/entrypoints/worker/index.ts @@ -0,0 +1,29 @@ +import { sentry } from "@/sentry"; +import { getLogger } from "@logtape/logtape"; +import chalk from "chalk"; +import { getDeliveryWorker } from "~/classes/workers/delivery"; +import { getFetchWorker } from "~/classes/workers/fetch"; +import { getInboxWorker } from "~/classes/workers/inbox"; + +process.on("SIGINT", () => { + process.exit(); +}); + +await import("~/entrypoints/worker/setup.ts"); +sentry?.captureMessage("Worker started", "info"); + +const serverLogger = getLogger("server"); + +serverLogger.info`Starting Fetch Worker...`; +getFetchWorker(); +serverLogger.info`${chalk.green("✔")} Fetch Worker started`; + +serverLogger.info`Starting Delivery Worker...`; +getDeliveryWorker(); +serverLogger.info`${chalk.green("✔")} Delivery Worker started`; + +serverLogger.info`Starting Inbox Worker...`; +getInboxWorker(); +serverLogger.info`${chalk.green("✔")} Inbox Worker started`; + +serverLogger.info`${chalk.green("✔✔✔")} All workers started`; diff --git a/entrypoints/worker/setup.ts b/entrypoints/worker/setup.ts new file mode 100644 index 00000000..0e4f1e18 --- /dev/null +++ b/entrypoints/worker/setup.ts @@ -0,0 +1,55 @@ +import { checkConfig } from "@/init"; +import { configureLoggers } from "@/loggers"; +import { getLogger } from "@logtape/logtape"; +import { Note } from "@versia/kit/db"; +import chalk from "chalk"; +import IORedis from "ioredis"; +import { setupDatabase } from "~/drizzle/db"; +import { config } from "~/packages/config-manager/index.ts"; +import { searchManager } from "../../classes/search/search-manager.ts"; + +const timeAtStart = performance.now(); + +await configureLoggers(); + +const serverLogger = getLogger("server"); + +console.info(` +██╗ ██╗███████╗██████╗ ███████╗██╗ █████╗ +██║ ██║██╔════╝██╔══██╗██╔════╝██║██╔══██╗ +██║ ██║█████╗ ██████╔╝███████╗██║███████║ +╚██╗ ██╔╝██╔══╝ ██╔══██╗╚════██║██║██╔══██║ + ╚████╔╝ ███████╗██║ ██║███████║██║██║ ██║ + ╚═══╝ ╚══════╝╚═╝ ╚═╝╚══════╝╚═╝╚═╝ ╚═╝ + ${chalk.redBright.bold("** WORKER MODE **")} +`); + +serverLogger.info`Starting Versia Server Worker...`; + +await setupDatabase(); + +if (config.sonic.enabled) { + await searchManager.connect(); +} + +// Check if database is reachable +const postCount = await Note.getCount(); + +await checkConfig(config); + +serverLogger.info`Versia Server Worker started in ${(performance.now() - timeAtStart).toFixed(0)}ms`; + +serverLogger.info`Database is online, containing ${postCount} posts`; + +// Check if Redis is reachable +const connection = new IORedis({ + host: config.redis.queue.host, + port: 
config.redis.queue.port, + password: config.redis.queue.password, + db: config.redis.queue.database, + maxRetriesPerRequest: null, +}); + +await connection.ping(); + +serverLogger.info`Redis is online`; diff --git a/index.ts b/index.ts index c5f41fd9..6ba16223 100644 --- a/index.ts +++ b/index.ts @@ -1,19 +1 @@ -import cluster from "node:cluster"; -import { sentry } from "@/sentry"; -import { createServer } from "@/server"; -import { appFactory } from "~/app"; -import { config } from "~/packages/config-manager/index.ts"; - -process.on("SIGINT", () => { - process.exit(); -}); - -if (cluster.isPrimary) { - for (let i = 0; i < Number(process.env.NUM_CPUS ?? 1); i++) { - cluster.fork(); - } - await import("./setup.ts"); - sentry?.captureMessage("Server started", "info"); -} else { - createServer(config, await appFactory()); -} +await import("~/entrypoints/api/index.ts"); diff --git a/package.json b/package.json index 2456f93b..3fab0c7e 100644 --- a/package.json +++ b/package.json @@ -33,6 +33,7 @@ "start": "NODE_ENV=production bun run dist/index.js --prod", "lint": "bunx @biomejs/biome check .", "build": "bun run build.ts", + "build:worker": "bun run build-worker.ts", "cloc": "cloc . --exclude-dir node_modules,dist,.output,.nuxt,meta,logs --exclude-ext sql,log,pem", "wc": "find server database *.ts docs packages types utils drizzle tests -type f -print0 | wc -m --files0-from=-", "cli": "bun run cli/index.ts", diff --git a/utils/bull-board.ts b/utils/bull-board.ts index 628ff638..83f6ab46 100644 --- a/utils/bull-board.ts +++ b/utils/bull-board.ts @@ -3,9 +3,11 @@ import { BullMQAdapter } from "@bull-board/api/bullMQAdapter"; import { HonoAdapter } from "@bull-board/hono"; import { serveStatic } from "@hono/hono/bun"; import type { OpenAPIHono } from "@hono/zod-openapi"; +import { deliveryQueue } from "~/classes/queues/delivery"; +import { fetchQueue } from "~/classes/queues/fetch"; +import { inboxQueue } from "~/classes/queues/inbox"; import { config } from "~/packages/config-manager"; import type { HonoEnv } from "~/types/api"; -import { deliveryQueue, fetchQueue, inboxQueue } from "~/worker"; export const applyToHono = (app: OpenAPIHono<HonoEnv>): void => { const serverAdapter = new HonoAdapter(serveStatic); diff --git a/utils/redis.ts b/utils/redis.ts new file mode 100644 index 00000000..066b2510 --- /dev/null +++ b/utils/redis.ts @@ -0,0 +1,10 @@ +import IORedis from "ioredis"; +import { config } from "~/packages/config-manager/index.ts"; + +export const connection = new IORedis({ + host: config.redis.queue.host, + port: config.redis.queue.port, + password: config.redis.queue.password, + db: config.redis.queue.database, + maxRetriesPerRequest: null, +}); diff --git a/worker.ts b/worker.ts deleted file mode 100644 index a48e850d..00000000 --- a/worker.ts +++ /dev/null @@ -1,326 +0,0 @@ -import { getLogger } from "@logtape/logtape"; -import type { Entity } from "@versia/federation/types"; -import { Instance, User } from "@versia/kit/db"; -import { Queue, Worker } from "bullmq"; -import type { SocketAddress } from "bun"; -import chalk from "chalk"; -import { eq } from "drizzle-orm"; -import IORedis from "ioredis"; -import { InboxProcessor } from "./classes/inbox/processor.ts"; -import { Instances } from "./drizzle/schema.ts"; -import { config } from "./packages/config-manager/index.ts"; -import type { KnownEntity } from "./types/api.ts"; - -const connection = new IORedis({ - host: config.redis.queue.host, - port: config.redis.queue.port, - password: config.redis.queue.password, - db: 
config.redis.queue.database, - maxRetriesPerRequest: null, -}); - -export enum DeliveryJobType { - FederateEntity = "federateEntity", -} - -export enum InboxJobType { - ProcessEntity = "processEntity", -} - -export enum FetchJobType { - Instance = "instance", - User = "user", - Note = "user", -} - -export type InboxJobData = { - data: Entity; - headers: { - "x-signature"?: string; - "x-nonce"?: string; - "x-signed-by"?: string; - authorization?: string; - }; - request: { - url: string; - method: string; - body: string; - }; - ip: SocketAddress | null; -}; - -export type DeliveryJobData = { - entity: KnownEntity; - recipientId: string; - senderId: string; -}; - -export type FetchJobData = { - uri: string; - refetcher?: string; -}; - -export const deliveryQueue = new Queue<DeliveryJobData, void, DeliveryJobType>( - "delivery", - { - connection, - }, -); - -export const inboxQueue = new Queue<InboxJobData, Response, InboxJobType>( - "inbox", - { - connection, - }, -); - -export const fetchQueue = new Queue<FetchJobData, void, FetchJobType>("fetch", { - connection, -}); - -export const deliveryWorker = new Worker< - DeliveryJobData, - void, - DeliveryJobType ->( - deliveryQueue.name, - async (job) => { - switch (job.name) { - case DeliveryJobType.FederateEntity: { - const { entity, recipientId, senderId } = job.data; - - const logger = getLogger(["federation", "delivery"]); - - const sender = await User.fromId(senderId); - - if (!sender) { - throw new Error( - `Could not resolve sender ID ${chalk.gray(senderId)}`, - ); - } - - const recipient = await User.fromId(recipientId); - - if (!recipient) { - throw new Error( - `Could not resolve recipient ID ${chalk.gray(recipientId)}`, - ); - } - - logger.debug`Federating entity ${chalk.gray( - entity.id, - )} from ${chalk.gray(`@${sender.getAcct()}`)} to ${chalk.gray( - recipient.getAcct(), - )}`; - - await sender.federateToUser(entity, recipient); - - logger.debug`${chalk.green( - "✔", - )} Finished federating entity ${chalk.gray(entity.id)}`; - } - } - }, - { - connection, - removeOnComplete: { - age: config.queues.delivery.remove_on_complete, - }, - removeOnFail: { - age: config.queues.delivery.remove_on_failure, - }, - }, -); - -export const inboxWorker = new Worker<InboxJobData, Response, InboxJobType>( - inboxQueue.name, - async (job) => { - switch (job.name) { - case InboxJobType.ProcessEntity: { - const { - data, - headers: { - "x-signature": signature, - "x-nonce": nonce, - "x-signed-by": signedBy, - authorization, - }, - request, - ip, - } = job.data; - - const logger = getLogger(["federation", "inbox"]); - - logger.debug`Processing entity ${chalk.gray( - data.id, - )} from ${chalk.gray(signedBy)}`; - - if (authorization) { - const processor = new InboxProcessor( - request, - data, - null, - { - signature, - nonce, - authorization, - }, - logger, - ip, - ); - - logger.debug`Entity ${chalk.gray( - data.id, - )} is potentially from a bridge`; - - return await processor.process(); - } - - // If not potentially from bridge, check for required headers - if (!(signature && nonce && signedBy)) { - return Response.json( - { - error: "Missing required headers: x-signature, x-nonce, or x-signed-by", - }, - { - status: 400, - }, - ); - } - - const sender = await User.resolve(signedBy); - - if (!(sender || signedBy.startsWith("instance "))) { - return Response.json( - { error: `Couldn't resolve sender URI ${signedBy}` }, - { - status: 404, - }, - ); - } - - if (sender?.isLocal()) { - return Response.json( - { - error: "Cannot process federation requests from local users", - }, - { - status: 400, - }, - ); - } - - const remoteInstance = sender - ? 
await Instance.fromUser(sender) - : await Instance.resolveFromHost(signedBy.split(" ")[1]); - - if (!remoteInstance) { - return Response.json( - { error: "Could not resolve the remote instance." }, - { - status: 500, - }, - ); - } - - logger.debug`Entity ${chalk.gray( - data.id, - )} is from remote instance ${chalk.gray( - remoteInstance.data.baseUrl, - )}`; - - if (!remoteInstance.data.publicKey?.key) { - throw new Error( - `Instance ${remoteInstance.data.baseUrl} has no public key stored in database`, - ); - } - - const processor = new InboxProcessor( - request, - data, - { - instance: remoteInstance, - key: - sender?.data.publicKey ?? - remoteInstance.data.publicKey.key, - }, - { - signature, - nonce, - authorization, - }, - logger, - ip, - ); - - const output = await processor.process(); - - logger.debug`${chalk.green( - "✔", - )} Finished processing entity ${chalk.gray(data.id)}`; - - return output; - } - - default: { - throw new Error(`Unknown job type: ${job.name}`); - } - } - }, - { - connection, - removeOnComplete: { - age: config.queues.inbox.remove_on_complete, - }, - removeOnFail: { - age: config.queues.inbox.remove_on_failure, - }, - }, -); - -export const fetchWorker = new Worker<FetchJobData, void, FetchJobType>( - fetchQueue.name, - async (job) => { - switch (job.name) { - case FetchJobType.Instance: { - const { uri } = job.data; - - await job.log(`Fetching instance metadata from [${uri}]`); - - // Check if exists - const host = new URL(uri).host; - - const existingInstance = await Instance.fromSql( - eq(Instances.baseUrl, host), - ); - - if (existingInstance) { - await job.log("Instance is known, refetching remote data."); - - await existingInstance.updateFromRemote(); - - await job.log(`Instance [${uri}] successfully refetched`); - - return; - } - - await Instance.resolve(uri); - - await job.log( - `${chalk.green( - "✔", - )} Finished fetching instance metadata from [${uri}]`, - ); - } - } - }, - { - connection, - removeOnComplete: { - age: config.queues.fetch.remove_on_complete, - }, - removeOnFail: { - age: config.queues.fetch.remove_on_failure, - }, - }, -);
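Taken together, the deleted worker.ts is split into importable queue definitions (classes/queues/*) and worker factories (classes/workers/*) that only the worker entrypoint instantiates. A minimal sketch of the producer side under this split, using the BullMQ queues shown above (the URI is a placeholder): import { FetchJobType, fetchQueue } from "~/classes/queues/fetch"; // Any process (API server, CLI) can enqueue jobs through the shared Redis connection; only entrypoints/worker/index.ts consumes them. await fetchQueue.add(FetchJobType.Instance, { uri: "https://remote.example" }); Because the Worker instances now live behind getFetchWorker()-style factories, importing a queue no longer starts a job consumer as a side effect, which is what lets the API image and the Worker image share these modules.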